From 13893bf74d256aa933f09844260c9493be4b727a Mon Sep 17 00:00:00 2001
From: Inhyuk Andy Cho
Date: Thu, 5 Jan 2023 16:41:22 +0900
Subject: [PATCH] feat: otx refactoring and bug fix

---
 .../mmaction/models/heads/roi_head.py | 2 +-
 .../adapters/deep_object_reid/__init__.py | 15 -
 .../deep_object_reid/configs/__init__.py | 15 -
 .../configs/configuration.yaml | 161 ----
 .../configs/efficientnet_b0/__init__.py | 15 -
 .../configs/efficientnet_b0/hpo_config.yaml | 15 -
 .../configs/efficientnet_b0/main_model.yaml | 85 --
 .../efficientnet_b0/main_model_multihead.yaml | 86 ---
 .../main_model_multilabel.yaml | 90 ---
 .../template_experimental.yaml | 56 --
 .../configs/efficientnet_v2_s/__init__.py | 15 -
 .../efficientnet_v2_s/compression_config.json | 72 --
 .../configs/efficientnet_v2_s/hpo_config.yaml | 15 -
 .../configs/efficientnet_v2_s/main_model.yaml | 84 --
 .../main_model_multihead.yaml | 87 ---
 .../main_model_multilabel.yaml | 91 ---
 .../template_experimental.yaml | 56 --
 .../mobilenet_v3_large_075/__init__.py | 15 -
 .../mobilenet_v3_large_075/aux_model.yaml | 39 -
 .../compression_config.json | 94 ---
 .../mobilenet_v3_large_075/hpo_config.yaml | 15 -
 .../mobilenet_v3_large_075/main_model.yaml | 86 ---
 .../main_model_multihead.yaml | 81 --
 .../main_model_multilabel.yaml | 89 ---
 .../template_experimental.yaml | 56 --
 .../configs/mobilenet_v3_large_1/__init__.py | 15 -
 .../mobilenet_v3_large_1/aux_model.yaml | 39 -
 .../compression_config.json | 94 ---
 .../mobilenet_v3_large_1/hpo_config.yaml | 15 -
 .../mobilenet_v3_large_1/main_model.yaml | 86 ---
 .../main_model_multihead.yaml | 81 --
 .../main_model_multilabel.yaml | 89 ---
 .../template_experimental.yaml | 56 --
 .../configs/mobilenet_v3_small/__init__.py | 15 -
 .../configs/mobilenet_v3_small/aux_model.yaml | 39 -
 .../mobilenet_v3_small/hpo_config.yaml | 16 -
 .../mobilenet_v3_small/main_model.yaml | 85 --
 .../main_model_multihead.yaml | 81 --
 .../main_model_multilabel.yaml | 87 ---
 .../template_experimental.yaml | 56 --
 .../deep_object_reid/data/__init__.py | 19 -
 .../adapters/deep_object_reid/data/dataset.py | 116 ---
 .../deep_object_reid/tasks/__init__.py | 21 -
 .../adapters/deep_object_reid/tasks/nncf.py | 725 ------------------
 .../deep_object_reid/utils/__init__.py | 40 -
 .../deep_object_reid/utils/monitors.py | 71 --
 .../adapters/deep_object_reid/utils/utils.py | 232 ------
 .../classification/adapters/mmcls/__init__.py | 5 +
 .../adapters/mmcls/models/classifiers/byol.py | 17 +-
 .../adapters/mmcls/nncf/__init__.py | 7 +-
 .../adapters/mmcls/nncf/builder.py | 36 +-
 .../adapters/mmcls/nncf/registers.py | 1 -
 .../adapters/mmcls/utils/__init__.py | 4 +-
 .../adapters/mmcls/utils/builder.py | 5 +-
 .../adapters/mmcls/utils/config_utils.py | 37 +-
 .../model_wrappers/openvino_models.py | 2 -
 .../efficientnet_b0_cls_incr/deployment.py | 2 +
 .../efficientnet_v2_s_cls_incr/deployment.py | 2 +
 .../compression_config.json | 36 +-
 .../deployment.py | 2 +
 .../compression_config.json | 50 +-
 .../classification/tasks/inference.py | 63 +-
 otx/algorithms/classification/tasks/nncf.py | 6 +-
 otx/algorithms/classification/tasks/train.py | 30 +-
 .../common/adapters/mmcv/__init__.py | 6 +-
 .../common/adapters/mmcv/data_cpu.py | 76 --
 .../common/adapters/mmcv/nncf/__init__.py | 11 +-
 .../common/adapters/mmcv/nncf/hooks.py | 4 +-
 .../common/adapters/mmcv/nncf/patches.py | 14 +-
 .../common/adapters/mmcv/nncf/runners.py | 28 +-
 .../common/adapters/mmcv/nncf/utils.py | 109 +--
 .../common/adapters/mmcv/utils/__init__.py | 48 ++
 .../common/adapters/mmcv/utils/builder.py | 162 ++++
 .../mmcv/{utils.py => utils/config_utils.py} | 33 +-
 .../common/adapters/nncf/__init__.py | 6 +-
 .../common/adapters/nncf/compression.py | 1 -
 otx/algorithms/common/adapters/nncf/config.py | 51 +-
 .../common/adapters/nncf/patchers/__init__.py | 2 +-
 .../common/adapters/nncf/patchers/patcher.py | 26 +-
 .../common/adapters/nncf/patchers/wrappers.py | 5 +-
 .../common/adapters/nncf/patches.py | 2 +-
 .../common/adapters/nncf/utils/__init__.py | 8 +-
 .../common/adapters/nncf/utils/utils.py | 8 +-
 otx/algorithms/common/tasks/nncf_base.py | 63 +-
 otx/algorithms/common/tasks/training_base.py | 48 +-
 otx/algorithms/common/utils/__init__.py | 2 +-
 otx/algorithms/common/utils/data.py | 7 +-
 otx/algorithms/common/utils/utils.py | 11 +-
 .../detection/adapters/mmdet/__init__.py | 5 +
 .../detection/adapters/mmdet/data/dataset.py | 51 +-
 .../adapters/mmdet/evaluation/__init__.py | 2 +-
 .../adapters/mmdet/evaluation/mae.py | 95 ++-
 .../adapters/mmdet/evaluation/mean_ap_seg.py | 129 ++--
 .../detection/adapters/mmdet/nncf/__init__.py | 5 +-
 .../detection/adapters/mmdet/nncf/builder.py | 50 +-
 .../detection/adapters/mmdet/nncf/patches.py | 13 +-
 .../adapters/mmdet/utils/__init__.py | 4 +-
 .../detection/adapters/mmdet/utils/builder.py | 5 +-
 .../adapters/mmdet/utils/config_utils.py | 21 +-
 .../model_wrappers/openvino_models.py | 14 +-
 .../cspdarknet_yolox/data_pipeline.py | 4 +-
 .../detection/cspdarknet_yolox/deployment.py | 2 +
 .../detection/mobilenetv2_atss/deployment.py | 2 +
 .../detection/mobilenetv2_ssd/deployment.py | 2 +
 .../efficientnetb2b_maskrcnn/deployment.py | 2 +
 .../efficientnetb2b_maskrcnn/template.yaml | 3 +-
 .../resnet50_maskrcnn/deployment.py | 2 +
 .../resnet50_maskrcnn/model.py | 5 +-
 otx/algorithms/detection/tasks/inference.py | 23 +-
 otx/algorithms/detection/tasks/nncf.py | 31 +-
 otx/algorithms/detection/tasks/train.py | 33 +-
 otx/algorithms/detection/utils/data.py | 1 +
 .../segmentation/adapters/mmseg/__init__.py | 7 +-
 .../adapters/mmseg/nncf/__init__.py | 6 +-
 .../adapters/mmseg/nncf/builder.py | 50 +-
 .../segmentation/adapters/mmseg/nncf/hooks.py | 54 +-
 .../adapters/mmseg/utils/__init__.py | 4 +-
 .../adapters/mmseg/utils/builder.py | 6 +-
 .../adapters/mmseg/utils/config_utils.py | 24 +-
 .../configs/base/models/mean_teacher.py | 4 +-
 .../configs/ocr_lite_hrnet_18/deployment.py | 2 +
 .../configs/ocr_lite_hrnet_18/model.py | 2 +-
 .../ocr_lite_hrnet_18_mod2/deployment.py | 2 +
 .../configs/ocr_lite_hrnet_18_mod2/model.py | 2 +-
 .../ocr_lite_hrnet_s_mod2/deployment.py | 2 +
 .../configs/ocr_lite_hrnet_s_mod2/model.py | 2 +-
 .../ocr_lite_hrnet_x_mod3/deployment.py | 2 +
 .../configs/ocr_lite_hrnet_x_mod3/model.py | 2 +-
 .../ocr_lite_hrnet_x_mod3/template.yaml | 3 +-
 .../segmentation/tasks/inference.py | 43 +-
 otx/algorithms/segmentation/tasks/nncf.py | 5 -
 otx/algorithms/segmentation/tasks/train.py | 31 +-
 otx/cli/tools/eval.py | 1 +
 otx/cli/utils/tests.py | 9 +-
 .../models/detectors/unbiased_teacher.py | 19 +-
 .../segmentors/mean_teacher_segmentor.py | 19 +-
 otx/mpa/utils/data_cpu.py | 75 --
 .../cli/classification/test_classification.py | 2 +-
 .../cli/detection/test_detection.py | 3 +-
 .../cli/segmentation/test_segmentation.py | 3 +-
 140 files changed, 886 insertions(+), 4551 deletions(-)
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/configuration.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/hpo_config.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multihead.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multilabel.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/template_experimental.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/compression_config.json
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/hpo_config.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multihead.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multilabel.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/template_experimental.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/aux_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/compression_config.json
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/hpo_config.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multihead.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multilabel.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/template_experimental.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/aux_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/compression_config.json
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/hpo_config.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multihead.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multilabel.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/template_experimental.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/aux_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/hpo_config.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multihead.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multilabel.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/template_experimental.yaml
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/data/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/data/dataset.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/tasks/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/tasks/nncf.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/utils/__init__.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/utils/monitors.py
 delete mode 100644 otx/algorithms/classification/adapters/deep_object_reid/utils/utils.py
 rename otx/algorithms/classification/{adapters/deep_object_reid/configs/efficientnet_b0 => configs/mobilenet_v3_large_075_cls_incr}/compression_config.json (75%)
 rename otx/algorithms/classification/{adapters/deep_object_reid/configs/mobilenet_v3_small => configs/mobilenet_v3_small_cls_incr}/compression_config.json (66%)
 delete mode 100644 otx/algorithms/common/adapters/mmcv/data_cpu.py
 create mode 100644 otx/algorithms/common/adapters/mmcv/utils/__init__.py
 create mode 100644 otx/algorithms/common/adapters/mmcv/utils/builder.py
 rename otx/algorithms/common/adapters/mmcv/{utils.py => utils/config_utils.py} (92%)
 delete mode 100644 otx/mpa/utils/data_cpu.py

diff --git a/otx/algorithms/action/adapters/mmaction/models/heads/roi_head.py b/otx/algorithms/action/adapters/mmaction/models/heads/roi_head.py
index 2ae71ec7b92..87fc46ba84a 100644
--- a/otx/algorithms/action/adapters/mmaction/models/heads/roi_head.py
+++ b/otx/algorithms/action/adapters/mmaction/models/heads/roi_head.py
@@ -5,7 +5,7 @@
 
 
 @MMDET_HEADS.register_module(force=True)
-# pylint: disable=abstract-method, unused-argument
+# pylint: disable=abstract-method, unused-argument, too-many-ancestors
 class AVARoIHead(MMAVARoIHead):
     """AVARoIHead for OTX."""
 
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/__init__.py
deleted file mode 100644
index c9b79b0fa62..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Adapters of classification - deep object reid."""
-
-# Copyright (C) 2022 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions
-# and limitations under the License.
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/configs/__init__.py
deleted file mode 100644
index 415ba28ce33..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""OTX Adapters - deep_object_reid.configs."""
-
-# Copyright (C) 2022 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions
-# and limitations under the License.
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/configuration.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/configuration.yaml
deleted file mode 100644
index fa962df2de7..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/configuration.yaml
+++ /dev/null
@@ -1,161 +0,0 @@
-description: Configuration for an image classification task
-header: Configuration for an image classification task
-learning_parameters:
-  batch_size:
-    affects_outcome_of: TRAINING
-    default_value: 32
-    description:
-      The number of training samples seen in each iteration of training.
-      Increasing this value improves training time and may make the training more
-      stable. A larger batch size has higher memory requirements.
-    editable: true
-    header: Batch size
-    max_value: 512
-    min_value: 1
-    type: INTEGER
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    visible_in_ui: true
-    warning:
-      Increasing this value may cause the system to use more memory than available,
-      potentially causing out of memory errors, please update with caution.
-    auto_hpo_state: NOT_POSSIBLE
-  description: Learning Parameters
-  header: Learning Parameters
-  learning_rate:
-    affects_outcome_of: TRAINING
-    default_value: 0.01
-    description:
-      Increasing this value will speed up training convergence but might
-      make it unstable.
-    editable: true
-    header: Learning rate
-    max_value: 0.1
-    min_value: 1.0e-07
-    type: FLOAT
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    visible_in_ui: true
-    warning: null
-    auto_hpo_state: NOT_POSSIBLE
-  max_num_epochs:
-    affects_outcome_of: TRAINING
-    default_value: 200
-    description:
-      Increasing this value causes the results to be more robust but training
-      time will be longer.
-    editable: true
-    header: Maximum number of training epochs
-    max_value: 1000
-    min_value: 1
-    type: INTEGER
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    visible_in_ui: true
-    warning: null
-  enable_early_stopping:
-    affects_outcome_of: TRAINING
-    default_value: true
-    description: Adaptive early exit from training when accuracy isn't changed or decreased for several epochs.
-    editable: true
-    header: Enable adaptive early stopping of the training
-    type: BOOLEAN
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    visible_in_ui: false
-    warning: null
-  enable_lr_finder:
-    affects_outcome_of: TRAINING
-    default_value: false
-    description: Learning rate parameter value will be ignored if enabled.
-    editable: true
-    header: Enable automatic learing rate estimation
-    type: BOOLEAN
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    visible_in_ui: true
-    warning: null
-  type: PARAMETER_GROUP
-  visible_in_ui: true
-nncf_optimization:
-  description: Optimization by NNCF
-  header: Optimization by NNCF
-  enable_quantization:
-    affects_outcome_of: TRAINING
-    default_value: true
-    description: Enable quantization algorithm
-    editable: true
-    header: Enable quantization algorithm
-    type: BOOLEAN
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    value: true
-    visible_in_ui: true
-    warning: null
-  enable_pruning:
-    affects_outcome_of: TRAINING
-    default_value: false
-    description: Enable filter pruning algorithm
-    editable: true
-    header: Enable filter pruning algorithm
-    type: BOOLEAN
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    value: false
-    visible_in_ui: true
-    warning: null
-  pruning_supported:
-    affects_outcome_of: TRAINING
-    default_value: false
-    description: Whether filter pruning is supported
-    editable: false
-    header: Whether filter pruning is supported
-    type: BOOLEAN
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    value: false
-    visible_in_ui: false
-    warning: null
-  maximal_accuracy_degradation:
-    affects_outcome_of: TRAINING
-    default_value: 1.0
-    description: The maximal allowed accuracy metric drop
-    editable: true
-    header: Maximum accuracy degradation
-    max_value: 100.0
-    min_value: 0.0
-    type: FLOAT
-    ui_rules:
-      action: DISABLE_EDITING
-      operator: AND
-      rules: []
-      type: UI_RULES
-    value: 1.0
-    visible_in_ui: true
-    warning: null
-  type: PARAMETER_GROUP
-  visible_in_ui: false
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/__init__.py
deleted file mode 100644
index c86b57bec9d..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Initialization of EfficinetNet-B0 model for Deep-Object-Reid Task."""
-
-# Copyright (C) 2022 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions
-# and limitations under the License.
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/hpo_config.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/hpo_config.yaml
deleted file mode 100644
index 56f7d3a80fe..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/hpo_config.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-metric: Top-1
-search_algorithm: asha
-hp_space:
-  learning_parameters.learning_rate:
-    param_type: qloguniform
-    range:
-      - 0.0003
-      - 0.1
-      - 0.0001
-  learning_parameters.batch_size:
-    param_type: qloguniform
-    range:
-      - 32
-      - 128
-      - 2
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model.yaml
deleted file mode 100644
index daa919b21a0..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 6
-  step: 0.001
-  smooth_f: 0.01
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 0.01
-  min_lr: 0.001
-  n_trials: 15
-
-model:
-  name: "efficientnet_b0"
-  type: "classification"
-  pretrained: False
-  load_weights: "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_image_classification/efficientnet_b0_imagenet_cls.pth"
-  save_all_chkpts: False
-
-custom_datasets:
-  roots: ["data/CIFAR100/train", "data/CIFAR100/val"]
-  types: ["classification_image_folder", "classification_image_folder"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.485, 0.456, 0.406]
-  norm_std: [0.229, 0.224, 0.225]
-  save_dir: "output/efficient_b0/log"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    random_rotate:
-      enable: True
-      p: 0.35
-      angle: (-10,10)
-    augmix:
-      enable: True
-      cfg_str: "augmix-m5-w3"
-    cutout:
-      enable: True
-      cutout_factor: 0.35
-      p: 0.35
-
-loss:
-  name: "am_softmax"
-  softmax:
-    s: 7.
-    compute_s: True
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.003
-  nbd: True
-  max_epoch: 200
-  weight_decay: 5e-4
-  batch_size: 64
-  fixbase_epoch: 0
-  lr_scheduler: "warmup"
-  warmup: 15
-  base_scheduler: "reduce_on_plateau"
-  early_stopping: True
-  train_patience: 5
-  lr_decay_factor: 200
-  deterministic: True
-  patience: 5
-  gamma: 0.1
-  sam:
-    rho: 0.05
-  ema:
-    enable: True
-    ema_decay: 0.999
-  mix_precision: True
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multihead.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multihead.yaml
deleted file mode 100644
index fb021c284dc..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multihead.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 10
-  step: 1e-5
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 1e-3
-  min_lr: 1e-5
-  n_trials: 15
-
-model:
-  name: "efficientnet_b0"
-  type: "multihead"
-  pretrained: False
-  load_weights: "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_image_classification/efficientnet_b0_imagenet_cls.pth"
-  save_all_chkpts: False
-  dropout_cls:
-    p: 0.1
-
-custom_datasets:
-  roots: ["data/coco/train.json", "data/coco/val.json"]
-  types: ["multihead_classification", "multihead_classification"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.485, 0.456, 0.406]
-  norm_std: [0.229, 0.224, 0.225]
-  save_dir: "output/mulitihead/efficientnet_b0"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    randaugment:
-      enable: True
-    cutout:
-      enable: True
-      cutout_factor: 0.35
-      p: 0.35
-
-loss:
-  name: "softmax,asl"
-  softmax:
-    s: 1.0
-    compute_s: False
-  asl:
-    gamma_pos: 0.
-    gamma_neg: 4.
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.007
-  nbd: True
-  max_epoch: 60
-  weight_decay: 5e-4
-  batch_size: 64
-  lr_scheduler: "onecycle"
-  early_stopping: True
-  pct_start: 0.1
-  train_patience: 5
-  lr_decay_factor: 1000
-  deterministic: True
-  target_metric: test_acc
-  gamma: 0.1
-  sam:
-    rho: 0.05
-  ema:
-    enable: True
-    ema_decay: 0.9995
-  mix_precision: True
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
-
-sc_integration:
-  epoch_scale: 3.
-  lr_scale: 1.5
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multilabel.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multilabel.yaml
deleted file mode 100644
index fd2e6bed468..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/main_model_multilabel.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 10
-  step: 1e-5
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 1e-3
-  min_lr: 1e-5
-  n_trials: 15
-
-model:
-  name: "efficientnet_b0"
-  type: "multilabel"
-  pretrained: False
-  load_weights: "https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_image_classification/efficientnet_b0_imagenet_cls.pth"
-  save_all_chkpts: False
-  dropout_cls:
-    p: 0.1
-
-custom_datasets:
-  roots: ["data/coco/train.json", "data/coco/val.json"]
-  types: ["multilabel_classification", "multilabel_classification"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.485, 0.456, 0.406]
-  norm_std: [0.229, 0.224, 0.225]
-  save_dir: "output/mulitilabel/efficientnet_b0"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    randaugment:
-      enable: True
-    cutout:
-      enable: True
-      cutout_factor: 0.35
-      p: 0.35
-
-loss:
-  name: "am_binary"
-  softmax:
-    s: 20.0
-    m: 0.01
-    compute_s: False
-  asl:
-    gamma_pos: 0.
-    gamma_neg: 0.
-  am_binary:
-    amb_t: 1.0
-    amb_k: 0.7
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.007
-  nbd: True
-  max_epoch: 60
-  weight_decay: 5e-4
-  batch_size: 64
-  lr_scheduler: "onecycle"
-  early_stopping: True
-  pct_start: 0.1
-  train_patience: 5
-  lr_decay_factor: 1000
-  deterministic: True
-  target_metric: test_acc
-  gamma: 0.1
-  sam:
-    rho: 0.05
-  ema:
-    enable: True
-    ema_decay: 0.9995
-  mix_precision: True
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
-
-sc_integration:
-  epoch_scale: 3.
-  lr_scale: 1.5
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/template_experimental.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/template_experimental.yaml
deleted file mode 100644
index 014af4aef11..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/template_experimental.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Description.
-model_template_id: Custom_Image_Classification_EfficinetNet-B0
-name: EfficientNet-B0
-task_type: CLASSIFICATION
-task_family: VISION
-instantiation: "CLASS"
-summary: Provides better performance on large datasets, but may be not so stable in case of small amount of training data.
-application: ~
-
-# Algo backend.
-framework: OTEClassification v1.2.3
-
-# Task implementations. deep-object-reid only runs nncf task.
-entrypoints:
-  base: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask
-  nncf: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask
-
-# Capabilities.
-capabilities:
-  - compute_representations
-  - compute_uncertainty_score
-
-# Hyperparameters.
-hyper_parameters:
-  base_path: "../configuration.yaml"
-  parameter_overrides:
-    learning_parameters:
-      batch_size:
-        default_value: 32
-        auto_hpo_state: POSSIBLE
-      max_num_epochs:
-        default_value: 200
-      learning_rate:
-        default_value: 0.007
-        auto_hpo_state: POSSIBLE
-      enable_early_stopping:
-        default_value: true
-    nncf_optimization:
-      enable_quantization:
-        default_value: true
-      enable_pruning:
-        default_value: false
-      pruning_supported:
-        default_value: true
-      maximal_accuracy_degradation:
-        default_value: 1.0
-
-# Training resources.
-max_nodes: 1
-training_targets:
-  - GPU
-  - CPU
-
-# Stats.
-gigaflops: 0.81
-size: 4.09
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/__init__.py
deleted file mode 100644
index ff24d1d3d05..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Initialization of EfficinetNet-V2 model for Deep-Object-Reid Task."""
-
-# Copyright (C) 2022 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions
-# and limitations under the License.
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/compression_config.json b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/compression_config.json
deleted file mode 100644
index 4d7f216fc0a..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/compression_config.json
+++ /dev/null
@@ -1,72 +0,0 @@
-{
-  "base": {
-    "nncf_config": {
-      "log_dir": "."
-    },
-    "lr_finder": {
-      "enable": false
-    },
-    "train": {
-      "batch_size": 64,
-      "lr_scheduler": "reduce_on_plateau",
-      "mix_precision": false
-    },
-    "test": {
-      "batch_size": 64
-    }
-  },
-  "nncf_quantization": {
-    "nncf_config": {
-      "compression": {
-        "algorithm": "quantization",
-        "preset": "mixed",
-        "initializer": {
-          "range": {
-            "num_init_samples": 8192
-          },
-          "batchnorm_adaptation": {
-            "num_bn_adaptation_samples": 8192
-          }
-        }
-      },
-      "accuracy_aware_training": {
-        "mode": "early_exit",
-        "params": {
-          "maximal_absolute_accuracy_degradation": 0.01,
-          "maximal_total_epochs": 100
-        }
-      }
-    }
-  },
-  "nncf_quantization_pruning": {
-    "nncf": {
-      "coeff_decrease_lr_for_nncf": 1.0
-    },
-    "nncf_config": {
-      "compression": [
-        {
-          "algorithm": "quantization",
-          "preset": "mixed",
-          "initializer": {
-            "range": {
-              "num_init_samples": 8192
-            },
-            "batchnorm_adaptation": {
-              "num_bn_adaptation_samples": 8192
-            }
-          }
-        }
-      ],
-      "accuracy_aware_training": {
-        "mode": "adaptive_compression_level",
-        "params": {
-          "maximal_absolute_accuracy_degradation": 0.01,
-          "initial_training_phase_epochs": 100,
-          "patience_epochs": 100,
-          "maximal_total_epochs": 200
-        }
-      }
-    }
-  },
-  "order_of_parts": ["nncf_quantization", "nncf_quantization_pruning"]
-}
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/hpo_config.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/hpo_config.yaml
deleted file mode 100644
index 8f99bfe2ba6..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/hpo_config.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-metric: Top-1
-search_algorithm: asha
-hp_space:
-  learning_parameters.learning_rate:
-    param_type: qloguniform
-    range:
-      - 0.0014
-      - 0.035
-      - 0.0001
-  learning_parameters.batch_size:
-    param_type: qloguniform
-    range:
-      - 20
-      - 48
-      - 2
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model.yaml
deleted file mode 100644
index 55d193ea34d..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 6
-  step: 0.001
-  smooth_f: 0.01
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 0.03
-  min_lr: 0.003
-  n_trials: 15
-
-model:
-  name: "efficientnetv2_s_21k"
-  type: "classification"
-  pretrained: True
-  save_all_chkpts: False
-  export_onnx_opset: 11
-
-custom_datasets:
-  roots: ["data/CIFAR100/train", "data/CIFAR100/val"]
-  types: ["classification_image_folder", "classification_image_folder"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.5, 0.5, 0.5]
-  norm_std: [0.5, 0.5, 0.5]
-  save_dir: "output/efficientv2_s_21k/log"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    random_rotate:
-      enable: True
-      p: 0.35
-      angle: (-10,10)
-    augmix:
-      enable: True
-      cfg_str: "augmix-m5-w3"
-    cutout:
-      enable: True
-      cutout_factor: 0.35
-      p: 0.35
-
-loss:
-  name: "am_softmax"
-  softmax:
-    compute_s: True
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.005
-  nbd: True
-  max_epoch: 200
-  weight_decay: 5e-4
-  batch_size: 64
-  fixbase_epoch: 0
-  lr_scheduler: "warmup"
-  warmup: 5
-  base_scheduler: "reduce_on_plateau"
-  early_stopping: True
-  train_patience: 5
-  lr_decay_factor: 200
-  deterministic: True
-  patience: 5
-  gamma: 0.1
-  sam:
-    rho: 0.05
-  ema:
-    enable: True
-    ema_decay: 0.999
-  mix_precision: True
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multihead.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multihead.yaml
deleted file mode 100644
index 3ac498efd3c..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multihead.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 6
-  step: 1e-5
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 1e-3
-  min_lr: 1e-5
-  n_trials: 15
-
-model:
-  name: "efficientnetv2_s_21k"
-  type: "multihead"
-  pretrained: True
-  save_all_chkpts: False
-  dropout_cls:
-    p: 0.15
-  export_onnx_opset: 11
-
-custom_datasets:
-  roots: ["datasets/coco/train.json", "datasets/coco/val.json"]
-  types: ["multihead_classification", "multihead_classification"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.5, 0.5, 0.5]
-  norm_std: [0.5, 0.5, 0.5]
-  save_dir: "experiments/efficientv2"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    randaugment:
-      enable: True
-    cutout:
-      enable: True
-      cutout_factor: 0.3
-      p: 0.35
-
-loss:
-  name: "softmax,asl"
-  softmax:
-    s: 1.0
-    compute_s: False
-  asl:
-    gamma_pos: 0.
-    gamma_neg: 4.
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sgd"
-  lr: 0.007
-  nbd: True
-  max_epoch: 60
-  weight_decay: 1e-4
-  batch_size: 48
-  lr_scheduler: "onecycle"
-  pct_start: 0.1
-  early_stopping: True
-  lr_decay_factor: 1000
-  deterministic: True
-  train_patience: 5
-  target_metric: test_acc
-  gamma: 0.1
-  ema:
-    enable: True
-    ema_decay: 0.9997
-  sam:
-    rho: 0.05
-    adaptive: False
-  mix_precision: False
-
-test:
-  batch_size: 64
-  evaluate: False
-  eval_freq: 1
-
-sc_integration:
-  epoch_scale: 3.
-  lr_scale: 1.5
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multilabel.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multilabel.yaml
deleted file mode 100644
index 05556b6fa44..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/main_model_multilabel.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 6
-  step: 1e-5
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 1e-3
-  min_lr: 1e-5
-  n_trials: 15
-
-model:
-  name: "efficientnetv2_s_21k"
-  type: "multilabel"
-  pretrained: True
-  save_all_chkpts: False
-  dropout_cls:
-    p: 0.15
-  export_onnx_opset: 11
-
-custom_datasets:
-  roots: ["datasets/coco/train.json", "datasets/coco/val.json"]
-  types: ["multilabel_classification", "multilabel_classification"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.5, 0.5, 0.5]
-  norm_std: [0.5, 0.5, 0.5]
-  save_dir: "experiments/efficientv2"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    randaugment:
-      enable: True
-    cutout:
-      enable: True
-      cutout_factor: 0.3
-      p: 0.35
-
-loss:
-  name: "am_binary"
-  softmax:
-    s: 20.0
-    m: 0.01
-    compute_s: False
-  asl:
-    gamma_pos: 0.
-    gamma_neg: 0.
-  am_binary:
-    amb_t: 1.0
-    amb_k: 0.7
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.007
-  nbd: True
-  max_epoch: 60
-  weight_decay: 1e-4
-  batch_size: 48
-  lr_scheduler: "onecycle"
-  pct_start: 0.1
-  early_stopping: True
-  lr_decay_factor: 1000
-  deterministic: True
-  train_patience: 5
-  target_metric: test_acc
-  gamma: 0.1
-  ema:
-    enable: True
-    ema_decay: 0.9997
-  sam:
-    rho: 0.05
-    adaptive: False
-  mix_precision: True
-
-test:
-  batch_size: 64
-  evaluate: False
-  eval_freq: 1
-
-sc_integration:
-  epoch_scale: 3.
-  lr_scale: 1.5
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/template_experimental.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/template_experimental.yaml
deleted file mode 100644
index c7d63e4b111..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_v2_s/template_experimental.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Description.
-model_template_id: Custom_Image_Classification_EfficientNet-V2-S
-name: EfficientNet-V2-S
-task_type: CLASSIFICATION
-task_family: VISION
-instantiation: "CLASS"
-summary: This model is quite slow, but provides superior single and multi label classification performance.
-application: ~
-
-# Algo backend.
-framework: OTEClassification v1.2.3
-
-# Task implementations. deep-object-reid only runs nncf task.
-entrypoints:
-  base: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask
-  nncf: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask
-
-# Capabilities.
-capabilities:
-  - compute_representations
-  - compute_uncertainty_score
-
-# Hyperparameters.
-hyper_parameters:
-  base_path: "../configuration.yaml"
-  parameter_overrides:
-    learning_parameters:
-      batch_size:
-        default_value: 32
-        auto_hpo_state: POSSIBLE
-      max_num_epochs:
-        default_value: 200
-      learning_rate:
-        default_value: 0.007
-        auto_hpo_state: POSSIBLE
-      enable_early_stopping:
-        default_value: true
-    nncf_optimization:
-      enable_quantization:
-        default_value: true
-      enable_pruning:
-        default_value: false
-      pruning_supported:
-        default_value: false
-      maximal_accuracy_degradation:
-        default_value: 1.0
-
-# Training resources.
-max_nodes: 1
-training_targets:
-  - GPU
-  - CPU
-
-# Stats.
-gigaflops: 5.76
-size: 20.23
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/__init__.py
deleted file mode 100644
index 13fa11a8239..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Initialization of MobileNet-V3-large-075 model for Deep-Object-Reid Task."""
-
-# Copyright (C) 2022 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions
-# and limitations under the License.
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/aux_model.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/aux_model.yaml
deleted file mode 100644
index c9c9751a060..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/aux_model.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-model:
-  name: "mobilenetv3_large"
-  type: "classification"
-  pretrained: True
-  feature_dim: 1280
-  dropout_cls:
-    p: 0.2
-    dist: "bernoulli"
-
-loss:
-  name: "am_softmax"
-  softmax:
-    s: 1.0
-    compute_s: False
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.013
-  nbd: True
-  weight_decay: 5e-4
-  lr_scheduler: "warmup"
-  warmup: 15
-  base_scheduler: "reduce_on_plateau"
-  early_stopping: True
-  train_patience: 5
-  lr_decay_factor: 200
-  deterministic: True
-  patience: 5
-  gamma: 0.1
-  sam:
-    rho: 0.15
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/compression_config.json b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/compression_config.json
deleted file mode 100644
index 8db180c5776..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/compression_config.json
+++ /dev/null
@@ -1,94 +0,0 @@
-{
-  "base": {
-    "nncf_config": {
-      "log_dir": "."
-    },
-    "lr_finder": {
-      "enable": false
-    },
-    "train": {
-      "batch_size": 64,
-      "lr_scheduler": "reduce_on_plateau",
-      "mix_precision": false
-    },
-    "test": {
-      "batch_size": 64
-    },
-    "nncf_aux_config_changes": [
-      {
-        "train": {
-          "batch_size": 64,
-          "lr_scheduler": "reduce_on_plateau",
-          "mix_precision": false
-        },
-        "test": {
-          "batch_size": 64
-        }
-      }
-    ]
-  },
-  "nncf_quantization": {
-    "nncf_config": {
-      "compression": {
-        "algorithm": "quantization",
-        "preset": "mixed",
-        "initializer": {
-          "range": {
-            "num_init_samples": 8192
-          },
-          "batchnorm_adaptation": {
-            "num_bn_adaptation_samples": 8192
-          }
-        }
-      },
-      "accuracy_aware_training": {
-        "mode": "early_exit",
-        "params": {
-          "maximal_absolute_accuracy_degradation": 0.01,
-          "maximal_total_epochs": 100
-        }
-      }
-    }
-  },
-  "nncf_quantization_pruning": {
-    "nncf": {
-      "coeff_decrease_lr_for_nncf": 1.0
-    },
-    "nncf_config": {
-      "compression": [
-        {
-          "algorithm": "filter_pruning",
-          "pruning_init": 0.1,
-          "params": {
-            "schedule": "baseline",
-            "pruning_flops_target": 0.1,
-            "filter_importance": "geometric_median",
-            "prune_downsample_convs": true
-          }
-        },
-        {
-          "algorithm": "quantization",
-          "preset": "mixed",
-          "initializer": {
-            "range": {
-              "num_init_samples": 8192
-            },
-            "batchnorm_adaptation": {
-              "num_bn_adaptation_samples": 8192
-            }
-          }
-        }
-      ],
-      "accuracy_aware_training": {
-        "mode": "adaptive_compression_level",
-        "params": {
-          "maximal_absolute_accuracy_degradation": 0.01,
-          "initial_training_phase_epochs": 100,
-          "patience_epochs": 100,
-          "maximal_total_epochs": 200
-        }
-      }
-    }
-  },
-  "order_of_parts": ["nncf_quantization", "nncf_quantization_pruning"]
-}
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/hpo_config.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/hpo_config.yaml
deleted file mode 100644
index 72632876d3e..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/hpo_config.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-metric: Top-1
-search_algorithm: asha
-hp_space:
-  learning_parameters.learning_rate:
-    param_type: qloguniform
-    range:
-      - 0.0032
-      - 0.08
-      - 0.0001
-  learning_parameters.batch_size:
-    param_type: qloguniform
-    range:
-      - 20
-      - 48
-      - 2
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model.yaml
deleted file mode 100644
index 1eaebe499e2..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 6
-  step: 0.001
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 0.029
-  min_lr: 0.005
-  n_trials: 15
-
-model:
-  name: "mobilenetv3_large_075"
-  type: "classification"
-  pretrained: True
-  save_all_chkpts: False
-  feature_dim: 1280
-
-mutual_learning:
-  aux_configs: ["aux_model.yaml"]
-
-custom_datasets:
-  roots: ["data/CIFAR100/train", "data/CIFAR100/val"]
-  types: ["classification_image_folder", "classification_image_folder"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.485, 0.456, 0.406]
-  norm_std: [0.229, 0.224, 0.225]
-  save_dir: "output/mobilenetv3_large_075/log"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    random_rotate:
-      enable: True
-      p: 0.35
-      angle: (-10,10)
-    cutout:
-      enable: True
-      cutout_factor: 0.35
-      p: 0.35
-    augmix:
-      enable: True
-      cfg_str: "augmix-m4-w2"
-
-loss:
-  name: "softmax"
-  softmax:
-    s: 1.0
-    compute_s: False
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.013
-  nbd: True
-  max_epoch: 200
-  weight_decay: 5e-4
-  batch_size: 100
-  lr_scheduler: "warmup"
-  warmup: 15
-  base_scheduler: "reduce_on_plateau"
-  early_stopping: True
-  train_patience: 5
-  lr_decay_factor: 200
-  deterministic: True
-  patience: 5
-  gamma: 0.1
-  sam:
-    rho: 0.15
-  ema:
-    enable: True
-    ema_decay: 0.999
-  mix_precision: True
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multihead.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multihead.yaml
deleted file mode 100644
index 20d9809fdd2..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multihead.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 7
-  step: 0.001
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 1e-3
-  min_lr: 1e-5
-  n_trials: 15
-
-model:
-  name: "mobilenetv3_large_075"
-  type: "multihead"
-  pretrained: True
-  save_all_chkpts: False
-  dropout_cls:
-    p: 0.1
-
-custom_datasets:
-  roots: ["mlc_voc_2007/train_mh_full.json", "mlc_voc_2007/val_mh_full.json"]
-  types: ["multihead_classification", "multihead_classification"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.485, 0.456, 0.406]
-  norm_std: [0.229, 0.224, 0.225]
-  save_dir: "output/mulitihead/mobilenetv3_large_75"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    randaugment:
-      enable: True
-    cutout:
-      enable: True
-      cutout_factor: 0.35
-      p: 0.35
-
-loss:
-  name: "softmax,asl"
-  softmax:
-    s: 1.0
-    compute_s: False
-  asl:
-    gamma_pos: 0.
-    gamma_neg: 4.
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.016
-  nbd: True
-  max_epoch: 80
-  weight_decay: 5e-4
-  batch_size: 64
-  lr_scheduler: "onecycle"
-  early_stopping: True
-  pct_start: 0.2
-  train_patience: 5
-  lr_decay_factor: 1000
-  deterministic: True
-  target_metric: test_acc
-  gamma: 0.1
-  sam:
-    rho: 0.05
-  ema:
-    enable: True
-    ema_decay: 0.9995
-  mix_precision: True
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multilabel.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multilabel.yaml
deleted file mode 100644
index 9ea2cc30bd6..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/main_model_multilabel.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
-lr_finder:
-  enable: False
-  mode: TPE
-  stop_after: False
-  num_epochs: 7
-  step: 0.001
-  epochs_warmup: 1
-  path_to_savefig: "lr_finder.jpg"
-  max_lr: 1e-3
-  min_lr: 1e-5
-  n_trials: 15
-
-model:
-  name: "mobilenetv3_large_075"
-  type: "multilabel"
-  pretrained: True
-  save_all_chkpts: False
-  dropout_cls:
-    p: 0.1
-
-custom_datasets:
-  roots: ["data/coco/train.json", "data/coco/val.json"]
-  types: ["multilabel_classification", "multilabel_classification"]
-
-data:
-  root: "./"
-  height: 224
-  width: 224
-  norm_mean: [0.485, 0.456, 0.406]
-  norm_std: [0.229, 0.224, 0.225]
-  save_dir: "output/mulitilabel/mobilenetv3_large_75"
-  workers: 6
-  transforms:
-    random_flip:
-      enable: True
-      p: 0.5
-    randaugment:
-      enable: True
-    cutout:
-      enable: True
-      cutout_factor: 0.35
-      p: 0.35
-
-loss:
-  name: "am_binary"
-  softmax:
-    s: 30.0
-    m: 0.01
-    compute_s: False
-  asl:
-    gamma_pos: 0.
-    gamma_neg: 0.
-  am_binary:
-    amb_t: 2.0
-    amb_k: 0.7
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.016
-  nbd: True
-  max_epoch: 60
-  weight_decay: 5e-4
-  batch_size: 64
-  lr_scheduler: "onecycle"
-  early_stopping: True
-  pct_start: 0.1
-  train_patience: 5
-  lr_decay_factor: 1000
-  deterministic: True
-  target_metric: test_acc
-  gamma: 0.1
-  sam:
-    rho: 0.05
-  ema:
-    enable: True
-    ema_decay: 0.9995
-  mix_precision: True
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
-
-sc_integration:
-  epoch_scale: 3.
-  lr_scale: 3.
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/template_experimental.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/template_experimental.yaml
deleted file mode 100644
index afbd6b7b2eb..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_075/template_experimental.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Description.
-model_template_id: MobileNet-V3-large-0.75x
-name: MobileNet-V3-large-0.75x
-task_type: CLASSIFICATION
-task_family: VISION
-instantiation: "CLASS"
-summary: Provides stable training and fast inference, but may be not as accurate as larger models, especially on large datasets.
-application: ~
-
-# Algo backend.
-framework: OTEClassification v1.2.3
-
-# Task implementations. deep-object-reid only runs nncf task.
-entrypoints:
-  base: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask
-  nncf: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask
-
-# Capabilities.
-capabilities:
-  - compute_representations
-  - compute_uncertainty_score
-
-# Hyperparameters.
-hyper_parameters:
-  base_path: "../configuration.yaml"
-  parameter_overrides:
-    learning_parameters:
-      batch_size:
-        default_value: 32
-        auto_hpo_state: POSSIBLE
-      max_num_epochs:
-        default_value: 200
-      learning_rate:
-        default_value: 0.016
-        auto_hpo_state: POSSIBLE
-      enable_early_stopping:
-        default_value: true
-    nncf_optimization:
-      enable_quantization:
-        default_value: true
-      enable_pruning:
-        default_value: false
-      pruning_supported:
-        default_value: true
-      maximal_accuracy_degradation:
-        default_value: 1.0
-
-# Training resources.
-max_nodes: 1
-training_targets:
-  - GPU
-  - CPU
-
-# Stats.
-gigaflops: 0.32
-size: 2.76
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/__init__.py
deleted file mode 100644
index 2ca98636811..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Initialization of MobileNet-V3-large-1 model for Deep-Object-Reid Task."""
-
-# Copyright (C) 2022 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions
-# and limitations under the License.
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/aux_model.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/aux_model.yaml
deleted file mode 100644
index 034d8e0e86b..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/aux_model.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-model:
-  name: "mobilenetv3_large"
-  type: "classification"
-  pretrained: True
-  feature_dim: 1280
-  dropout_cls:
-    p: 0.2
-    dist: "bernoulli"
-
-loss:
-  name: "am_softmax"
-  softmax:
-    s: 1.0
-    compute_s: False
-
-sampler:
-  train_sampler: "RandomSampler"
-
-train:
-  optim: "sam"
-  lr: 0.013
-  nbd: True
-  weight_decay: 5e-4
-  lr_scheduler: "warmup"
-  warmup: 15
-  base_scheduler: "reduce_on_plateau"
-  early_stopping: True
-  train_patience: 5
-  lr_decay_factor: 200
-  deterministic: True
-  patience: 5
-  gamma: 0.1
-  sam:
-    rho: 0.2
-
-test:
-  batch_size: 128
-  evaluate: False
-  eval_freq: 1
diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/compression_config.json b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/compression_config.json
deleted file mode 100644
index 8db180c5776..00000000000
--- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/compression_config.json
+++ /dev/null
@@ -1,94 +0,0 @@
-{
-  "base": {
-    "nncf_config": {
-      "log_dir": "."
- }, - "lr_finder": { - "enable": false - }, - "train": { - "batch_size": 64, - "lr_scheduler": "reduce_on_plateau", - "mix_precision": false - }, - "test": { - "batch_size": 64 - }, - "nncf_aux_config_changes": [ - { - "train": { - "batch_size": 64, - "lr_scheduler": "reduce_on_plateau", - "mix_precision": false - }, - "test": { - "batch_size": 64 - } - } - ] - }, - "nncf_quantization": { - "nncf_config": { - "compression": { - "algorithm": "quantization", - "preset": "mixed", - "initializer": { - "range": { - "num_init_samples": 8192 - }, - "batchnorm_adaptation": { - "num_bn_adaptation_samples": 8192 - } - } - }, - "accuracy_aware_training": { - "mode": "early_exit", - "params": { - "maximal_absolute_accuracy_degradation": 0.01, - "maximal_total_epochs": 100 - } - } - } - }, - "nncf_quantization_pruning": { - "nncf": { - "coeff_decrease_lr_for_nncf": 1.0 - }, - "nncf_config": { - "compression": [ - { - "algorithm": "filter_pruning", - "pruning_init": 0.1, - "params": { - "schedule": "baseline", - "pruning_flops_target": 0.1, - "filter_importance": "geometric_median", - "prune_downsample_convs": true - } - }, - { - "algorithm": "quantization", - "preset": "mixed", - "initializer": { - "range": { - "num_init_samples": 8192 - }, - "batchnorm_adaptation": { - "num_bn_adaptation_samples": 8192 - } - } - } - ], - "accuracy_aware_training": { - "mode": "adaptive_compression_level", - "params": { - "maximal_absolute_accuracy_degradation": 0.01, - "initial_training_phase_epochs": 100, - "patience_epochs": 100, - "maximal_total_epochs": 200 - } - } - } - }, - "order_of_parts": ["nncf_quantization", "nncf_quantization_pruning"] -} diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/hpo_config.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/hpo_config.yaml deleted file mode 100644 index 72632876d3e..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/hpo_config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -metric: Top-1 -search_algorithm: asha -hp_space: - learning_parameters.learning_rate: - param_type: qloguniform - range: - - 0.0032 - - 0.08 - - 0.0001 - learning_parameters.batch_size: - param_type: qloguniform - range: - - 20 - - 48 - - 2 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model.yaml deleted file mode 100644 index 7a61596f9b4..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model.yaml +++ /dev/null @@ -1,86 +0,0 @@ -lr_finder: - enable: False - mode: TPE - stop_after: False - num_epochs: 6 - step: 0.001 - epochs_warmup: 1 - path_to_savefig: "lr_finder.jpg" - max_lr: 0.029 - min_lr: 0.005 - n_trials: 15 - -model: - name: "mobilenetv3_large" - type: "classification" - pretrained: True - save_all_chkpts: False - feature_dim: 1280 - -mutual_learning: - aux_configs: ["aux_model.yaml"] - -custom_datasets: - roots: ["data/CIFAR100/train", "data/CIFAR100/val"] - types: ["classification_image_folder", "classification_image_folder"] - -data: - root: "./" - height: 224 - width: 224 - norm_mean: [0.485, 0.456, 0.406] - norm_std: [0.229, 0.224, 0.225] - save_dir: "output/mobilenetv3_large/log" - workers: 6 - transforms: - random_flip: - enable: True - p: 0.5 - random_rotate: - enable: True - p: 0.35 - angle: (-10,10) - cutout: - enable: True - 
cutout_factor: 0.35 - p: 0.35 - augmix: - enable: True - cfg_str: "augmix-m5-w3" - -loss: - name: "softmax" - softmax: - s: 1.0 - compute_s: False - -sampler: - train_sampler: "RandomSampler" - -train: - optim: "sam" - lr: 0.013 - nbd: True - max_epoch: 200 - weight_decay: 5e-4 - batch_size: 84 - lr_scheduler: "warmup" - warmup: 15 - base_scheduler: "reduce_on_plateau" - early_stopping: True - train_patience: 5 - lr_decay_factor: 200 - deterministic: True - patience: 5 - gamma: 0.1 - sam: - rho: 0.2 - ema: - enable: True - ema_decay: 0.999 - mix_precision: True - -test: - batch_size: 128 - evaluate: False - eval_freq: 1 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multihead.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multihead.yaml deleted file mode 100644 index ec1eeff40ef..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multihead.yaml +++ /dev/null @@ -1,81 +0,0 @@ -lr_finder: - enable: False - mode: TPE - stop_after: False - num_epochs: 7 - step: 0.001 - epochs_warmup: 1 - path_to_savefig: "lr_finder.jpg" - max_lr: 1e-3 - min_lr: 1e-5 - n_trials: 15 - -model: - name: "mobilenetv3_large" - type: "multihead" - pretrained: True - save_all_chkpts: False - dropout_cls: - p: 0.1 - -custom_datasets: - roots: ["mlc_voc_2007/train_mh_full.json", "mlc_voc_2007/val_mh_full.json"] - types: ["multihead_classification", "multihead_classification"] - -data: - root: "./" - height: 224 - width: 224 - norm_mean: [0.485, 0.456, 0.406] - norm_std: [0.229, 0.224, 0.225] - save_dir: "output/mulitihead/mobilenetv3_large_1" - workers: 6 - transforms: - random_flip: - enable: True - p: 0.5 - randaugment: - enable: True - cutout: - enable: True - cutout_factor: 0.35 - p: 0.35 - -loss: - name: "softmax,asl" - softmax: - s: 1.0 - compute_s: False - asl: - gamma_pos: 0. - gamma_neg: 4. 
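The `"softmax,asl"` loss above pairs a softmax term for the multiclass heads with an asymmetric loss (ASL) for the multilabel part; `gamma_pos` and `gamma_neg` are the asymmetric focusing exponents. A minimal NumPy sketch of that weighting, using the values from this config (an illustration only, not the torchreid implementation; the probability-shift variant of ASL is omitted):

```python
import numpy as np

def asymmetric_loss(logits: np.ndarray, targets: np.ndarray,
                    gamma_pos: float = 0.0, gamma_neg: float = 4.0) -> float:
    """Binary ASL: positives are weighted by (1-p)^gamma_pos,
    negatives by p^gamma_neg, so easy negatives contribute little."""
    p = 1.0 / (1.0 + np.exp(-logits))          # sigmoid probability per class
    p = np.clip(p, 1e-8, 1.0 - 1e-8)           # keep the logs finite
    pos = targets * (1.0 - p) ** gamma_pos * np.log(p)
    neg = (1.0 - targets) * p ** gamma_neg * np.log(1.0 - p)
    return float(-(pos + neg).mean())
```

With `gamma_neg: 4.` and `gamma_pos: 0.`, well-classified negatives (the overwhelming majority of entries in multilabel targets) are strongly down-weighted, which is the usual motivation for ASL over plain binary cross-entropy.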
- -sampler: - train_sampler: "RandomSampler" - -train: - optim: "sam" - lr: 0.016 - nbd: True - max_epoch: 80 - weight_decay: 5e-4 - batch_size: 64 - lr_scheduler: "onecycle" - early_stopping: True - pct_start: 0.2 - train_patience: 5 - lr_decay_factor: 1000 - deterministic: True - target_metric: test_acc - gamma: 0.1 - sam: - rho: 0.05 - ema: - enable: True - ema_decay: 0.9995 - mix_precision: True - -test: - batch_size: 128 - evaluate: False - eval_freq: 1 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multilabel.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multilabel.yaml deleted file mode 100644 index 8c599377a26..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/main_model_multilabel.yaml +++ /dev/null @@ -1,89 +0,0 @@ -lr_finder: - enable: False - mode: TPE - stop_after: False - num_epochs: 7 - step: 0.001 - epochs_warmup: 1 - path_to_savefig: "lr_finder.jpg" - max_lr: 1e-3 - min_lr: 1e-5 - n_trials: 15 - -model: - name: "mobilenetv3_large" - type: "multilabel" - pretrained: True - save_all_chkpts: False - dropout_cls: - p: 0.1 - -custom_datasets: - roots: ["data/coco/train.json", "data/coco/val.json"] - types: ["multilabel_classification", "multilabel_classification"] - -data: - root: "./" - height: 224 - width: 224 - norm_mean: [0.485, 0.456, 0.406] - norm_std: [0.229, 0.224, 0.225] - save_dir: "output/mulitilabel/mobilenetv3_large" - workers: 6 - transforms: - random_flip: - enable: True - p: 0.5 - randaugment: - enable: True - cutout: - enable: True - cutout_factor: 0.35 - p: 0.35 - -loss: - name: "am_binary" - softmax: - s: 30.0 - m: 0.15 - compute_s: False - asl: - gamma_pos: 0. - gamma_neg: 0. - am_binary: - amb_t: 2.0 - amb_k: 0.7 - -sampler: - train_sampler: "RandomSampler" - -train: - optim: "sam" - lr: 0.016 - nbd: True - max_epoch: 60 - weight_decay: 5e-4 - batch_size: 64 - lr_scheduler: "onecycle" - early_stopping: True - pct_start: 0.1 - train_patience: 5 - lr_decay_factor: 1000 - deterministic: True - target_metric: test_acc - gamma: 0.1 - sam: - rho: 0.05 - ema: - enable: True - ema_decay: 0.9995 - mix_precision: True - -test: - batch_size: 128 - evaluate: False - eval_freq: 1 - -sc_integration: - epoch_scale: 3. - lr_scale: 3. diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/template_experimental.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/template_experimental.yaml deleted file mode 100644 index 336052ff817..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_large_1/template_experimental.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# Description. -model_template_id: Custom_Image_Classification_MobileNet-V3-large-1x -name: MobileNet-V3-large-1x -task_type: CLASSIFICATION -task_family: VISION -instantiation: "CLASS" -summary: Custom Image Classification MobileNet-V3-large-1x -application: ~ - -# Algo backend. -framework: OTEClassification v1.2.3 - -# Task implementations. deep-object-reid only runs nncf task. -entrypoints: - base: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask - nncf: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask - -# Capabilities. -capabilities: - - compute_representations - - compute_uncertainty_score - -# Hyperparameters. 
-hyper_parameters: - base_path: "../configuration.yaml" - parameter_overrides: - learning_parameters: - batch_size: - default_value: 32 - auto_hpo_state: POSSIBLE - max_num_epochs: - default_value: 200 - learning_rate: - default_value: 0.016 - auto_hpo_state: POSSIBLE - enable_early_stopping: - default_value: true - nncf_optimization: - enable_quantization: - default_value: true - enable_pruning: - default_value: false - pruning_supported: - default_value: true - maximal_accuracy_degradation: - default_value: 1.0 - -# Training resources. -max_nodes: 1 -training_targets: - - GPU - - CPU - -# Stats. -gigaflops: 0.44 -size: 4.29 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/__init__.py deleted file mode 100644 index 188ef14f95f..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Initialization of MobileNet-V3-Small for Deep-Object-Reid Task.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/aux_model.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/aux_model.yaml deleted file mode 100644 index d7e7bfe2cd0..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/aux_model.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: - name: "mobilenetv3_large" - type: "classification" - pretrained: True - feature_dim: 1280 - dropout_cls: - p: 0.2 - dist: "bernoulli" - -loss: - name: "am_softmax" - softmax: - s: 1.0 - compute_s: False - -sampler: - train_sampler: "RandomSampler" - -train: - optim: "sam" - lr: 0.013 - nbd: True - weight_decay: 5e-4 - lr_scheduler: "warmup" - warmup: 15 - base_scheduler: "reduce_on_plateau" - early_stopping: True - train_patience: 5 - lr_decay_factor: 200 - deterministic: True - patience: 5 - gamma: 0.1 - sam: - rho: 0.07 - -test: - batch_size: 128 - evaluate: False - eval_freq: 1 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/hpo_config.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/hpo_config.yaml deleted file mode 100644 index 3ac8318b8f7..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/hpo_config.yaml +++ /dev/null @@ -1,16 +0,0 @@ -metric: Top-1 -search_algorithm: asha -early_stop: None -hp_space: - learning_parameters.learning_rate: - param_type: qloguniform - range: - - 0.0032 - - 0.08 - - 0.0001 - learning_parameters.batch_size: - param_type: qloguniform - range: - - 20 - - 48 - - 2 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model.yaml 
b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model.yaml deleted file mode 100644 index b964ca0ab19..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model.yaml +++ /dev/null @@ -1,85 +0,0 @@ -lr_finder: - enable: False - mode: TPE - stop_after: False - num_epochs: 6 - step: 0.001 - epochs_warmup: 1 - path_to_savefig: "lr_finder.jpg" - max_lr: 0.029 - min_lr: 0.005 - n_trials: 15 - -model: - name: "mobilenetv3_small" - type: "classification" - pretrained: True - save_all_chkpts: False - -mutual_learning: - aux_configs: ["aux_model.yaml"] - -custom_datasets: - roots: ["data/CIFAR100/train", "data/CIFAR100/val"] - types: ["classification_image_folder", "classification_image_folder"] - -data: - root: "./" - height: 224 - width: 224 - norm_mean: [0.485, 0.456, 0.406] - norm_std: [0.229, 0.224, 0.225] - save_dir: "output/mobilenetv3_small/log" - workers: 6 - transforms: - random_flip: - enable: True - p: 0.5 - random_rotate: - enable: True - p: 0.35 - angle: (-10,10) - augmix: - enable: True - cfg_str: "augmix-m4-w2" - cutout: - enable: True - cutout_factor: 0.35 - p: 0.35 - -loss: - name: "softmax" - softmax: - s: 1.0 - compute_s: False - -sampler: - train_sampler: "RandomSampler" - -train: - optim: "sam" - lr: 0.013 - nbd: True - max_epoch: 200 - weight_decay: 5e-4 - batch_size: 128 - lr_scheduler: "warmup" - warmup: 15 - base_scheduler: "reduce_on_plateau" - early_stopping: True - train_patience: 5 - lr_decay_factor: 200 - deterministic: True - patience: 5 - gamma: 0.1 - sam: - rho: 0.07 - ema: - enable: True - ema_decay: 0.999 - mix_precision: True - -test: - batch_size: 128 - evaluate: False - eval_freq: 1 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multihead.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multihead.yaml deleted file mode 100644 index 52ed4828021..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multihead.yaml +++ /dev/null @@ -1,81 +0,0 @@ -lr_finder: - enable: False - mode: TPE - stop_after: False - num_epochs: 7 - step: 0.001 - epochs_warmup: 1 - path_to_savefig: "lr_finder.jpg" - max_lr: 1e-3 - min_lr: 1e-5 - n_trials: 15 - -model: - name: "mobilenetv3_small" - type: "multihead" - pretrained: True - save_all_chkpts: False - dropout_cls: - p: 0.1 - -custom_datasets: - roots: ["mlc_voc_2007/train_mh_full.json", "mlc_voc_2007/val_mh_full.json"] - types: ["multihead_classification", "multihead_classification"] - -data: - root: "./" - height: 224 - width: 224 - norm_mean: [0.485, 0.456, 0.406] - norm_std: [0.229, 0.224, 0.225] - save_dir: "output/mulitihead/mobilenetv3_small" - workers: 6 - transforms: - random_flip: - enable: True - p: 0.5 - randaugment: - enable: True - cutout: - enable: True - cutout_factor: 0.35 - p: 0.35 - -loss: - name: "softmax,asl" - softmax: - s: 1.0 - compute_s: False - asl: - gamma_pos: 0. - gamma_neg: 4. 
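These templates all train with `optim: "sam"` and a per-model `sam.rho` (0.05 in the multihead/multilabel configs below, 0.07 or 0.2 in the single-label ones). Sharpness-aware minimization takes two passes per batch: first ascend to the worst-case weights inside an L2 ball of radius rho, then step the base optimizer with the gradient computed there. A rough PyTorch sketch of that update, assuming a generic `model` and `loss_fn` (not torchreid's actual optimizer):

```python
import torch

def sam_step(model, loss_fn, inputs, targets, base_optimizer, rho=0.05):
    # First pass: gradient at the current weights.
    loss_fn(model(inputs), targets).backward()
    params = [p for p in model.parameters() if p.grad is not None]
    grad_norm = torch.norm(torch.stack([p.grad.norm(2) for p in params]), 2)
    with torch.no_grad():
        eps = [rho * p.grad / (grad_norm + 1e-12) for p in params]
        for p, e in zip(params, eps):
            p.add_(e)                       # climb to the worst-case neighbor
    model.zero_grad()
    # Second pass: the gradient at the perturbed point drives the real update.
    loss_fn(model(inputs), targets).backward()
    with torch.no_grad():
        for p, e in zip(params, eps):
            p.sub_(e)                       # restore the original weights
    base_optimizer.step()
    base_optimizer.zero_grad()
```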
- -sampler: - train_sampler: "RandomSampler" - -train: - optim: "sam" - lr: 0.016 - nbd: True - max_epoch: 80 - weight_decay: 5e-4 - batch_size: 64 - lr_scheduler: "onecycle" - early_stopping: True - pct_start: 0.2 - train_patience: 5 - lr_decay_factor: 1000 - deterministic: True - target_metric: test_acc - gamma: 0.1 - sam: - rho: 0.05 - ema: - enable: True - ema_decay: 0.9995 - mix_precision: True - -test: - batch_size: 128 - evaluate: False - eval_freq: 1 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multilabel.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multilabel.yaml deleted file mode 100644 index bf253b52e94..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/main_model_multilabel.yaml +++ /dev/null @@ -1,87 +0,0 @@ -lr_finder: - enable: False - mode: TPE - stop_after: False - num_epochs: 7 - step: 0.001 - epochs_warmup: 1 - path_to_savefig: "lr_finder.jpg" - max_lr: 1e-3 - min_lr: 1e-5 - n_trials: 15 - -model: - name: "mobilenetv3_small" - type: "multilabel" - pretrained: True - save_all_chkpts: False - -custom_datasets: - roots: ["data/coco/train.json", "data/coco/val.json"] - types: ["multilabel_classification", "multilabel_classification"] - -data: - root: "./" - height: 224 - width: 224 - norm_mean: [0.485, 0.456, 0.406] - norm_std: [0.229, 0.224, 0.225] - save_dir: "output/mulitilabel/mobilenetv3_small" - workers: 6 - transforms: - random_flip: - enable: True - p: 0.5 - randaugment: - enable: True - cutout: - enable: True - cutout_factor: 0.35 - p: 0.35 - -loss: - name: "am_binary" - softmax: - s: 30.0 - m: 0.01 - compute_s: False - asl: - gamma_pos: 0. - gamma_neg: 0. - am_binary: - amb_t: 2.0 - amb_k: 0.7 - -sampler: - train_sampler: "RandomSampler" - -train: - optim: "sam" - lr: 0.016 - nbd: True - max_epoch: 60 - weight_decay: 5e-4 - batch_size: 64 - lr_scheduler: "onecycle" - early_stopping: True - pct_start: 0.1 - train_patience: 5 - lr_decay_factor: 1000 - deterministic: True - target_metric: test_acc - gamma: 0.1 - sam: - rho: 0.05 - ema: - enable: True - ema_decay: 0.9995 - mix_precision: True - -test: - batch_size: 128 - evaluate: False - eval_freq: 1 - -sc_integration: - epoch_scale: 3. - lr_scale: 3. diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/template_experimental.yaml b/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/template_experimental.yaml deleted file mode 100644 index 2a3ddafa04a..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/template_experimental.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# Description. -model_template_id: MobileNet-V3-small -name: MobileNet-V3-small -task_type: CLASSIFICATION -task_family: VISION -instantiation: "CLASS" -summary: Custom Image Classification MobileNet-V3-small -application: ~ - -# Algo backend. -framework: OTEClassification v1.2.3 - -# Task implementations. deep-object-reid only runs nncf task. -entrypoints: - base: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask - nncf: otx.algorithms.classification.adapters.deep_object_reid.tasks.ClassificationNNCFTask - -# Capabilities. -capabilities: - - compute_representations - - compute_uncertainty_score - -# Hyperparameters. 
-hyper_parameters: - base_path: "../configuration.yaml" - parameter_overrides: - learning_parameters: - batch_size: - default_value: 32 - auto_hpo_state: POSSIBLE - max_num_epochs: - default_value: 200 - learning_rate: - default_value: 0.016 - auto_hpo_state: POSSIBLE - enable_early_stopping: - default_value: true - nncf_optimization: - enable_quantization: - default_value: true - enable_pruning: - default_value: false - pruning_supported: - default_value: true - maximal_accuracy_degradation: - default_value: 1.0 - -# Training resources. -max_nodes: 1 -training_targets: - - GPU - - CPU - -# Stats. -gigaflops: 0.12 -size: 1.56 diff --git a/otx/algorithms/classification/adapters/deep_object_reid/data/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/data/__init__.py deleted file mode 100644 index a7b0cba17e7..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/data/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -"""OTX Adapters - deep_object_reid.data.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -from .dataset import ClassificationDataset - -__all__ = ["ClassificationDataset"] diff --git a/otx/algorithms/classification/adapters/deep_object_reid/data/dataset.py b/otx/algorithms/classification/adapters/deep_object_reid/data/dataset.py deleted file mode 100644 index b82b370adcc..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/data/dataset.py +++ /dev/null @@ -1,116 +0,0 @@ -"""Utils for deep_object_reid tasks.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. 
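For reference, the wrapper removed below adapts an OTX `DatasetEntity` to the dict-per-item format that deep-object-reid consumes. A hypothetical usage sketch, assuming an existing `otx_dataset` and its `labels` list (the import path follows the deleted `data/__init__.py`, so this only works before this patch is applied):

```python
from otx.algorithms.classification.adapters.deep_object_reid.data import (
    ClassificationDataset,
)

# otx_dataset (DatasetEntity) and labels (List[LabelEntity]) are assumed
# to exist already; they are not defined in this sketch.
data = ClassificationDataset(otx_dataset, labels, multilabel=False)
sample = data[0]            # {"img": HxWx3 uint8 array, "label": int}
print(len(data), data.get_classes())
```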
- -# pylint: disable=too-many-nested-blocks, invalid-name - -from typing import List - -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.label import LabelEntity -from otx.api.utils.argument_checks import ( - DatasetParamTypeCheck, - check_input_parameters_type, -) - - -# pylint: disable=too-many-instance-attributes -class ClassificationDataset: - """Dataset used in deep_object_reid tasks.""" - - @check_input_parameters_type({"otx_dataset": DatasetParamTypeCheck}) - def __init__( - self, - otx_dataset: DatasetEntity, - labels: List[LabelEntity], - multilabel: bool = False, - hierarchical: bool = False, - mixed_cls_heads_info: dict = None, - keep_empty_label: bool = False, - ): # pylint: disable=too-many-branches, too-many-locals - super().__init__() - self.otx_dataset = otx_dataset - self.multilabel = multilabel - self.mixed_cls_heads_info = mixed_cls_heads_info - self.hierarchical = hierarchical - self.labels = labels - self.annotation = [] - self.keep_empty_label = keep_empty_label - self.label_names = [label.name for label in self.labels] - - for i, _ in enumerate(self.otx_dataset): - class_indices = [] - item_labels = self.otx_dataset[i].get_roi_labels(self.labels, include_empty=self.keep_empty_label) - ignored_labels = self.otx_dataset[i].ignored_labels - if item_labels: - if not self.hierarchical: - for otx_lbl in item_labels: - if otx_lbl not in ignored_labels: - class_indices.append(self.label_names.index(otx_lbl.name)) - else: - class_indices.append(-1) - else: - if self.mixed_cls_heads_info is None: - raise TypeError("mixed_cls_heads_info is NoneType.") - num_cls_heads = self.mixed_cls_heads_info["num_multiclass_heads"] - - class_indices = [0] * ( - self.mixed_cls_heads_info["num_multiclass_heads"] - + self.mixed_cls_heads_info["num_multilabel_classes"] - ) - for j in range(num_cls_heads): - class_indices[j] = -1 - for otx_lbl in item_labels: - group_idx, in_group_idx = self.mixed_cls_heads_info["class_to_group_idx"][otx_lbl.name] - if group_idx < num_cls_heads: - class_indices[group_idx] = in_group_idx - else: - if otx_lbl not in ignored_labels: - class_indices[num_cls_heads + in_group_idx] = 1 - else: - class_indices[num_cls_heads + in_group_idx] = -1 - - else: # this supposed to happen only on inference stage or if we have a negative in multilabel data - if self.mixed_cls_heads_info: - class_indices = [-1] * ( - self.mixed_cls_heads_info["num_multiclass_heads"] - + self.mixed_cls_heads_info["num_multilabel_classes"] - ) - else: - class_indices.append(-1) - - if self.multilabel or self.hierarchical: - self.annotation.append({"label": tuple(class_indices)}) - else: - self.annotation.append({"label": class_indices[0]}) # type: ignore - - @check_input_parameters_type() - def __getitem__(self, idx: int): - """Get item from dataset.""" - sample = self.otx_dataset[idx].numpy # This returns 8-bit numpy array of shape (height, width, RGB) - label = self.annotation[idx]["label"] - return {"img": sample, "label": label} - - def __len__(self): - """Get annotation length.""" - return len(self.annotation) - - def get_annotation(self): - """Get annotation.""" - return self.annotation - - def get_classes(self): - """Get classes' name.""" - return self.label_names diff --git a/otx/algorithms/classification/adapters/deep_object_reid/tasks/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/tasks/__init__.py deleted file mode 100644 index 12de4f85839..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/tasks/__init__.py +++ 
/dev/null @@ -1,21 +0,0 @@ -"""OTX Adapters - deep_object_reid.tasks.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -from .nncf import ClassificationNNCFTask - -__all__ = [ - "ClassificationNNCFTask", -] diff --git a/otx/algorithms/classification/adapters/deep_object_reid/tasks/nncf.py b/otx/algorithms/classification/adapters/deep_object_reid/tasks/nncf.py deleted file mode 100644 index 531b0613d3b..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/tasks/nncf.py +++ /dev/null @@ -1,725 +0,0 @@ -"""NNCF Task for OTX Classification.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the 'License'); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an 'AS IS' BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -import io -import logging -import math -import os -import shutil -import tempfile -from typing import Any, Dict, List, Optional - -import torch -import torchreid -from scripts.default_config import ( - get_default_config, - imagedata_kwargs, - lr_scheduler_kwargs, - merge_from_files_with_base, - model_kwargs, - optimizer_kwargs, -) -from torchreid.apis.export import export_ir, export_onnx -from torchreid.apis.training import run_training -from torchreid.integration.nncf.compression import ( - check_nncf_is_enabled, - is_nncf_state, - wrap_nncf_model, -) -from torchreid.integration.nncf.compression_script_utils import ( - calculate_lr_for_nncf_training, - patch_config, -) -from torchreid.metrics.classification import score_extraction -from torchreid.ops import DataParallel -from torchreid.utils import load_pretrained_weights, set_model_attr, set_random_seed - -from otx.algorithms.classification.adapters.deep_object_reid.data import ( - ClassificationDataset, -) -from otx.algorithms.classification.adapters.deep_object_reid.utils import ( - DefaultMetricsMonitor, - StopCallback, - active_score_from_probs, - force_fp32, - get_hierarchical_predictions, - get_multiclass_predictions, - get_multihead_class_info, - get_multilabel_predictions, - sigmoid_numpy, - softmax_numpy, -) -from otx.algorithms.classification.configs import ClassificationConfig -from otx.algorithms.common.utils.callback import ( - InferenceProgressCallback, - OptimizationProgressCallback, -) -from otx.api.configuration import cfg_helper -from otx.api.configuration.helper.utils import ids_to_strings -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.inference_parameters import InferenceParameters -from otx.api.entities.metadata import FloatMetadata, FloatType -from otx.api.entities.model import ( - ModelEntity, - 
ModelFormat, - ModelOptimizationType, - ModelPrecision, - OptimizationMethod, -) -from otx.api.entities.model_template import parse_model_template -from otx.api.entities.optimization_parameters import OptimizationParameters -from otx.api.entities.result_media import ResultMediaEntity -from otx.api.entities.resultset import ResultSetEntity -from otx.api.entities.scored_label import ScoredLabel -from otx.api.entities.subset import Subset -from otx.api.entities.task_environment import TaskEnvironment -from otx.api.entities.tensor import TensorEntity -from otx.api.entities.train_parameters import default_progress_callback -from otx.api.serialization.label_mapper import label_schema_to_bytes -from otx.api.usecases.evaluation.metrics_helper import MetricsHelper -from otx.api.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask -from otx.api.usecases.tasks.interfaces.export_interface import ExportType, IExportTask -from otx.api.usecases.tasks.interfaces.inference_interface import IInferenceTask -from otx.api.usecases.tasks.interfaces.optimization_interface import ( - IOptimizationTask, - OptimizationType, -) -from otx.api.usecases.tasks.interfaces.unload_interface import IUnload -from otx.api.utils.argument_checks import ( - DatasetParamTypeCheck, - check_input_parameters_type, -) -from otx.api.utils.labels_utils import get_empty_label -from otx.api.utils.vis_utils import get_actmap - -logger = logging.getLogger(__name__) - - -class ClassificationInferenceTask( - IInferenceTask, IEvaluationTask, IExportTask, IUnload -): # pylint: disable=too-many-instance-attributes - """Inference task running through deep-object-reid.""" - - task_environment: TaskEnvironment - - @check_input_parameters_type() - def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] = None): - logger.info("Loading ClassificationTask.") - if output_path is None: - output_path = tempfile.mkdtemp(prefix="otx-cls-scratch-") - self._scratch_space = output_path - logger.info(f"Scratch space created at {self._scratch_space}") - - self._task_environment = task_environment - if len(task_environment.get_labels(False)) == 1: - self._labels = task_environment.get_labels(include_empty=True) - else: - self._labels = task_environment.get_labels(include_empty=False) - self._empty_label = get_empty_label(task_environment.label_schema) - self._multilabel = len(task_environment.label_schema.get_groups(False)) > 1 and len( - task_environment.label_schema.get_groups(False) - ) == len(task_environment.get_labels(include_empty=False)) - - self._multihead_class_info = {} - self._hierarchical = False - if not self._multilabel and len(task_environment.label_schema.get_groups(False)) > 1: - self._hierarchical = True - self._multihead_class_info = get_multihead_class_info(task_environment.label_schema) - - template_file_path = task_environment.model_template.model_template_path - - self._base_dir = os.path.abspath(os.path.dirname(template_file_path)) - - self._cfg = get_default_config() - self._patch_config(self._base_dir) - - if self._multilabel: - assert self._cfg.model.type == "multilabel", ( - task_environment.model_template.model_template_path - + " model template does not support multilabel classification" - ) - elif self._hierarchical: - assert self._cfg.model.type == "multihead", ( - task_environment.model_template.model_template_path - + " model template does not support hierarchical classification" - ) - else: - assert self._cfg.model.type == "classification", ( - 
task_environment.model_template.model_template_path - + " model template does not support multiclass classification" - ) - - self.device = torch.device("cuda:0") if torch.cuda.device_count() else torch.device("cpu") - self._model = self._load_model(task_environment.model, device=self.device) - - self.stop_callback = StopCallback() - self.metrics_monitor = DefaultMetricsMonitor() - - # Set default model attributes. - self._optimization_methods = [] # type: List[Any] - self._precision = [ModelPrecision.FP32] - self._optimization_type = ModelOptimizationType.MO - - @property - def _hyperparams(self): - return self._task_environment.get_hyper_parameters(ClassificationConfig) - - def _load_model( - self, model: Optional[ModelEntity], device: torch.device, pretrained_dict: Optional[Dict] = None - ): # pylint: disable=unused-argument - if model is not None: - # If a model has been trained and saved for the task already, create empty model and load weights here - if pretrained_dict is None: - buffer = io.BytesIO(model.get_data("weights.pth")) - model_data = torch.load(buffer, map_location=torch.device("cpu")) - else: - model_data = pretrained_dict - - model = self._create_model(self._cfg, from_scratch=True) - - try: - load_pretrained_weights(model, pretrained_dict=model_data) - logger.info("Loaded model weights from Task Environment") - except BaseException as ex: - raise ValueError("Could not load the saved model. The model file structure is invalid.") from ex - else: - # If there is no trained model yet, create model with pretrained weights as defined in the model config - # file. - model = self._create_model(self._cfg, from_scratch=False) - logger.info("No trained model in project yet. Created new model with general-purpose pretrained weights.") - - return model.to(device) # type: ignore - - def _create_model(self, config, from_scratch: bool = False): - """Creates a model, based on the configuration in config. 
- - :param config: deep-object-reid configuration from which the model has to be built - :param from_scratch: bool, if True does not load any weights - :return model: Model in training mode - """ - num_train_classes = len(self._labels) - model = torchreid.models.build_model(**model_kwargs(config, num_train_classes)) - if self._cfg.model.load_weights and not from_scratch: - load_pretrained_weights(model, self._cfg.model.load_weights) - return model - - def _patch_config(self, base_dir: str): - self._cfg = get_default_config() - if self._multilabel: - config_file_path = os.path.join(base_dir, "main_model_multilabel.yaml") - elif self._hierarchical: - config_file_path = os.path.join(base_dir, "main_model_multihead.yaml") - else: - config_file_path = os.path.join(base_dir, "main_model.yaml") - merge_from_files_with_base(self._cfg, config_file_path) - self._cfg.use_gpu = torch.cuda.device_count() > 0 - self.num_devices = 1 if self._cfg.use_gpu else 0 - if not self._cfg.use_gpu: - self._cfg.train.mix_precision = False - - self._cfg.custom_datasets.types = ["external_classification_wrapper", "external_classification_wrapper"] - self._cfg.custom_datasets.roots = [""] * 2 - self._cfg.data.save_dir = self._scratch_space - - self._cfg.test.test_before_train = False - self.num_classes = len(self._labels) - - for i, conf in enumerate(self._cfg.mutual_learning.aux_configs): - if str(base_dir) not in conf: - self._cfg.mutual_learning.aux_configs[i] = os.path.join(base_dir, conf) - - self._cfg.train.lr = self._hyperparams.learning_parameters.learning_rate - self._cfg.train.batch_size = self._hyperparams.learning_parameters.batch_size - self._cfg.test.batch_size = max(1, self._hyperparams.learning_parameters.batch_size // 2) - self._cfg.train.max_epoch = self._hyperparams.learning_parameters.max_num_epochs - self._cfg.lr_finder.enable = self._hyperparams.learning_parameters.enable_lr_finder - self._cfg.train.early_stopping = self._hyperparams.learning_parameters.enable_early_stopping - - # pylint: disable=too-many-locals, too-many-branches - @check_input_parameters_type({"dataset": DatasetParamTypeCheck}) - def infer( - self, dataset: DatasetEntity, inference_parameters: Optional[InferenceParameters] = None - ) -> DatasetEntity: - """Perform inference on the given dataset. - - :param dataset: Dataset entity to analyse - :param inference_parameters: Additional parameters for inference. - For example, when results are generated for evaluation purposes, Saliency maps can be turned off. 
- :return: Dataset that also includes the classification results - """ - if len(dataset) == 0: - logger.warning("Empty dataset has been passed for the inference.") - return dataset - - if inference_parameters is not None: - update_progress_callback = inference_parameters.update_progress - else: - update_progress_callback = default_progress_callback - - self._cfg.test.batch_size = max(1, self._hyperparams.learning_parameters.batch_size // 2) - self._cfg.data.workers = max(min(self._cfg.data.workers, len(dataset) - 1), 0) - - time_monitor = InferenceProgressCallback( - math.ceil(len(dataset) / self._cfg.test.batch_size), update_progress_callback - ) - - data = ClassificationDataset( - dataset, - self._labels, - self._multilabel, - self._hierarchical, - self._multihead_class_info, - keep_empty_label=self._empty_label in self._labels, - ) - self._cfg.custom_datasets.roots = [data, data] - datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(self._cfg)) - with force_fp32(self._model): - self._model.eval() - self._model.to(self.device) - if inference_parameters is not None: - dump_features = not inference_parameters.is_evaluation - inference_results, _ = score_extraction( - datamanager.test_loader, - self._model, - self._cfg.use_gpu, - perf_monitor=time_monitor, - feature_dump_mode="all" if dump_features else "vecs", - ) - if dump_features: - scores, saliency_maps, feature_vecs = inference_results - else: - scores, feature_vecs = inference_results - - if self._multilabel: - scores = sigmoid_numpy(scores) - - for i in range(scores.shape[0]): - dataset_item = dataset[i] - - if self._multilabel: - item_labels = get_multilabel_predictions(scores[i], self._labels, activate=False) - elif self._hierarchical: - item_labels = get_hierarchical_predictions( - scores[i], - self._labels, - self._task_environment.label_schema, - self._multihead_class_info, - activate=True, - ) - else: - scores[i] = softmax_numpy(scores[i]) - item_labels = get_multiclass_predictions(scores[i], self._labels, activate=False) - - if not item_labels: - if self._empty_label is not None: - item_labels = [ScoredLabel(self._empty_label, probability=1.0)] - - dataset_item.append_labels(item_labels) - active_score = active_score_from_probs(scores[i]) - active_score_media = FloatMetadata( - name="active_score", value=active_score, float_type=FloatType.ACTIVE_SCORE - ) - dataset_item.append_metadata_item(active_score_media, model=self._task_environment.model) - feature_vec_media = TensorEntity(name="representation_vector", numpy=feature_vecs[i].reshape(-1)) - dataset_item.append_metadata_item(feature_vec_media, model=self._task_environment.model) - - if dump_features: - actmap = get_actmap(saliency_maps[i], (dataset_item.width, dataset_item.height)) - saliency_media = ResultMediaEntity( - name="Saliency Map", - type="saliency_map", - annotation_scene=dataset_item.annotation_scene, - numpy=actmap, - roi=dataset_item.roi, - label=item_labels[0].label, - ) - dataset_item.append_metadata_item(saliency_media, model=self._task_environment.model) - - return dataset - - @check_input_parameters_type() - def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optional[str] = None): - """Evaluate.""" - performance = MetricsHelper.compute_accuracy(output_resultset).get_performance() - logger.info(f"Computes performance of {performance}") - output_resultset.performance = performance - - @check_input_parameters_type() - def export(self, export_type: ExportType, output_model: ModelEntity): - """Export.""" - assert 
export_type == ExportType.OPENVINO - output_model.model_format = ModelFormat.OPENVINO - output_model.optimization_type = self._optimization_type - - with tempfile.TemporaryDirectory() as tempdir: - optimized_model_dir = os.path.join(tempdir, "deep_object_reid") - logger.info(f'Optimized model will be temporarily saved to "{optimized_model_dir}"') - os.makedirs(optimized_model_dir, exist_ok=True) - try: - onnx_model_path = os.path.join(optimized_model_dir, "model.onnx") - with force_fp32(self._model): - self._model.old_forward = self._model.forward - self._model.forward = lambda x: self._model.old_forward(x, return_all=True, apply_scale=True) - export_onnx( - self._model.eval(), - self._cfg, - onnx_model_path, - opset=self._cfg.model.export_onnx_opset, - output_names=["logits", "saliency_map", "feature_vector"], - ) - self._model.forward = self._model.old_forward - del self._model.old_forward - pruning_transformation = OptimizationMethod.FILTER_PRUNING in self._optimization_methods - export_ir( - onnx_model_path, - self._cfg.data.norm_mean, - self._cfg.data.norm_std, - optimized_model_dir=optimized_model_dir, - pruning_transformation=pruning_transformation, - ) - - bin_file = [f for f in os.listdir(optimized_model_dir) if f.endswith(".bin")][0] - xml_file = [f for f in os.listdir(optimized_model_dir) if f.endswith(".xml")][0] - with open(os.path.join(optimized_model_dir, bin_file), "rb") as f: - output_model.set_data("openvino.bin", f.read()) - with open(os.path.join(optimized_model_dir, xml_file), "rb") as f: - output_model.set_data("openvino.xml", f.read()) - output_model.precision = self._precision - output_model.optimization_methods = self._optimization_methods - except Exception as ex: - raise RuntimeError("Optimization was unsuccessful.") from ex - - output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) - logger.info("Exporting completed.") - - @staticmethod - def _is_docker(): - """Checks whether the task runs in docker container. - - :return bool: True if task runs in docker - """ - path = "/proc/self/cgroup" - is_in_docker = False - if os.path.isfile(path): - with open(path, "rb") as f: - is_in_docker = is_in_docker or any("docker" in line for line in f) - is_in_docker = is_in_docker or os.path.exists("/.dockerenv") - return is_in_docker - - def _delete_scratch_space(self): - """Remove model checkpoints and logs.""" - if os.path.exists(self._scratch_space): - shutil.rmtree(self._scratch_space, ignore_errors=False) - - def unload(self): - """Unload the task.""" - self._delete_scratch_space() - if self._is_docker(): - logger.warning("Got unload request. Unloading models. Throwing Segmentation Fault on purpose") - import ctypes - - ctypes.string_at(0) - else: - logger.warning("Got unload request, but not on Docker. Only clearing CUDA cache") - torch.cuda.empty_cache() - logger.warning( - f"Done unloading. 
" f"Torch is still occupying {torch.cuda.memory_allocated()} bytes of GPU memory" - ) - - def _save_model(self, output_model: ModelEntity, state_dict: Optional[Dict] = None): - """Save model.""" - buffer = io.BytesIO() - hyperparams = self._task_environment.get_hyper_parameters(ClassificationConfig) - hyperparams_str = ids_to_strings(cfg_helper.convert(hyperparams, dict, enum_to_str=True)) - modelinfo = {"model": self._model.state_dict(), "config": hyperparams_str, "VERSION": 1} - - if state_dict is not None: - modelinfo.update(state_dict) - - torch.save(modelinfo, buffer) - output_model.set_data("weights.pth", buffer.getvalue()) - output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) - - -class ClassificationNNCFTask( - ClassificationInferenceTask, IOptimizationTask -): # pylint: disable=too-many-instance-attributes - """Task for compressing classification models using NNCF.""" - - def __init__(self, task_environment: TaskEnvironment, **kwargs): - curr_model_path = task_environment.model_template.model_template_path - base_model_path = os.path.join( - os.path.dirname(os.path.abspath(curr_model_path)), - task_environment.model_template.base_model_path, - ) - if os.path.isfile(base_model_path): - logger.info(f"Base model for NNCF: {base_model_path}") - # Redirect to base model - task_environment.model_template = parse_model_template(base_model_path) - logger.info("Loading ClassificationNNCFTask.") - super().__init__(task_environment, **kwargs) - - check_nncf_is_enabled() - - # Set hyperparameters - self._nncf_preset = None - self._max_acc_drop = None - self._set_attributes_by_hyperparams() - - # Patch the config - if not self._cfg.nncf.nncf_config_path: - self._cfg.nncf.nncf_config_path = os.path.join(self._base_dir, "compression_config.json") - self._cfg = patch_config(self._cfg, self._nncf_preset, self._max_acc_drop) - - self._compression_ctrl = None - self._nncf_metainfo = None - - # Load NNCF model. - if task_environment.model is not None: - if task_environment.model.optimization_type == ModelOptimizationType.NNCF: - logger.info("Loading the NNCF model") - self._compression_ctrl, self._model, self._nncf_metainfo = self._load_nncf_model(task_environment.model) - - # Set default model attributes. 
- self._optimization_type = ModelOptimizationType.NNCF - logger.info("ClassificationNNCFTask initialization completed") - set_model_attr(self._model, "mix_precision", self._cfg.train.mix_precision) - - @property - def _initial_lr(self): - return getattr(self, "__initial_lr") - - @_initial_lr.setter - def _initial_lr(self, value): - setattr(self, "__initial_lr", value) - - def _set_attributes_by_hyperparams(self): - logger.info("Hyperparameters: ") - logger.info( - f"maximal_accuracy_degradation = " f"{self._hyperparams.nncf_optimization.maximal_accuracy_degradation}" - ) - logger.info(f"enable_quantization = {self._hyperparams.nncf_optimization.enable_quantization}") - logger.info(f"enable_pruning = {self._hyperparams.nncf_optimization.enable_pruning}") - self._max_acc_drop = self._hyperparams.nncf_optimization.maximal_accuracy_degradation / 100.0 - quantization = self._hyperparams.nncf_optimization.enable_quantization - pruning = self._hyperparams.nncf_optimization.enable_pruning - if quantization and pruning: - self._nncf_preset = "nncf_quantization_pruning" - self._optimization_methods = [OptimizationMethod.QUANTIZATION, OptimizationMethod.FILTER_PRUNING] - self._precision = [ModelPrecision.INT8] - return - if quantization and not pruning: - self._nncf_preset = "nncf_quantization" - self._optimization_methods = [OptimizationMethod.QUANTIZATION] - self._precision = [ModelPrecision.INT8] - return - if not quantization and pruning: - self._nncf_preset = "nncf_pruning" - self._optimization_methods = [OptimizationMethod.FILTER_PRUNING] - self._precision = [ModelPrecision.FP32] - return - raise RuntimeError("Not selected optimization algorithm") - - def _load_model(self, model: Optional[ModelEntity], device: torch.device, pretrained_dict: Optional[Dict] = None): - if model is None: - raise ValueError("No trained model in the project. 
NNCF require pretrained weights to compress the model") - - if model.optimization_type == ModelOptimizationType.NNCF: - logger.info("Skip loading the original model") - return None - - model_data = pretrained_dict if pretrained_dict else self._load_model_data(model, "weights.pth") - if is_nncf_state(model_data): - raise ValueError("Model optimization type is not consistent with the model checkpoint.") - - self._initial_lr = model_data.get("initial_lr") - - return super()._load_model(model, device, pretrained_dict=model_data) - - def _load_nncf_model(self, model: Optional[ModelEntity]): - if model is None: - raise ValueError("No NNCF trained model in project.") - - model_data = self._load_model_data(model, "weights.pth") - if not is_nncf_state(model_data): - raise ValueError("Model optimization type is not consistent with the NNCF model checkpoint.") - model = self._create_model(self._cfg, from_scratch=True) - - compression_ctrl, model, nncf_metainfo = wrap_nncf_model(model, self._cfg, checkpoint_dict=model_data) - logger.info("Loaded NNCF model weights from Task Environment.") - return compression_ctrl, model, nncf_metainfo - - def _load_aux_models_data(self, model: Optional[ModelEntity]): - aux_models_data = [] - num_aux_models = len(self._cfg.mutual_learning.aux_configs) - if model is None: - raise TypeError("Model is NoneType.") - for idx in range(num_aux_models): - data_name = f"aux_model_{idx + 1}.pth" - if data_name not in model.model_adapters: - return [] - model_data = self._load_model_data(model, data_name) - aux_models_data.append(model_data) - return aux_models_data - - @check_input_parameters_type({"dataset": DatasetParamTypeCheck}) - def optimize( - self, - optimization_type: OptimizationType, - dataset: DatasetEntity, - output_model: ModelEntity, - optimization_parameters: Optional[OptimizationParameters] = None, - ): # pylint: disable=too-many-locals - """Optimize a model on a dataset.""" - if optimization_type is not OptimizationType.NNCF: - raise RuntimeError("NNCF is the only supported optimization") - if self._compression_ctrl: - raise RuntimeError("The model is already optimized. 
NNCF requires the original model for optimization.") - if self._cfg.lr_finder.enable: - raise RuntimeError("LR finder could not be used together with NNCF compression") - - aux_pretrained_dicts = self._load_aux_models_data(self._task_environment.model) - if len(aux_pretrained_dicts) == 0: - self._cfg.mutual_learning.aux_configs = [] - logger.warning("WARNING: No pretrained weights are loaded for aux model.") - num_aux_models = len(self._cfg.mutual_learning.aux_configs) - - if optimization_parameters is not None: - update_progress_callback = optimization_parameters.update_progress - else: - update_progress_callback = default_progress_callback - - num_epoch = self._cfg.nncf_config["accuracy_aware_training"]["params"]["maximal_total_epochs"] - train_subset = dataset.get_subset(Subset.TRAINING) - time_monitor = OptimizationProgressCallback( - update_progress_callback, - num_epoch=num_epoch, - num_train_steps=max(1, math.floor(len(train_subset) / self._cfg.train.batch_size)), - num_val_steps=0, - num_test_steps=0, - loading_stage_progress_percentage=5, - initialization_stage_progress_percentage=5, - ) - - self.metrics_monitor = DefaultMetricsMonitor() - self.stop_callback.reset() - - set_random_seed(self._cfg.train.seed) - val_subset = dataset.get_subset(Subset.VALIDATION) - self._cfg.custom_datasets.roots = [ - ClassificationDataset( - train_subset, - self._labels, - self._multilabel, - self._hierarchical, - self._multihead_class_info, - keep_empty_label=self._empty_label in self._labels, - ), - ClassificationDataset( - val_subset, - self._labels, - self._multilabel, - self._hierarchical, - self._multihead_class_info, - keep_empty_label=self._empty_label in self._labels, - ), - ] - datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(self._cfg)) - - self._compression_ctrl, self._model, self._nncf_metainfo = wrap_nncf_model( - self._model, self._cfg, multihead_info=self._multihead_class_info, datamanager_for_init=datamanager - ) - - time_monitor.on_initialization_end() - - self._cfg.train.lr = calculate_lr_for_nncf_training(self._cfg, self._initial_lr, False) - - train_model = self._model - if self._cfg.use_gpu: - main_device_ids = list(range(self.num_devices)) - extra_device_ids = [main_device_ids for _ in range(num_aux_models)] - train_model = DataParallel(train_model, device_ids=main_device_ids, output_device=0).cuda( - main_device_ids[0] - ) - else: - extra_device_ids = [None for _ in range(num_aux_models)] # type: ignore - - optimizer = torchreid.optim.build_optimizer(train_model, **optimizer_kwargs(self._cfg)) - - scheduler = torchreid.optim.build_lr_scheduler( - optimizer, num_iter=datamanager.num_iter, **lr_scheduler_kwargs(self._cfg) - ) - - logger.info("Start training") - time_monitor.on_train_begin() - run_training( - self._cfg, - datamanager, - train_model, - optimizer, - scheduler, - extra_device_ids, - self._cfg.train.lr, - should_freeze_aux_models=True, - aux_pretrained_dicts=aux_pretrained_dicts, - tb_writer=self.metrics_monitor, - perf_monitor=time_monitor, - stop_callback=self.stop_callback, - nncf_metainfo=self._nncf_metainfo, - compression_ctrl=self._compression_ctrl, - ) - time_monitor.on_train_end() - - self.metrics_monitor.close() - if self.stop_callback.check_stop(): - logger.info("Training cancelled.") - return - - logger.info("Training completed") - - self.save_model(output_model) - - output_model.model_format = ModelFormat.BASE_FRAMEWORK - output_model.optimization_type = self._optimization_type - output_model.optimization_methods = 
self._optimization_methods - output_model.precision = self._precision - - @check_input_parameters_type() - def save_model(self, output_model: ModelEntity): - """Save model.""" - state_dict = None - if self._compression_ctrl is not None: - state_dict = { - "compression_state": self._compression_ctrl.get_compression_state(), - "nncf_metainfo": self._nncf_metainfo, - } - self._save_model(output_model, state_dict) - - @check_input_parameters_type() - def export(self, export_type: ExportType, output_model: ModelEntity): - """Export.""" - if self._compression_ctrl is None: - super().export(export_type, output_model) - else: - self._compression_ctrl.prepare_for_export() - self._model.disable_dynamic_graph_building() - super().export(export_type, output_model) - self._model.enable_dynamic_graph_building() - - @staticmethod - def _load_model_data(model, data_name): - buffer = io.BytesIO(model.get_data(data_name)) - return torch.load(buffer, map_location=torch.device("cpu")) diff --git a/otx/algorithms/classification/adapters/deep_object_reid/utils/__init__.py b/otx/algorithms/classification/adapters/deep_object_reid/utils/__init__.py deleted file mode 100644 index e19055e0e1a..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/utils/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -"""OTX Adapters - deep_object_reid.utils.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -from .monitors import DefaultMetricsMonitor, StopCallback -from .utils import ( - active_score_from_probs, - force_fp32, - get_hierarchical_predictions, - get_multiclass_predictions, - get_multihead_class_info, - get_multilabel_predictions, - sigmoid_numpy, - softmax_numpy, -) - -__all__ = [ - "DefaultMetricsMonitor", - "StopCallback", - "active_score_from_probs", - "force_fp32", - "get_hierarchical_predictions", - "get_multiclass_predictions", - "get_multihead_class_info", - "get_multilabel_predictions", - "sigmoid_numpy", - "softmax_numpy", -] diff --git a/otx/algorithms/classification/adapters/deep_object_reid/utils/monitors.py b/otx/algorithms/classification/adapters/deep_object_reid/utils/monitors.py deleted file mode 100644 index 0b85ae52c05..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/utils/monitors.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Monitors for deep_object_reid tasks.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. 
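A short usage sketch of the two helpers defined below, as they behaved before this removal: `DefaultMetricsMonitor` stores `(timestamp, value)` pairs per metric key, and `StopCallback` is a simple cooperative cancellation flag polled by the training loop.

```python
from otx.algorithms.classification.adapters.deep_object_reid.utils import (
    DefaultMetricsMonitor,
    StopCallback,
)

monitor = DefaultMetricsMonitor()
monitor.add_scalar("train/loss", 0.73, timestamp=1)
monitor.add_scalar("train/loss", 0.51, timestamp=2)
assert monitor.get_metric_values("train/loss") == [0.73, 0.51]
assert monitor.get_metric_timestamps("train/loss") == [1, 2]

stop = StopCallback()
stop.stop()                 # request cancellation
assert stop.check_stop()
stop.reset()                # clear the flag for the next run
```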
- -from otx.api.utils.argument_checks import check_input_parameters_type - - -class StopCallback: - """Stop callback class.""" - - def __init__(self): - self.stop_flag = False - - def stop(self): - """Stop.""" - self.stop_flag = True - - def check_stop(self): - """Check Stop.""" - return self.stop_flag - - def reset(self): - """Reset.""" - self.stop_flag = False - - -class DefaultMetricsMonitor: - """Default metric monitor class.""" - - def __init__(self): - self.metrics_dict = {} - - @check_input_parameters_type() - def add_scalar(self, capture: str, value: float, timestamp: int): - """Add scalar.""" - if capture in self.metrics_dict: - self.metrics_dict[capture].append((timestamp, value)) - else: - self.metrics_dict[capture] = [ - (timestamp, value), - ] - - def get_metric_keys(self): - """Get metric keys.""" - return self.metrics_dict.keys() - - @check_input_parameters_type() - def get_metric_values(self, capture: str): - """Get metric values.""" - return [item[1] for item in self.metrics_dict[capture]] - - @check_input_parameters_type() - def get_metric_timestamps(self, capture: str): - """Get metric timestamps.""" - return [item[0] for item in self.metrics_dict[capture]] - - def close(self): - """Close.""" - pass # pylint: disable=unnecessary-pass diff --git a/otx/algorithms/classification/adapters/deep_object_reid/utils/utils.py b/otx/algorithms/classification/adapters/deep_object_reid/utils/utils.py deleted file mode 100644 index 053fbb6b55f..00000000000 --- a/otx/algorithms/classification/adapters/deep_object_reid/utils/utils.py +++ /dev/null @@ -1,232 +0,0 @@ -"""Utils for deep_object_reid tasks.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. 
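The utils module below also supplies the activation helpers used at inference time. A quick numeric check of their behavior (imports follow the deleted `utils/__init__.py`; the values are illustrative):

```python
import numpy as np

from otx.algorithms.classification.adapters.deep_object_reid.utils import (
    sigmoid_numpy,
    softmax_numpy,
)

logits = np.array([2.0, 0.5, -1.0])
probs = softmax_numpy(logits)               # shifted by max(x) for stability
assert np.isclose(probs.sum(), 1.0)         # a proper distribution
scores = sigmoid_numpy(logits)              # element-wise, each in (0, 1)
assert ((scores > 0) & (scores < 1)).all()
```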
- -# pylint: disable=too-many-nested-blocks, invalid-name - -import math -import shutil -import tempfile -from contextlib import contextmanager -from operator import itemgetter -from os import path as osp -from typing import List - -import numpy as np -from torch.nn.modules import Module -from torchreid.utils import get_model_attr, set_model_attr - -from otx.api.entities.label import Domain, LabelEntity -from otx.api.entities.label_schema import LabelGroup, LabelGroupType, LabelSchemaEntity -from otx.api.entities.model_template import ModelTemplate -from otx.api.entities.scored_label import ScoredLabel -from otx.api.utils.argument_checks import check_input_parameters_type - - -@check_input_parameters_type() -def generate_label_schema(not_empty_labels: List[LabelEntity], multilabel: bool = False): - """Generate label schema.""" - assert len(not_empty_labels) > 1 - - label_schema = LabelSchemaEntity() - if multilabel: - emptylabel = LabelEntity(name="Empty label", is_empty=True, domain=Domain.CLASSIFICATION) - empty_group = LabelGroup(name="empty", labels=[emptylabel], group_type=LabelGroupType.EMPTY_LABEL) - for label in not_empty_labels: - label_schema.add_group(LabelGroup(name=label.name, labels=[label], group_type=LabelGroupType.EXCLUSIVE)) - label_schema.add_group(empty_group) - else: - main_group = LabelGroup(name="labels", labels=not_empty_labels, group_type=LabelGroupType.EXCLUSIVE) - label_schema.add_group(main_group) - return label_schema - - -# pylint: disable=invalid-name, too-many-locals -@check_input_parameters_type() -def get_multihead_class_info(label_schema: LabelSchemaEntity): - """Get multihead class info by label schema.""" - all_groups = label_schema.get_groups(include_empty=False) - all_groups_str = [] - for g in all_groups: - group_labels_str = [lbl.name for lbl in g.labels] - all_groups_str.append(group_labels_str) - - single_label_groups = [g for g in all_groups_str if len(g) == 1] - exclusive_groups = [sorted(g) for g in all_groups_str if len(g) > 1] - single_label_groups.sort(key=itemgetter(0)) - exclusive_groups.sort(key=itemgetter(0)) - class_to_idx = {} - head_idx_to_logits_range = {} - num_single_label_classes = 0 - last_logits_pos = 0 - for i, group in enumerate(exclusive_groups): - head_idx_to_logits_range[i] = (last_logits_pos, last_logits_pos + len(group)) - last_logits_pos += len(group) - for j, cls in enumerate(group): - class_to_idx[cls] = (i, j) # group idx and idx inside group - num_single_label_classes += 1 - - # other labels are in multilabel group - for j, group in enumerate(single_label_groups): - class_to_idx[group[0]] = (len(exclusive_groups), j) - - all_labels = label_schema.get_labels(include_empty=False) - label_to_idx = {lbl.name: i for i, lbl in enumerate(all_labels)} - - mixed_cls_heads_info = { - "num_multiclass_heads": len(exclusive_groups), - "num_multilabel_classes": len(single_label_groups), - "head_idx_to_logits_range": head_idx_to_logits_range, - "num_single_label_classes": num_single_label_classes, - "class_to_group_idx": class_to_idx, - "all_groups": exclusive_groups + single_label_groups, - "label_to_idx": label_to_idx, - } - return mixed_cls_heads_info - - -@check_input_parameters_type() -def reload_hyper_parameters(model_template: ModelTemplate): - """Reload hyper-parameters function. - - This function copies template.yaml file and its configuration.yaml dependency to temporal folder. - Then it re-loads hyper parameters from copied template.yaml file. 
- This function should not be used in general case, it is assumed that - the 'configuration.yaml' should be in the same folder as 'template.yaml' file. - """ - - template_file = model_template.model_template_path - template_dir = osp.dirname(template_file) - temp_folder = tempfile.mkdtemp() - conf_yaml = [ - dep.source - for dep in model_template.dependencies - if dep.destination == model_template.hyper_parameters.base_path - ][0] - conf_yaml = osp.join(template_dir, conf_yaml) - shutil.copy(conf_yaml, temp_folder) - shutil.copy(template_file, temp_folder) - model_template.hyper_parameters.load_parameters(osp.join(temp_folder, "template_experimental.yaml")) - assert model_template.hyper_parameters.data - - -@check_input_parameters_type() -def set_values_as_default(parameters: dict): - """Set values as default.""" - for v in parameters.values(): - if isinstance(v, dict) and "value" not in v: - set_values_as_default(v) - elif isinstance(v, dict) and "value" in v: - if v["value"] != v["default_value"]: - v["value"] = v["default_value"] - - -@contextmanager -@check_input_parameters_type() -def force_fp32(model: Module): - """Force fp32.""" - mix_precision_status = get_model_attr(model, "mix_precision") - set_model_attr(model, "mix_precision", False) - try: - yield model - finally: - set_model_attr(model, "mix_precision", mix_precision_status) - - -@check_input_parameters_type() -def active_score_from_probs(predictions): - """Active score form probs.""" - top_idxs = np.argpartition(predictions, -2)[-2:] - top_probs = predictions[top_idxs] - return np.max(top_probs) - np.min(top_probs) - - -@check_input_parameters_type() -def sigmoid_numpy(x: np.ndarray): - """Sigmoid numpy.""" - return 1.0 / (1.0 + np.exp(-1.0 * x)) - - -@check_input_parameters_type() -def softmax_numpy(x: np.ndarray): - """Softmax numpy.""" - x = np.exp(x - np.max(x)) - x /= np.sum(x) - return x - - -@check_input_parameters_type() -def get_multiclass_predictions( - logits: np.ndarray, labels: List[LabelEntity], activate: bool = True -) -> List[ScoredLabel]: - """Get multiclass predictions.""" - i = np.argmax(logits) - if activate: - logits = softmax_numpy(logits) - if math.isnan(float(logits[i])): - return [] - return [ScoredLabel(labels[i], probability=float(logits[i]))] - - -@check_input_parameters_type() -def get_multilabel_predictions( - logits: np.ndarray, labels: List[LabelEntity], pos_thr: float = 0.5, activate: bool = True -) -> List[ScoredLabel]: - """Get multilabel predictions.""" - if activate: - logits = sigmoid_numpy(logits) - item_labels = [] - for i in range(logits.shape[0]): - if logits[i] > pos_thr: - label = ScoredLabel(label=labels[i], probability=float(logits[i])) - item_labels.append(label) - - return item_labels - - -@check_input_parameters_type() -def get_hierarchical_predictions( - logits: np.ndarray, - labels: List[LabelEntity], - label_schema: LabelSchemaEntity, - multihead_class_info: dict, - pos_thr: float = 0.5, - activate: bool = True, -) -> List[ScoredLabel]: - """Get hierarchical predictions.""" - predicted_labels = [] - for i in range(multihead_class_info["num_multiclass_heads"]): - logits_begin, logits_end = multihead_class_info["head_idx_to_logits_range"][i] - head_logits = logits[logits_begin:logits_end] - if activate: - head_logits = softmax_numpy(head_logits) - j = np.argmax(head_logits) - label_str = multihead_class_info["all_groups"][i][j] - otx_label = next(x for x in labels if x.name == label_str) - predicted_labels.append(ScoredLabel(label=otx_label, 
probability=float(head_logits[j]))) - - if multihead_class_info["num_multilabel_classes"]: - logits_begin, logits_end = multihead_class_info["num_single_label_classes"], -1 - head_logits = logits[logits_begin:logits_end] - if activate: - head_logits = sigmoid_numpy(head_logits) - - for i in range(head_logits.shape[0]): - if head_logits[i] > pos_thr: - label_str = multihead_class_info["all_groups"][multihead_class_info["num_multiclass_heads"] + i][0] - otx_label = next(x for x in labels if x.name == label_str) - predicted_labels.append(ScoredLabel(label=otx_label, probability=float(head_logits[i]))) - - return label_schema.resolve_labels_probabilistic(predicted_labels) diff --git a/otx/algorithms/classification/adapters/mmcls/__init__.py b/otx/algorithms/classification/adapters/mmcls/__init__.py index 8754857bb93..6fa0ca7cb84 100644 --- a/otx/algorithms/classification/adapters/mmcls/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/__init__.py @@ -14,13 +14,18 @@ # See the License for the specific language governing permissions # and limitations under the License. + from .data import MPAClsDataset, SelfSLDataset from .models import BYOL, ConstrastiveHead, SelfSLMLP +# fmt: off +# isort: off # FIXME: openvino pot library adds stream handlers to root logger # which makes annoying duplicated logging from mmcls.utils import get_root_logger get_root_logger().propagate = False +# isort:on +# fmt: on __all__ = [ "MPAClsDataset", diff --git a/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py index 378c6a18c40..a8ad3642b9a 100644 --- a/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py @@ -203,12 +203,15 @@ def _parse_losses(self, losses: Dict[str, Any]): return loss, log_vars @staticmethod - def state_dict_hook(module, state_dict, *args, **kwargs): + def state_dict_hook(module, state_dict, prefix, *args, **kwargs): """Save only online backbone as output state_dict.""" logger.info("----------------- BYOL.state_dict_hook() called") - output = OrderedDict() - for k, v in state_dict.items(): - if "online_backbone." 
in k: - k = k.replace("online_backbone.", "") - output[k] = v - return output + for k in list(state_dict.keys()): + v = state_dict.pop(k) + if not prefix or k.startswith(prefix): + k = k.replace(prefix, "", 1) + if k.startswith("online_backbone."): + k = k.replace("online_backbone.", "", 1) + k = prefix + k + state_dict[k] = v + return state_dict diff --git a/otx/algorithms/classification/adapters/mmcls/nncf/__init__.py b/otx/algorithms/classification/adapters/mmcls/nncf/__init__.py index 44c2472d1d6..0db052bebe4 100644 --- a/otx/algorithms/classification/adapters/mmcls/nncf/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/nncf/__init__.py @@ -2,15 +2,12 @@ # SPDX-License-Identifier: Apache-2.0 # -from .builder import ( - build_nncf_classifier, -) +# flake8: noqa +from .builder import build_nncf_classifier from .patches import * - from .registers import * __all__ = [ "build_nncf_classifier", ] - diff --git a/otx/algorithms/classification/adapters/mmcls/nncf/builder.py b/otx/algorithms/classification/adapters/mmcls/nncf/builder.py index 21ecf18d069..aa729157101 100644 --- a/otx/algorithms/classification/adapters/mmcls/nncf/builder.py +++ b/otx/algorithms/classification/adapters/mmcls/nncf/builder.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -from copy import deepcopy from functools import partial from typing import Optional, Union @@ -11,6 +10,7 @@ from mmcv.runner import CheckpointLoader, load_state_dict from mmcv.utils import Config, ConfigDict +from otx.algorithms.classification.adapters.mmcls.utils import build_classifier from otx.algorithms.common.adapters.mmcv.utils import ( get_configs_by_dict, get_configs_by_keys, @@ -22,8 +22,6 @@ NNCF_STATE_NAME, STATE_TO_BUILD_NAME, ) -from otx.algorithms.classification.adapters.mmcls.utils import build_classifier - logger = get_root_logger() @@ -37,15 +35,18 @@ def build_nncf_classifier( ): from mmcls.apis import multi_gpu_test, single_gpu_test from mmcls.datasets import build_dataloader as mmcls_build_dataloader - from mmcls.datasets import build_dataset + from mmcls.datasets import build_dataset as mmcls_build_dataset from mmcls.datasets.pipelines import Compose from otx.algorithms.common.adapters.mmcv.nncf import ( - build_dataloader, get_fake_input, model_eval, wrap_nncf_model, ) + from otx.algorithms.common.adapters.mmcv.utils.builder import ( + build_dataloader, + build_dataset, + ) if cfg_options is not None: config.merge_from_dict(cfg_options) @@ -80,21 +81,29 @@ def build_nncf_classifier( data_to_build_nncf = datasets[0][0].numpy init_dataloader = build_dataloader( + build_dataset( + config, + subset="train", + dataset_builder=mmcls_build_dataset, + ), config, subset="train", - distributed=distributed, dataloader_builder=mmcls_build_dataloader, - dataset_builder=build_dataset, + distributed=distributed, ) val_dataloader = None if is_acc_aware: val_dataloader = build_dataloader( + build_dataset( + config, + subset="val", + dataset_builder=mmcls_build_dataset, + ), config, subset="val", - distributed=distributed, dataloader_builder=mmcls_build_dataloader, - dataset_builder=build_dataset, + distributed=distributed, ) model_eval_fn = partial( @@ -126,9 +135,7 @@ def build_nncf_classifier( # update custom hooks custom_hooks = config.get("custom_hooks", []) - custom_hooks.append( - ConfigDict(type="CompressionHook", compression_ctrl=compression_ctrl) - ) + custom_hooks.append(ConfigDict(type="CompressionHook", compression_ctrl=compression_ctrl)) custom_hooks.append(ConfigDict({"type": "CancelTrainingHook"})) 
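# NOTE (review sketch): after the appends above and the one continuing below,
# the recipe's custom_hooks roughly contains, in order (the body of the third
# hook is elided by this hunk):
#
#     custom_hooks = [
#         # ...hooks already present in the recipe...
#         dict(type="CompressionHook", compression_ctrl=compression_ctrl),
#         dict(type="CancelTrainingHook"),
#         # one more hook appended below, truncated here
#     ]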
custom_hooks.append( ConfigDict( @@ -146,10 +153,7 @@ def build_nncf_classifier( for hook in get_configs_by_dict(custom_hooks, dict(type="OTXProgressHook")): time_monitor = hook.get("time_monitor", None) - if ( - time_monitor - and getattr(time_monitor, "on_initialization_end", None) is not None - ): + if time_monitor and getattr(time_monitor, "on_initialization_end", None) is not None: time_monitor.on_initialization_end() return compression_ctrl, model diff --git a/otx/algorithms/classification/adapters/mmcls/nncf/registers.py b/otx/algorithms/classification/adapters/mmcls/nncf/registers.py index d8d803c8b3e..85b7c0dd212 100644 --- a/otx/algorithms/classification/adapters/mmcls/nncf/registers.py +++ b/otx/algorithms/classification/adapters/mmcls/nncf/registers.py @@ -4,7 +4,6 @@ from otx.algorithms.common.adapters.nncf.utils import is_nncf_enabled - if is_nncf_enabled(): from nncf.torch import register_module from timm.models.layers.conv2d_same import Conv2dSame diff --git a/otx/algorithms/classification/adapters/mmcls/utils/__init__.py b/otx/algorithms/classification/adapters/mmcls/utils/__init__.py index 45c64f6c569..01fdc8e3b16 100644 --- a/otx/algorithms/classification/adapters/mmcls/utils/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/utils/__init__.py @@ -3,13 +3,13 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from .builder import build_classifier from .config_utils import ( - patch_recipe_config, patch_datasets, patch_evaluation, + patch_recipe_config, prepare_for_training, ) -from .builder import build_classifier __all__ = [ "patch_recipe_config", diff --git a/otx/algorithms/classification/adapters/mmcls/utils/builder.py b/otx/algorithms/classification/adapters/mmcls/utils/builder.py index 18822192fca..7580dd4dd15 100644 --- a/otx/algorithms/classification/adapters/mmcls/utils/builder.py +++ b/otx/algorithms/classification/adapters/mmcls/utils/builder.py @@ -16,12 +16,13 @@ def build_classifier( device: Union[str, torch.device] = "cpu", cfg_options: Optional[Union[Config, ConfigDict]] = None, from_scratch: bool = False, -): +) -> torch.nn.Module: """Creates a model, based on the configuration in config. Note that this function consumes/updates 'load_from' attribute of 'config'. """ from mmcls.models import build_classifier as origin_build_classifier + from mmcls.utils import get_root_logger if cfg_options is not None: config.merge_from_dict(cfg_options) @@ -32,7 +33,7 @@ def build_classifier( checkpoint = checkpoint if checkpoint else config.pop("load_from", None) if checkpoint is not None and not from_scratch: - load_checkpoint(model, checkpoint, map_location=device) + load_checkpoint(model, checkpoint, map_location=device, logger=get_root_logger()) config.load_from = None else: config.load_from = checkpoint diff --git a/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py b/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py index 82e82ea2e3b..65cef1f6601 100644 --- a/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py +++ b/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py @@ -15,30 +15,28 @@ # and limitations under the License. 
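# NOTE (review sketch): the build_classifier patched earlier in this diff is
# typically driven as below; "ckpt.pth" is a hypothetical path, not one taken
# from this patch:
#
#     from otx.algorithms.classification.adapters.mmcls.utils import build_classifier
#
#     model = build_classifier(config, checkpoint="ckpt.pth", device="cpu")
#     # the helper consumes config.load_from, resetting it to None once loaded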
import math -from typing import List, Union, Optional +from typing import List, Optional, Union -import torch from mmcv import Config, ConfigDict -from mpa.utils.logger import get_logger from otx.algorithms.common.adapters.mmcv.utils import ( + get_configs_by_dict, + get_configs_by_keys, + get_dataset_configs, get_meta_keys, is_epoch_based_runner, + patch_color_conversion, prepare_work_dir, - get_dataset_configs, - get_configs_by_keys, - update_config, remove_from_config, remove_from_configs_by_type, - patch_color_conversion, - get_configs_by_dict, + update_config, ) from otx.api.entities.label import Domain, LabelEntity from otx.api.utils.argument_checks import ( DirectoryPathCheck, check_input_parameters_type, ) - +from otx.mpa.utils.logger import get_logger logger = get_logger() @@ -167,7 +165,7 @@ def patch_datasets( data_config: Optional[ConfigDict], domain: Domain = Domain.CLASSIFICATION, subsets: List[str] = ["train", "val", "test", "unlabeled"], - **kwargs + **kwargs, ): """Update dataset configs.""" assert "data" in config @@ -176,9 +174,7 @@ def patch_datasets( for subset in subsets: if subset not in config.data: continue - config.data[f"{subset}_dataloader"] = config.data.get( - f"{subset}_dataloader", ConfigDict() - ) + config.data[f"{subset}_dataloader"] = config.data.get(f"{subset}_dataloader", ConfigDict()) # For stable hierarchical information indexing if subset == "train" and kwargs["type"] == "MPAHierarchicalClsDataset": @@ -192,14 +188,17 @@ def patch_datasets( cfg.update(kwargs) if subset == "train": - collect_cfg = get_configs_by_dict(cfg.pipeline, dict(type="Collect")) - assert len(collect_cfg) == 1 - get_meta_keys(collect_cfg[0]) + for collect_cfg in get_configs_by_dict(cfg.pipeline, dict(type="Collect")): + get_meta_keys(collect_cfg) # In train dataset, when sample size is smaller than batch size if data_config is not None: - data_cfg = data_config.data.get(subset) - if len(data_cfg.get("otx_dataset", [])) < config.data.get("samples_per_gpu", 2): - config.data[f"{subset}_dataloader"].drop_last = False + data_cfg = data_config.data.get(subset, None) + if data_cfg is not None: + samples_per_gpu = config.data[f"{subset}_dataloader"].get( + "samples_per_gpu", config.data.get("samples_per_gpu", 2) + ) + if len(data_cfg.get("otx_dataset", [])) < samples_per_gpu: + config.data[f"{subset}_dataloader"].drop_last = False patch_color_conversion(config) diff --git a/otx/algorithms/classification/adapters/openvino/model_wrappers/openvino_models.py b/otx/algorithms/classification/adapters/openvino/model_wrappers/openvino_models.py index 53ce7e658fa..5f91d98f5e2 100644 --- a/otx/algorithms/classification/adapters/openvino/model_wrappers/openvino_models.py +++ b/otx/algorithms/classification/adapters/openvino/model_wrappers/openvino_models.py @@ -18,7 +18,6 @@ from typing import Any, Dict -import cv2 import numpy as np from otx.api.utils.argument_checks import check_input_parameters_type @@ -26,7 +25,6 @@ try: from openvino.model_zoo.model_api.models.classification import Classification from openvino.model_zoo.model_api.models.types import BooleanValue, DictValue - from openvino.model_zoo.model_api.models.utils import pad_image except ImportError: import warnings diff --git a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/deployment.py b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/deployment.py index 109cb0d39d3..421059d07f0 100644 --- a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/deployment.py +++ 
b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/deployment.py @@ -1,3 +1,5 @@ +"""EfficientNet-B0 for multi-class MMDeploy config.""" + _base_ = ["../base/deployments/base_classification_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/deployment.py b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/deployment.py index 109cb0d39d3..6ab4d2e2d5e 100644 --- a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/deployment.py +++ b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/deployment.py @@ -1,3 +1,5 @@ +"""EfficientNet-V2 for multi-class MMDeploy config.""" + _base_ = ["../base/deployments/base_classification_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/compression_config.json b/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/compression_config.json similarity index 75% rename from otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/compression_config.json rename to otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/compression_config.json index 8e22c1f8a94..6091a285e96 100644 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/efficientnet_b0/compression_config.json +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/compression_config.json @@ -1,34 +1,27 @@ { "base": { + "find_unused_parameters": true, "nncf_config": { + "compression": [], "log_dir": "." - }, - "lr_finder": { - "enable": false - }, - "train": { - "batch_size": 64, - "lr_scheduler": "reduce_on_plateau", - "mix_precision": false - }, - "test": { - "batch_size": 64 } }, "nncf_quantization": { "nncf_config": { - "compression": { - "algorithm": "quantization", - "preset": "mixed", - "initializer": { - "range": { - "num_init_samples": 8192 - }, - "batchnorm_adaptation": { - "num_bn_adaptation_samples": 8192 + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 8192 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 8192 + } } } - }, + ], "accuracy_aware_training": { "mode": "early_exit", "params": { @@ -80,3 +73,4 @@ }, "order_of_parts": ["nncf_quantization", "nncf_quantization_pruning"] } + diff --git a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/deployment.py b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/deployment.py index 109cb0d39d3..b57137aed3c 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/deployment.py +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/deployment.py @@ -1,3 +1,5 @@ +"""MobileNet-V3-large-1 for multi-class MMDeploy config.""" + _base_ = ["../base/deployments/base_classification_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/compression_config.json b/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/compression_config.json similarity index 66% rename from otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/compression_config.json rename to otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/compression_config.json index 8db180c5776..6091a285e96 100644 --- a/otx/algorithms/classification/adapters/deep_object_reid/configs/mobilenet_v3_small/compression_config.json +++ 
b/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/compression_config.json @@ -1,46 +1,27 @@ { "base": { + "find_unused_parameters": true, "nncf_config": { + "compression": [], "log_dir": "." - }, - "lr_finder": { - "enable": false - }, - "train": { - "batch_size": 64, - "lr_scheduler": "reduce_on_plateau", - "mix_precision": false - }, - "test": { - "batch_size": 64 - }, - "nncf_aux_config_changes": [ - { - "train": { - "batch_size": 64, - "lr_scheduler": "reduce_on_plateau", - "mix_precision": false - }, - "test": { - "batch_size": 64 - } - } - ] + } }, "nncf_quantization": { "nncf_config": { - "compression": { - "algorithm": "quantization", - "preset": "mixed", - "initializer": { - "range": { - "num_init_samples": 8192 - }, - "batchnorm_adaptation": { - "num_bn_adaptation_samples": 8192 + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 8192 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 8192 + } } } - }, + ], "accuracy_aware_training": { "mode": "early_exit", "params": { @@ -92,3 +73,4 @@ }, "order_of_parts": ["nncf_quantization", "nncf_quantization_pruning"] } + diff --git a/otx/algorithms/classification/tasks/inference.py b/otx/algorithms/classification/tasks/inference.py index 93fd6765e43..13f6bf23280 100644 --- a/otx/algorithms/classification/tasks/inference.py +++ b/otx/algorithms/classification/tasks/inference.py @@ -10,30 +10,27 @@ import numpy as np from mmcv.utils import ConfigDict +from otx.algorithms.classification.adapters.mmcls.utils.builder import build_classifier +from otx.algorithms.classification.adapters.mmcls.utils.config_utils import ( + patch_datasets, + patch_evaluation, +) from otx.algorithms.classification.configs import ClassificationConfig from otx.algorithms.classification.utils import ( get_multihead_class_info as get_hierarchical_info, ) -from otx.algorithms.common.adapters.mmcv.utils import get_meta_keys, patch_data_pipeline -from otx.algorithms.common.configs import TrainType -from otx.algorithms.common.tasks import BaseTask from otx.algorithms.common.adapters.mmcv.utils import ( + patch_data_pipeline, patch_default_config, patch_runner, ) -from otx.algorithms.classification.adapters.mmcls.utils.config_utils import ( - patch_datasets, - patch_evaluation, -) -from otx.algorithms.classification.adapters.mmcls.utils.builder import ( - build_classifier, -) +from otx.algorithms.common.configs import TrainType +from otx.algorithms.common.tasks import BaseTask from otx.api.entities.datasets import DatasetEntity from otx.api.entities.inference_parameters import ( InferenceParameters, default_progress_callback, ) -from otx.api.entities.label import Domain from otx.api.entities.model import ( # ModelStatus ModelEntity, ModelFormat, @@ -54,7 +51,6 @@ from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item from otx.api.utils.labels_utils import get_empty_label from otx.mpa import MPAConstants -from otx.mpa.stage import Stage from otx.mpa.utils.config_utils import MPAConfig from otx.mpa.utils.logger import get_logger @@ -195,17 +191,11 @@ def export(self, export_type: ExportType, output_model: ModelEntity): output_model.optimization_type = ModelOptimizationType.MO stage_module = "ClsExporter" - results = self._run_task( - stage_module, - mode="train", - export=True - ) + results = self._run_task(stage_module, mode="train", export=True) outputs = results.get("outputs") logger.debug(f"results of run_task = {outputs}") if outputs is None: - 
logger.error( - f"error while exporting model, result is None: {results.get('msg')}" - ) + logger.error(f"error while exporting model, result is None: {results.get('msg')}") else: bin_file = outputs.get("bin") xml_file = outputs.get("xml") @@ -308,36 +298,37 @@ def _add_saliency_maps_to_dataset(self, saliency_maps, dataset, update_progress_ update_progress_callback(int(i / dataset_size * 100)) def _init_recipe_hparam(self) -> dict: - warmup_iters = int(self._hyperparams.learning_parameters.learning_rate_warmup_iters) + params = self._hyperparams.learning_parameters + warmup_iters = int(params.learning_rate_warmup_iters) if self._multilabel: # hack to use 1cycle policy - lr_config = ConfigDict(max_lr=self._hyperparams.learning_parameters.learning_rate, warmup=None) + lr_config = ConfigDict(max_lr=params.learning_rate, warmup=None) else: lr_config = ( ConfigDict(warmup_iters=warmup_iters) if warmup_iters > 0 else ConfigDict(warmup_iters=0, warmup=None) ) - if self._hyperparams.learning_parameters.enable_early_stopping: + if params.enable_early_stopping: early_stop = ConfigDict( - start=int(self._hyperparams.learning_parameters.early_stop_start), - patience=int(self._hyperparams.learning_parameters.early_stop_patience), - iteration_patience=int(self._hyperparams.learning_parameters.early_stop_iteration_patience), + start=int(params.early_stop_start), + patience=int(params.early_stop_patience), + iteration_patience=int(params.early_stop_iteration_patience), ) else: early_stop = False - if self._recipe_cfg.runner.get("type") == "IterBasedRunner": # type: ignore - runner = ConfigDict(max_iters=int(self._hyperparams.learning_parameters.num_iters)) + if self._recipe_cfg.runner.get("type").startswith("IterBasedRunner"): # type: ignore + runner = ConfigDict(max_iters=int(params.num_iters)) else: - runner = ConfigDict(max_epochs=int(self._hyperparams.learning_parameters.num_iters)) + runner = ConfigDict(max_epochs=int(params.num_iters)) return ConfigDict( - optimizer=ConfigDict(lr=self._hyperparams.learning_parameters.learning_rate), + optimizer=ConfigDict(lr=params.learning_rate), lr_config=lr_config, early_stop=early_stop, data=ConfigDict( - samples_per_gpu=int(self._hyperparams.learning_parameters.batch_size), - workers_per_gpu=int(self._hyperparams.learning_parameters.num_workers), + samples_per_gpu=int(params.batch_size), + workers_per_gpu=int(params.num_workers), ), runner=runner, ) @@ -385,10 +376,7 @@ def _init_recipe(self): # During semi-implementation, this line should be fixed to -> self._recipe_cfg.train_type = train_type self._recipe_cfg.train_type = train_type.name - options_for_patch_datasets = { - "type": "MPAClsDataset", - "empty_label": self._empty_label - } + options_for_patch_datasets = {"type": "MPAClsDataset", "empty_label": self._empty_label} options_for_patch_evaluation = {"task": "normal"} if self._multilabel: options_for_patch_datasets["type"] = "MPAMultilabelClsDataset" @@ -401,7 +389,8 @@ def _init_recipe(self): options_for_patch_datasets["type"] = "SelfSLDataset" patch_default_config(self._recipe_cfg) patch_runner(self._recipe_cfg) - patch_datasets(self._recipe_cfg, self._data_cfg, **options_for_patch_datasets) # for OTX compatibility + patch_data_pipeline(self._recipe_cfg, pipeline_path) + patch_datasets(self._recipe_cfg, self._data_cfg, **options_for_patch_datasets) # for OTX compatibility patch_evaluation(self._recipe_cfg, **options_for_patch_evaluation) # for OTX compatibility logger.info(f"initialized recipe = {recipe}") diff --git 
a/otx/algorithms/classification/tasks/nncf.py b/otx/algorithms/classification/tasks/nncf.py index 3548e3dbd70..4881a83f78a 100644 --- a/otx/algorithms/classification/tasks/nncf.py +++ b/otx/algorithms/classification/tasks/nncf.py @@ -17,20 +17,16 @@ from functools import partial from typing import Optional -from mpa.utils.logger import get_logger - from otx.algorithms.classification.adapters.mmcls.nncf.builder import ( build_nncf_classifier, ) from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask from otx.api.entities.datasets import DatasetEntity from otx.api.entities.optimization_parameters import OptimizationParameters -from otx.api.entities.task_environment import TaskEnvironment -from otx.api.utils.argument_checks import check_input_parameters_type +from otx.mpa.utils.logger import get_logger from .inference import ClassificationInferenceTask - logger = get_logger() diff --git a/otx/algorithms/classification/tasks/train.py b/otx/algorithms/classification/tasks/train.py index 565470df995..951dc1121a4 100644 --- a/otx/algorithms/classification/tasks/train.py +++ b/otx/algorithms/classification/tasks/train.py @@ -16,7 +16,7 @@ from otx.algorithms.classification.configs import ClassificationConfig from otx.algorithms.common.adapters.mmcv import OTXLoggerHook from otx.algorithms.common.utils.callback import TrainingProgressCallback -from otx.algorithms.common.utils.data import get_unlabeled_dataset +from otx.algorithms.common.utils.data import get_dataset from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings from otx.api.entities.datasets import DatasetEntity @@ -146,24 +146,18 @@ def train( def _init_train_data_cfg(self, dataset: DatasetEntity): logger.info("init data cfg.") - data_cfg = ConfigDict( - data=ConfigDict( - train=ConfigDict( - otx_dataset=dataset.get_subset(Subset.TRAINING), + data_cfg = ConfigDict(data=ConfigDict()) + + for cfg_key, subset in zip( + ["train", "val", "unlabeled"], + [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED], + ): + subset = get_dataset(dataset, subset) + if subset: + data_cfg.data[cfg_key] = ConfigDict( + otx_dataset=subset, labels=self._labels, - ), - val=ConfigDict( - otx_dataset=dataset.get_subset(Subset.VALIDATION), - labels=self._labels, - ), - ) - ) - unlabeled_dataset = get_unlabeled_dataset(dataset) - if unlabeled_dataset: - data_cfg.data.unlabeled = ConfigDict( - otx_dataset=unlabeled_dataset, - labels=self._labels, - ) + ) for label in self._labels: label.hotkey = "a" diff --git a/otx/algorithms/common/adapters/mmcv/__init__.py b/otx/algorithms/common/adapters/mmcv/__init__.py index c89f006ec3a..0063347ac4a 100644 --- a/otx/algorithms/common/adapters/mmcv/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/__init__.py @@ -24,11 +24,9 @@ ReduceLROnPlateauLrUpdaterHook, StopLossNanTrainingHook, ) -from .runner import EpochRunnerWithCancel, IterBasedRunnerWithCancel - -from .nncf.hooks import CompressionHook, CheckpointHookBeforeTraining +from .nncf.hooks import CheckpointHookBeforeTraining, CompressionHook from .nncf.runners import AccuracyAwareRunner - +from .runner import EpochRunnerWithCancel, IterBasedRunnerWithCancel __all__ = [ "EpochRunnerWithCancel", diff --git a/otx/algorithms/common/adapters/mmcv/data_cpu.py b/otx/algorithms/common/adapters/mmcv/data_cpu.py deleted file mode 100644 index c2dee8068cb..00000000000 --- a/otx/algorithms/common/adapters/mmcv/data_cpu.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (C) 2018-2021 OpenMMLab -# SPDX-License-Identifier: 
Apache-2.0 -# -# Copyright (C) 2016-2021 Facebook, Inc -# SPDX-License-Identifier: BSD-3-Clause -# -# Copyright (C) 2020-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch -from mmcv.parallel.data_container import DataContainer -from mmcv.parallel import MMDataParallel - - -def scatter_cpu(inputs): - """Scatter inputs to cpu. - :type:`~mmcv.parallel.DataContainer`. - """ - - def scatter_map(obj): - if isinstance(obj, torch.Tensor): - return [obj] - if isinstance(obj, DataContainer): - return obj.data - if isinstance(obj, tuple) and len(obj) > 0: - return list(zip(*map(scatter_map, obj))) - if isinstance(obj, list) and len(obj) > 0: - out = list(map(list, zip(*map(scatter_map, obj)))) - return out - if isinstance(obj, dict) and len(obj) > 0: - out = list(map(type(obj), zip(*map(scatter_map, obj.items())))) - return out - return [obj] - - # After scatter_map is called, a scatter_map cell will exist. This cell - # has a reference to the actual function scatter_map, which has references - # to a closure that has a reference to the scatter_map cell (because the - # fn is recursive). To avoid this reference cycle, we set the function to - # None, clearing the cell - try: - return scatter_map(inputs) - finally: - scatter_map = None - - -def scatter_kwargs(inputs, kwargs): - """Scatter with support for kwargs dictionary""" - inputs = scatter_cpu(inputs) if inputs else [] - kwargs = scatter_cpu(kwargs) if kwargs else [] - if len(inputs) < len(kwargs): - inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) - elif len(kwargs) < len(inputs): - kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) - inputs = tuple(inputs) - kwargs = tuple(kwargs) - return inputs, kwargs - - -class MMDataCPU(MMDataParallel): - """Implementation of MMDataParallel to use CPU for training""" - - def scatter(self, inputs, kwargs): - return scatter_kwargs(inputs, kwargs) - - def train_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs) - return self.module.train_step(*inputs[0], **kwargs[0]) - - def val_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs) - return self.module.val_step(*inputs[0], **kwargs[0]) - - def forward(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs) - return self.module(*inputs[0], **kwargs[0]) diff --git a/otx/algorithms/common/adapters/mmcv/nncf/__init__.py b/otx/algorithms/common/adapters/mmcv/nncf/__init__.py index 47c1c38cb29..fe4395eb4b7 100644 --- a/otx/algorithms/common/adapters/mmcv/nncf/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/nncf/__init__.py @@ -2,20 +2,13 @@ # SPDX-License-Identifier: Apache-2.0 # -from .utils import ( - prepare_model_for_execution, - get_fake_input, - build_dataloader, - model_eval, - wrap_nncf_model, -) +# flake8: noqa from .patches import * +from .utils import get_fake_input, model_eval, wrap_nncf_model __all__ = [ - "prepare_model_for_execution", "get_fake_input", - "build_dataloader", "model_eval", "wrap_nncf_model", ] diff --git a/otx/algorithms/common/adapters/mmcv/nncf/hooks.py b/otx/algorithms/common/adapters/mmcv/nncf/hooks.py index c366e7d4117..91c7ee4512c 100644 --- a/otx/algorithms/common/adapters/mmcv/nncf/hooks.py +++ b/otx/algorithms/common/adapters/mmcv/nncf/hooks.py @@ -35,7 +35,7 @@ def after_run(self, runner): algo_state["scheduler_state"] = {"current_step": 0, "current_epoch": 0} torch.save( compression_state, - os.path.join(runner.work_dir, "compression_state.pth") + os.path.join(runner.work_dir, 
"compression_state.pth"), ) @@ -57,7 +57,7 @@ def __init__(self, out_dir=None, **kwargs): @master_only def before_run(self, runner): - runner.logger.info(f"Saving checkpoint before training") + runner.logger.info("Saving checkpoint before training") if not self.out_dir: self.out_dir = runner.work_dir runner.save_checkpoint( diff --git a/otx/algorithms/common/adapters/mmcv/nncf/patches.py b/otx/algorithms/common/adapters/mmcv/nncf/patches.py index 63c330d42c5..67692e4ab5c 100644 --- a/otx/algorithms/common/adapters/mmcv/nncf/patches.py +++ b/otx/algorithms/common/adapters/mmcv/nncf/patches.py @@ -5,15 +5,15 @@ from copy import deepcopy -from otx.algorithms.common.adapters.nncf.utils import is_nncf_enabled from otx.algorithms.common.adapters.nncf.patchers import ( NNCF_PATCHER, no_nncf_trace_wrapper, ) - +from otx.algorithms.common.adapters.nncf.utils import is_nncf_enabled if is_nncf_enabled(): from nncf.torch.nncf_network import NNCFNetwork + from otx.algorithms.common.adapters.nncf.patches import nncf_train_step # add wrapper train_step method @@ -27,15 +27,17 @@ def evaluation_wrapper(self, fn, runner, *args, **kwargs): NNCF_PATCHER.patch("mmcv.runner.EvalHook.evaluate", evaluation_wrapper) +NNCF_PATCHER.patch("otx.mpa.modules.hooks.eval_hook.CustomEvalHook.evaluate", evaluation_wrapper) + NNCF_PATCHER.patch( - "mpa.modules.hooks.eval_hook.CustomEvalHook.evaluate", evaluation_wrapper + "otx.mpa.modules.hooks.recording_forward_hooks.FeatureVectorHook.func", + no_nncf_trace_wrapper, ) - NNCF_PATCHER.patch( - "mpa.modules.hooks.recording_forward_hooks.FeatureVectorHook.func", + "otx.mpa.modules.hooks.recording_forward_hooks.ActivationMapHook.func", no_nncf_trace_wrapper, ) NNCF_PATCHER.patch( - "mpa.modules.hooks.recording_forward_hooks.ActivationMapHook.func", + "otx.mpa.modules.hooks.recording_forward_hooks.ReciproCAMHook.func", no_nncf_trace_wrapper, ) diff --git a/otx/algorithms/common/adapters/mmcv/nncf/runners.py b/otx/algorithms/common/adapters/mmcv/nncf/runners.py index 51ea026a1d9..c99e6e9fbf0 100644 --- a/otx/algorithms/common/adapters/mmcv/nncf/runners.py +++ b/otx/algorithms/common/adapters/mmcv/nncf/runners.py @@ -8,8 +8,8 @@ from mmcv.runner.hooks.lr_updater import LrUpdaterHook from mmcv.runner.utils import get_host_info -from otx.algorithms.common.adapters.mmcv.runner import EpochRunnerWithCancel from otx.algorithms.common.adapters.mmcv.nncf.hooks import CompressionHook +from otx.algorithms.common.adapters.mmcv.runner import EpochRunnerWithCancel from otx.algorithms.common.adapters.nncf import ( AccuracyAwareLrUpdater, check_nncf_is_enabled, @@ -31,7 +31,7 @@ def __init__(self, *args, nncf_config, **kwargs): super().__init__(*args, **kwargs) self.nncf_config = nncf_config self.compression_ctrl = None - self._target_metric_name = nncf_config['target_metric_name'] + self._target_metric_name = nncf_config["target_metric_name"] def run(self, data_loaders, *args, **kwargs): check_nncf_is_enabled() @@ -49,21 +49,15 @@ def run(self, data_loaders, *args, **kwargs): lr_update_hook.append(hook) if isinstance(hook, CompressionHook): found_compression_hook = True - assert ( - found_compression_hook - ), f"{CompressionHook} must be registered to {self}." + assert found_compression_hook, f"{CompressionHook} must be registered to {self}." 
assert len(lr_update_hook) <= 1, (
-            f"More than 1 lr update hooks ({len(lr_update_hook)} "
-            f"are registered to {self}"
+            f"More than 1 lr update hooks ({len(lr_update_hook)}) are registered to {self}"
        )
        work_dir = self.work_dir if self.work_dir is not None else "NONE"
-        self.logger.info(
-            "Start running, host: %s, work_dir: %s", get_host_info(), work_dir
-        )
+        self.logger.info("Start running, host: %s, work_dir: %s", get_host_info(), work_dir)
        self.logger.warning(
-            "Note that the workflow and max_epochs parameters "
-            "are not used in NNCF-based accuracy-aware training"
+            "Note that the workflow and max_epochs parameters are not used in NNCF-based accuracy-aware training"
        )

        # taking only the first data loader for NNCF training
@@ -73,10 +67,8 @@
        self._max_epochs = params["maximal_total_epochs"]
        self._max_iters = self._max_epochs * len(self.train_data_loader)

-        self.logger.info('Start running, host: %s, work_dir: %s',
-                         get_host_info(), work_dir)
-        self.logger.info('Hooks will be executed in the following order:\n%s',
-                         self.get_hook_info())
+        self.logger.info("Start running, host: %s, work_dir: %s", get_host_info(), work_dir)
+        self.logger.info("Hooks will be executed in the following order:\n%s", self.get_hook_info())
        self.call_hook("before_run")

        def configure_optimizers_fn():
@@ -85,7 +77,7 @@
        if len(lr_update_hook) == 1:
            lr_update_hook = lr_update_hook[0]

-            def configure_optimizers_fn():
+            def configure_optimizers_fn():  # noqa: F811
                return self.optimizer, AccuracyAwareLrUpdater(lr_update_hook)

        acc_aware_training_loop = create_accuracy_aware_training_loop(
@@ -119,7 +111,7 @@ def validation_fn(self, *args, **kwargs):
        # Get metric from runner's attributes that set in EvalHook.evaluate() function
        all_metrics = getattr(self, "all_metrics", {})
        if len(all_metrics) == 0:
-            evalhook = [hook for hook in self.hooks if getattr(hook, '_do_evaluate', None)]
+            evalhook = [hook for hook in self.hooks if getattr(hook, "_do_evaluate", None)]
            assert len(evalhook) == 1
            evalhook[0]._do_evaluate(self)
            all_metrics = getattr(self, "all_metrics", {})
diff --git a/otx/algorithms/common/adapters/mmcv/nncf/utils.py b/otx/algorithms/common/adapters/mmcv/nncf/utils.py
index 2ba3598378d..4bee7c074c4 100644
--- a/otx/algorithms/common/adapters/mmcv/nncf/utils.py
+++ b/otx/algorithms/common/adapters/mmcv/nncf/utils.py
@@ -5,23 +5,16 @@
 import os
 import pathlib
 from copy import deepcopy
-from typing import Any, Callable, Dict, Optional, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple

 import numpy as np
 import torch
 import torch.nn as nn
 from mmcv import Config
-from mmcv.parallel import (
-    DataContainer,
-    MMDataParallel,
-    MMDistributedDataParallel,
-    collate,
-    scatter,
-)
-from mpa.utils.logger import get_logger
+from mmcv.parallel import DataContainer, collate, scatter
 from torch.utils.data import DataLoader

-from otx.algorithms.common.adapters.mmcv.data_cpu import MMDataCPU, scatter_cpu
+from otx.algorithms.common.adapters.mmcv.utils.builder import build_data_parallel
 from otx.algorithms.common.adapters.nncf.compression import (
     COMPRESSION_STATE_NAME,
     NNCF_STATE_NAME,
@@ -33,44 +26,11 @@
     no_nncf_trace,
 )
 from otx.algorithms.common.utils import get_arg_spec
-
+from otx.mpa.utils.logger import get_logger

 logger = get_logger()


-def prepare_model_for_execution(
-    model: nn.Module,
-    cfg: Config,
-    distributed: bool = False,
-):
-    """
-    Prepare model for execution.
- Return model import ast, MMDataParallel or MMDataCPU. - - :param model: Model. - :param cfg: training mmdet config. - :param distributed: Enable distributed training mode. - :return: - """ - if torch.cuda.is_available(): - if distributed: - # put model on gpus - find_unused_parameters = cfg.get("find_unused_parameters", False) - # Sets the `find_unused_parameters` parameter in - # torch.nn.parallel.DistributedDataParallel - model = MMDistributedDataParallel( - model, - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False, - find_unused_parameters=find_unused_parameters, - ) - else: - model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=[0]) - else: - model = MMDataCPU(model) - return model - - def get_fake_input( preprocessor: Callable[..., Dict[str, Any]], data: Optional[np.ndarray] = None, @@ -88,44 +48,12 @@ def get_fake_input( data[key] = [value] if device == torch.device("cpu"): - data = scatter_cpu(collate([data], samples_per_gpu=1))[0] + data = scatter(collate([data], samples_per_gpu=1), [-1])[0] else: data = scatter(collate([data], samples_per_gpu=1), [device.index])[0] return data -def build_dataloader( - config: Config, - subset: str, - distributed: bool, - dataloader_builder: Callable, - dataset_builder: Callable, -): - loader_cfg = dict( - samples_per_gpu=config.data.get("samples_per_gpu", 1), - workers_per_gpu=config.data.get("workers_per_gpu", 0), - num_gpus=len(config.gpu_ids), - dist=distributed, - seed=config.get("seed", None), - ) - if subset == "train": - default_args = dict(test_mode=False) - else: - default_args = dict(test_mode=True) - loader_cfg["shuffle"] = False - loader_cfg["samples_per_gpu"] = 1 - - dataset = dataset_builder(config.data.get(subset), default_args) - - loader_cfg = {**loader_cfg, **config.data.get(f"{subset}_dataloader", {})} - - dataloader = dataloader_builder( - dataset, - **loader_cfg, - ) - return dataloader - - def model_eval( model: nn.Module, *, @@ -149,7 +77,7 @@ def model_eval( nncf_config = config.get("nncf_config") metric_name = nncf_config.get("target_metric_name") - prepared_model = prepare_model_for_execution(model, config, distributed) + prepared_model = build_data_parallel(model, config, distributed=distributed) logger.info("Calculating an original model accuracy") @@ -161,14 +89,12 @@ def model_eval( evaluation_cfg["metric"] = metric_name if distributed: - dist_eval_res = [None] + dist_eval_res: List[Dict[str, Any]] = [dict()] results = evaluate_fn(prepared_model, val_dataloader, gpu_collect=True) if torch.distributed.get_rank() == 0: eval_res = val_dataloader.dataset.evaluate(results, **evaluation_cfg) if metric_name not in eval_res: - raise RuntimeError( - f"Cannot find {metric_name} metric in " "the evaluation result dict" - ) + raise RuntimeError(f"Cannot find {metric_name} metric in the evaluation result dict") dist_eval_res[0] = eval_res torch.distributed.broadcast_object_list(dist_eval_res, src=0) @@ -178,10 +104,7 @@ def model_eval( eval_res = val_dataloader.dataset.evaluate(results, **evaluation_cfg) if metric_name not in eval_res: - raise RuntimeError( - f"Cannot find {metric_name} metric in " - f"the evaluation result dict {eval_res.keys()}" - ) + raise RuntimeError(f"Cannot find {metric_name} metric in the evaluation result dict {eval_res.keys()}") return eval_res[metric_name] @@ -217,10 +140,7 @@ class MMInitializeDataLoader(PTInitializingDataLoader): def get_inputs(self, dataloader_output): # redefined PTInitializingDataLoader because # of DataContainer format in mmdet - kwargs = { - k: 
v.data[0] if isinstance(v, DataContainer) else v - for k, v in dataloader_output.items() - } + kwargs = {k: v.data[0] if isinstance(v, DataContainer) else v for k, v in dataloader_output.items()} return (), kwargs pathlib.Path(config.work_dir).mkdir(parents=True, exist_ok=True) @@ -247,10 +167,7 @@ def get_inputs(self, dataloader_output): checkpoint_path = config.get("load_from") if not is_checkpoint_nncf(checkpoint_path): checkpoint_path = None - logger.info( - "Received non-NNCF checkpoint to start training " - "-- initialization of NNCF fields will be done" - ) + logger.info("Received non-NNCF checkpoint to start training -- initialization of NNCF fields will be done") else: checkpoint_path = None @@ -282,9 +199,7 @@ def get_inputs(self, dataloader_output): def _get_fake_data_for_forward(nncf_config): device = next(model.parameters()).device - if nncf_config.get("input_info", None) and nncf_config.get( - "input_info" - ).get("sample_size", None): + if nncf_config.get("input_info", None) and nncf_config.get("input_info").get("sample_size", None): input_size = nncf_config.get("input_info").get("sample_size") assert len(input_size) == 4 and input_size[0] == 1 H, W, C = input_size[2], input_size[3], input_size[1] diff --git a/otx/algorithms/common/adapters/mmcv/utils/__init__.py b/otx/algorithms/common/adapters/mmcv/utils/__init__.py new file mode 100644 index 00000000000..27529d4578c --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/utils/__init__.py @@ -0,0 +1,48 @@ +"""OTX Adapters - mmcv.utils.""" + +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .builder import build_dataset, build_dataloader, build_data_parallel +from .config_utils import ( + remove_from_config, + remove_from_configs_by_type, + get_configs_by_dict, + get_configs_by_keys, + update_config, + get_dataset_configs, + prepare_for_testing, + is_epoch_based_runner, + config_from_string, + patch_default_config, + patch_data_pipeline, + patch_color_conversion, + patch_runner, + align_data_config_with_recipe, + get_meta_keys, + prepare_work_dir, + get_data_cfg, +) + +__all__ = [ + "build_dataset", + "build_dataloader", + "build_data_parallel", + "remove_from_config", + "remove_from_configs_by_type", + "get_configs_by_dict", + "get_configs_by_keys", + "update_config", + "get_dataset_configs", + "prepare_for_testing", + "is_epoch_based_runner", + "config_from_string", + "patch_default_config", + "patch_data_pipeline", + "patch_color_conversion", + "patch_runner", + "align_data_config_with_recipe", + "get_meta_keys", + "prepare_work_dir", + "get_data_cfg", +] diff --git a/otx/algorithms/common/adapters/mmcv/utils/builder.py b/otx/algorithms/common/adapters/mmcv/utils/builder.py new file mode 100644 index 00000000000..a0846556d53 --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/utils/builder.py @@ -0,0 +1,162 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import os +from typing import Callable, Union, TYPE_CHECKING, overload, Literal + +from mmcv import Config +from mmcv.parallel import ( + MMDataParallel, + MMDistributedDataParallel, +) + +import torch +from torch.utils.data import DataLoader, Dataset + +from otx.api.utils.argument_checks import ( + check_input_parameters_type, +) + + +if TYPE_CHECKING: + + @overload + def build_data_parallel( + model: torch.nn.Module, + config: Config, + *, + distributed: bool, + ) -> Union[MMDataParallel, MMDistributedDataParallel]: + ... 
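# NOTE (review sketch): the @overload stubs in this TYPE_CHECKING block only
# narrow the static return type; the runtime behaviour lives in the
# build_data_parallel definition further below. For example:
#
#     build_data_parallel(model, cfg, distributed=False)  # typed MMDataParallel
#     build_data_parallel(model, cfg, distributed=True)   # typed MMDistributedDataParallel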
+
+    @overload
+    def build_data_parallel(
+        model: torch.nn.Module,
+        config: Config,
+        *,
+        distributed: Literal[True],
+    ) -> MMDistributedDataParallel:
+        ...
+
+    @overload
+    def build_data_parallel(
+        model: torch.nn.Module,
+        config: Config,
+        *,
+        distributed: Literal[False] = False,
+    ) -> MMDataParallel:
+        ...
+
+
+@check_input_parameters_type()
+def build_dataset(
+    config: Config,
+    subset: str,
+    dataset_builder: Callable,
+    *,
+    consume: bool = False,
+) -> Dataset:
+    if subset in ["test", "val"]:
+        default_args = dict(test_mode=True)
+    else:
+        default_args = dict(test_mode=False)
+
+    dataset_cfg = config.data.pop(subset) if consume else config.data.get(subset)
+    dataset = dataset_builder(dataset_cfg, default_args)
+    return dataset
+
+
+@check_input_parameters_type()
+def build_dataloader(
+    dataset,
+    config: Config,
+    subset: str,
+    dataloader_builder: Callable,
+    *,
+    distributed: bool = False,
+    consume: bool = False,
+    **kwargs,
+) -> DataLoader:
+
+    # samples_per_gpu = config.data.get("samples_per_gpu", 1)
+    # if subset in ["test", "val"]:
+    #     samples_per_gpu = 1
+
+    loader_cfg = dict(
+        samples_per_gpu=config.data.get("samples_per_gpu", 1),
+        workers_per_gpu=config.data.get("workers_per_gpu", 0),
+        num_gpus=len(config.gpu_ids),
+        dist=distributed,
+        seed=config.get("seed", None),
+        shuffle=False if subset in ["test", "val"] else True,
+    )
+
+    # The overall dataloader settings
+    loader_cfg.update(
+        {
+            k: v
+            for k, v in config.data.items()
+            if k
+            not in [
+                "train",
+                "val",
+                "test",
+                "unlabeled",
+                "train_dataloader",
+                "val_dataloader",
+                "test_dataloader",
+                "unlabeled_dataloader",
+            ]
+        }
+    )
+
+    specific_loader_cfg = (
+        config.data.pop(f"{subset}_dataloader", {})
+        if consume
+        else config.data.get(f"{subset}_dataloader", {})
+    )
+    loader_cfg = {**loader_cfg, **specific_loader_cfg, **kwargs}
+
+    dataloader = dataloader_builder(
+        dataset,
+        **loader_cfg,
+    )
+    return dataloader
+
+
+@check_input_parameters_type()
+def build_data_parallel(
+    model: torch.nn.Module,
+    config: Config,
+    *,
+    distributed: bool = False,
+) -> Union[MMDataParallel, MMDistributedDataParallel]:
+    """
+    Prepare model for execution.
+    Return the model wrapped with MMDataParallel or MMDistributedDataParallel.
+
+    :param model: Model.
+    :param config: config.
+    :param distributed: Enable distributed training mode.
+ :return: + """ + if torch.cuda.is_available(): + if distributed: + model = model.cuda() + # put model on gpus + find_unused_parameters = config.get("find_unused_parameters", False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = MMDistributedDataParallel( + model, + device_ids=[int(os.environ['LOCAL_RANK'])], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters, + ) + else: + model = model.cuda(config.gpu_ids[0]) + model = MMDataParallel(model, device_ids=config.gpu_ids) + else: + model = MMDataParallel(model, device_ids=[-1]) + return model diff --git a/otx/algorithms/common/adapters/mmcv/utils.py b/otx/algorithms/common/adapters/mmcv/utils/config_utils.py similarity index 92% rename from otx/algorithms/common/adapters/mmcv/utils.py rename to otx/algorithms/common/adapters/mmcv/utils/config_utils.py index d10e41aeacf..2419ac9c405 100644 --- a/otx/algorithms/common/adapters/mmcv/utils.py +++ b/otx/algorithms/common/adapters/mmcv/utils/config_utils.py @@ -19,7 +19,7 @@ import os import tempfile from collections.abc import Mapping -from typing import Union, List, Literal, Any, Dict, Tuple, overload, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Dict, List, Literal, Tuple, Union, overload from mmcv import Config, ConfigDict @@ -28,14 +28,13 @@ DatasetParamTypeCheck, check_input_parameters_type, ) - -from mpa.utils.logger import get_logger - +from otx.mpa.utils.logger import get_logger logger = get_logger() if TYPE_CHECKING: + @overload def get_configs_by_dict( config: Union[Config, ConfigDict], @@ -126,12 +125,7 @@ def get_configs_by_dict( # noqa: C901 def get_config(config, path=()): out = dict() if isinstance(config, (Config, Mapping)): - if all( - [ - True if config.get(key, None) == value else False - for key, value in pairs.items() - ] - ): + if all([True if config.get(key, None) == value else False for key, value in pairs.items()]): return {path: config} for key, value in config.items(): out.update(get_config(value, (*path, key))) @@ -204,9 +198,7 @@ def update_config( if key not in ptr: ptr[key] = ConfigDict() elif isinstance(ptr, (list, tuple)): - assert isinstance(key, int), ( - f"{key} of {path} must be int for ({type(ptr)}: {ptr})" - ) + assert isinstance(key, int), f"{key} of {path} must be int for ({type(ptr)}: {ptr})" assert len(ptr) < key, f"{key} of {path} exceeds {len(ptr)}" if len(path_) == 0: ptr[key] = value @@ -214,9 +206,7 @@ def update_config( @check_input_parameters_type() -def get_dataset_configs( - config: Union[Config, ConfigDict], subset: str = "train" -) -> List[ConfigDict]: +def get_dataset_configs(config: Union[Config, ConfigDict], subset: str = "train") -> List[ConfigDict]: if config.data.get(subset, None) is None: return [] data_cfg = config.data[subset] @@ -308,10 +298,7 @@ def patch_runner(config: Config): @check_input_parameters_type() -def align_data_config_with_recipe( - data_config: ConfigDict, - config: Union[Config, ConfigDict] -): +def align_data_config_with_recipe(data_config: ConfigDict, config: Union[Config, ConfigDict]): # we assumed config has 'otx_dataset' and 'label' key in it # by 'patch_datasets' function @@ -320,11 +307,7 @@ def align_data_config_with_recipe( for subset in data_config.keys(): subset_config = data_config.get(subset, {}) for key in list(subset_config.keys()): - found_config = get_configs_by_keys( - config.get(subset), - key, - return_path=True - ) + found_config = get_configs_by_keys(config.get(subset), key, return_path=True) assert 
len(found_config) == 1 value = subset_config.pop(key) path = list(found_config.keys())[0] diff --git a/otx/algorithms/common/adapters/nncf/__init__.py b/otx/algorithms/common/adapters/nncf/__init__.py index 71c8a690f39..f1bbdc0ab4b 100644 --- a/otx/algorithms/common/adapters/nncf/__init__.py +++ b/otx/algorithms/common/adapters/nncf/__init__.py @@ -2,6 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # +# flake8: noqa + from .compression import ( AccuracyAwareLrUpdater, get_nncf_metadata, @@ -9,6 +11,7 @@ is_checkpoint_nncf, is_state_nncf, ) +from .patches import * from .utils import ( check_nncf_is_enabled, get_nncf_version, @@ -17,9 +20,6 @@ no_nncf_trace, ) -from .patches import * - - __all__ = [ "AccuracyAwareLrUpdater", "check_nncf_is_enabled", diff --git a/otx/algorithms/common/adapters/nncf/compression.py b/otx/algorithms/common/adapters/nncf/compression.py index 5359ca71dcb..5192c79d823 100644 --- a/otx/algorithms/common/adapters/nncf/compression.py +++ b/otx/algorithms/common/adapters/nncf/compression.py @@ -6,7 +6,6 @@ from .utils import check_nncf_is_enabled, get_nncf_version, is_nncf_enabled - NNCF_STATE_NAME = "nncf_model_state" COMPRESSION_STATE_NAME = "compression_state" DATA_TO_BUILD_NAME = "data_to_build_nncf" diff --git a/otx/algorithms/common/adapters/nncf/config.py b/otx/algorithms/common/adapters/nncf/config.py index 96557a590aa..022021604c4 100644 --- a/otx/algorithms/common/adapters/nncf/config.py +++ b/otx/algorithms/common/adapters/nncf/config.py @@ -16,8 +16,7 @@ def load_nncf_config(path): - assert path.endswith('.json'), ( - f'Only json files are allowed as optimisation configs, provided {path}') + assert path.endswith(".json"), f"Only json files are allowed as optimisation configs, provided {path}" with open(path) as f_src: nncf_config = json.load(f_src) return nncf_config @@ -26,37 +25,38 @@ def load_nncf_config(path): def compose_nncf_config(nncf_config, enabled_options): optimisation_parts = nncf_config - if 'order_of_parts' in optimisation_parts: + if "order_of_parts" in optimisation_parts: # The result of applying the changes from optimisation parts # may depend on the order of applying the changes # (e.g. if for nncf_quantization it is sufficient to have `total_epochs=2`, # but for sparsity it is required `total_epochs=50`) # So, user can define `order_of_parts` in the optimisation_config # to specify the order of applying the parts. 
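# NOTE (review sketch): a toy run of the behaviour described above (the part
# names follow the compression configs earlier in this patch):
#
#     cfg = {
#         "base": {"nncf_config": {"log_dir": "."}},
#         "nncf_quantization": {"nncf_config": {"compression": [{"algorithm": "quantization"}]}},
#         "order_of_parts": ["nncf_quantization"],
#     }
#     compose_nncf_config(cfg, ["nncf_quantization"])
#     # -> {"nncf_config": {"log_dir": ".", "compression": [{"algorithm": "quantization"}]}}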
- order_of_parts = optimisation_parts['order_of_parts'] - assert isinstance(order_of_parts, list), \ - 'The field "order_of_parts" in optimisation config should be a list' + order_of_parts = optimisation_parts["order_of_parts"] + assert isinstance(order_of_parts, list), 'The field "order_of_parts" in optimisation config should be a list' for part in enabled_options: - assert part in order_of_parts, ( - f'The part {part} is selected, but it is absent in order_of_parts={order_of_parts}') + assert ( + part in order_of_parts + ), f"The part {part} is selected, but it is absent in order_of_parts={order_of_parts}" optimisation_parts_to_choose = [part for part in order_of_parts if part in enabled_options] - assert 'base' in optimisation_parts, 'Error: the optimisation config does not contain the "base" part' - nncf_config_part = optimisation_parts['base'] + assert "base" in optimisation_parts, 'Error: the optimisation config does not contain the "base" part' + nncf_config_part = optimisation_parts["base"] for part in optimisation_parts_to_choose: - assert part in optimisation_parts, ( - f'Error: the optimisation config does not contain the part "{part}"') + assert part in optimisation_parts, f'Error: the optimisation config does not contain the part "{part}"' optimisation_part_dict = optimisation_parts[part] try: nncf_config_part = merge_dicts_and_lists_b_into_a(nncf_config_part, optimisation_part_dict) except AssertionError as cur_error: - err_descr = (f'Error during merging the parts of nncf configs:\n' - f'the current part={part}, ' - f'the order of merging parts into base is {optimisation_parts_to_choose}.\n' - f'The error is:\n{cur_error}') + err_descr = ( + f"Error during merging the parts of nncf configs:\n" + f"the current part={part}, " + f"the order of merging parts into base is {optimisation_parts_to_choose}.\n" + f"The error is:\n{cur_error}" + ) raise RuntimeError(err_descr) from None return nncf_config_part @@ -76,16 +76,19 @@ def _merge_dicts_and_lists_b_into_a(a, b, cur_key=None): Note that we merge b into a (whereas Config makes merge a into b), since otherwise the order of list merging is counter-intuitive. """ + def _err_str(_a, _b, _key): if _key is None: - _key_str = 'of whole structures' + _key_str = "of whole structures" else: - _key_str = f'during merging for key=`{_key}`' - return (f'Error in merging parts of config: different types {_key_str},' - f' type(a) = {type(_a)},' - f' type(b) = {type(_b)}') - - assert isinstance(a, (dict, list)), f'Can merge only dicts and lists, whereas type(a)={type(a)}' + _key_str = f"during merging for key=`{_key}`" + return ( + f"Error in merging parts of config: different types {_key_str}," + f" type(a) = {type(_a)}," + f" type(b) = {type(_b)}" + ) + + assert isinstance(a, (dict, list)), f"Can merge only dicts and lists, whereas type(a)={type(a)}" assert isinstance(b, (dict, list)), _err_str(a, b, cur_key) assert isinstance(a, list) == isinstance(b, list), _err_str(a, b, cur_key) if isinstance(a, list): @@ -97,7 +100,7 @@ def _err_str(_a, _b, _key): if k not in a: a[k] = copy(b[k]) continue - new_cur_key = cur_key + '.' + k if cur_key else k + new_cur_key = cur_key + "." 
+ k if cur_key else k if isinstance(a[k], (dict, list)): a[k] = _merge_dicts_and_lists_b_into_a(a[k], b[k], new_cur_key) continue diff --git a/otx/algorithms/common/adapters/nncf/patchers/__init__.py b/otx/algorithms/common/adapters/nncf/patchers/__init__.py index 133c1d7cf58..52e37bd7966 100644 --- a/otx/algorithms/common/adapters/nncf/patchers/__init__.py +++ b/otx/algorithms/common/adapters/nncf/patchers/__init__.py @@ -1,3 +1,4 @@ +"""Simple monkey patch helper.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -5,7 +6,6 @@ from .patcher import Patcher from .wrappers import nncf_trace_wrapper, no_nncf_trace_wrapper - NNCF_PATCHER = Patcher() diff --git a/otx/algorithms/common/adapters/nncf/patchers/patcher.py b/otx/algorithms/common/adapters/nncf/patchers/patcher.py index 4e3363f7ca0..b525e3d0e91 100644 --- a/otx/algorithms/common/adapters/nncf/patchers/patcher.py +++ b/otx/algorithms/common/adapters/nncf/patchers/patcher.py @@ -6,24 +6,27 @@ import inspect from collections import OrderedDict from functools import partial, partialmethod -from typing import Callable, Optional +from typing import Callable class Patcher: + """Simple monkey patch helper.""" + def __init__(self): self._patched = OrderedDict() - def patch( + def patch( # noqa: C901 self, obj_cls, wrapper: Callable, *, force: bool = True, ): + """Do monkey patch.""" obj_cls, fn_name = self.import_obj(obj_cls) # wrap only if function does exist - n_args = len(inspect.getargspec(obj_cls.__getattribute__)[0]) + n_args = len(inspect.getfullargspec(obj_cls.__getattribute__)[0]) if n_args == 1: try: fn = obj_cls.__getattribute__(fn_name) @@ -33,7 +36,7 @@ def patch( else: if inspect.isclass(obj_cls): try: - fn = obj_cls.__getattribute__(obj_cls, fn_name) + fn = obj_cls.__getattribute__(obj_cls, fn_name) # type: ignore except AttributeError: return self._patch_class_fn(obj_cls, fn_name, fn, wrapper, force) @@ -44,7 +47,8 @@ def patch( return self._patch_instance_fn(obj_cls, fn_name, fn, wrapper, force) - def import_obj(self, obj_cls): + def import_obj(self, obj_cls): # noqa: C901 + """Object import helper.""" if isinstance(obj_cls, str): fn_name = obj_cls.split(".")[-1] obj_cls = ".".join(obj_cls.split(".")[:-1]) @@ -69,9 +73,7 @@ def import_obj(self, obj_cls): obj_cls = obj_cls.__self__ else: fn_name = obj_cls.__name__ - obj_cls = ".".join( - [obj_cls.__module__] + obj_cls.__qualname__.split(".")[:-1] - ) + obj_cls = ".".join([obj_cls.__module__] + obj_cls.__qualname__.split(".")[:-1]) if isinstance(obj_cls, str): try: @@ -83,7 +85,7 @@ def import_obj(self, obj_cls): return obj_cls, fn_name def _patch_module_fn(self, obj_cls, fn_name, fn, wrapper, force): - assert len(inspect.getargspec(obj_cls.__getattribute__)[0]) == 1 + assert len(inspect.getfullargspec(obj_cls.__getattribute__)[0]) == 1 obj_cls_path = obj_cls.__name__ key = (obj_cls_path, fn_name) fn_ = self._initialize(key, force) @@ -107,13 +109,13 @@ def helper(*args, **kwargs): else: - def helper(self, *args, **kwargs): + def helper(self, *args, **kwargs): # type: ignore kwargs.pop("__obj_cls") wrapper = kwargs.pop("__wrapper") fn = kwargs.pop("__fn") return wrapper(self, fn.__get__(self), *args, **kwargs) - assert len(inspect.getargspec(obj_cls.__getattribute__)[0]) == 2 + assert len(inspect.getfullargspec(obj_cls.__getattribute__)[0]) == 2 obj_cls_path = obj_cls.__module__ + "." 
+ obj_cls.__name__ key = (obj_cls_path, fn_name) fn_ = self._initialize(key, force) @@ -127,7 +129,7 @@ def helper(self, *args, **kwargs): self._patched[key].append((fn, wrapper)) def _patch_instance_fn(self, obj_cls, fn_name, fn, wrapper, force): - assert len(inspect.getargspec(obj_cls.__getattribute__)[0]) == 2 + assert len(inspect.getfullargspec(obj_cls.__getattribute__)[0]) == 2 obj_cls_path = id(obj_cls) key = (obj_cls_path, fn_name) fn_ = self._initialize(key, force) diff --git a/otx/algorithms/common/adapters/nncf/patchers/wrappers.py b/otx/algorithms/common/adapters/nncf/patchers/wrappers.py index 7847c504c64..4384606e24e 100644 --- a/otx/algorithms/common/adapters/nncf/patchers/wrappers.py +++ b/otx/algorithms/common/adapters/nncf/patchers/wrappers.py @@ -2,10 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -from otx.algorithms.common.adapters.nncf.utils import ( - nncf_trace, - no_nncf_trace, -) +from otx.algorithms.common.adapters.nncf.utils import nncf_trace, no_nncf_trace def no_nncf_trace_wrapper(self, fn, *args, **kwargs): diff --git a/otx/algorithms/common/adapters/nncf/patches.py b/otx/algorithms/common/adapters/nncf/patches.py index 46288c74330..7b6ac25c9b9 100644 --- a/otx/algorithms/common/adapters/nncf/patches.py +++ b/otx/algorithms/common/adapters/nncf/patches.py @@ -30,7 +30,7 @@ def nncf_trace_context(self, img_metas, nncf_compress_postprocessing=True): def nncf_train_step(self, data, optimizer): - import torch + import torch # noqa: F401 from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors with self._compressed_context as ctx: diff --git a/otx/algorithms/common/adapters/nncf/utils/__init__.py b/otx/algorithms/common/adapters/nncf/utils/__init__.py index 918e4ca6416..0320c4f9011 100644 --- a/otx/algorithms/common/adapters/nncf/utils/__init__.py +++ b/otx/algorithms/common/adapters/nncf/utils/__init__.py @@ -3,14 +3,14 @@ # from .utils import ( - is_nncf_enabled, check_nncf_is_enabled, get_nncf_version, + is_accuracy_aware_training_set, + is_in_nncf_tracing, + is_nncf_enabled, load_checkpoint, - no_nncf_trace, nncf_trace, - is_in_nncf_tracing, - is_accuracy_aware_training_set, + no_nncf_trace, ) __all__ = [ diff --git a/otx/algorithms/common/adapters/nncf/utils/utils.py b/otx/algorithms/common/adapters/nncf/utils/utils.py index 5473b1c78d8..5b0e623ec8c 100644 --- a/otx/algorithms/common/adapters/nncf/utils/utils.py +++ b/otx/algorithms/common/adapters/nncf/utils/utils.py @@ -2,14 +2,13 @@ # SPDX-License-Identifier: Apache-2.0 # -import importlib from collections import OrderedDict from contextlib import contextmanager +from importlib.util import find_spec import torch - -_is_nncf_enabled = importlib.util.find_spec("nncf") is not None +_is_nncf_enabled = find_spec("nncf") is not None def is_nncf_enabled(): @@ -80,9 +79,11 @@ def no_nncf_trace(): def nncf_trace(): if is_nncf_enabled(): + @contextmanager def _nncf_trace(): from nncf.torch.dynamic_graph.context import get_current_context + ctx = get_current_context() if ctx is not None and not ctx.is_tracing: ctx.enable_tracing() @@ -90,6 +91,7 @@ def _nncf_trace(): ctx.disable_tracing() else: yield + return _nncf_trace() return nullcontext() diff --git a/otx/algorithms/common/tasks/nncf_base.py b/otx/algorithms/common/tasks/nncf_base.py index 80fe298621b..39356fef959 100644 --- a/otx/algorithms/common/tasks/nncf_base.py +++ b/otx/algorithms/common/tasks/nncf_base.py @@ -21,12 +21,10 @@ import tempfile from collections.abc import Mapping from copy import deepcopy -from functools import partial -from 
typing import List, Optional +from typing import Dict, List, Optional import torch -from mmcv.utils import Config, ConfigDict -from mpa.utils.logger import get_logger +from mmcv.utils import ConfigDict from otx.algorithms.common.adapters.mmcv.utils import ( get_configs_by_keys, @@ -44,6 +42,7 @@ ) from otx.algorithms.common.adapters.nncf.config import compose_nncf_config from otx.algorithms.common.utils.callback import OptimizationProgressCallback +from otx.algorithms.common.utils.data import get_dataset from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings from otx.api.entities.datasets import DatasetEntity @@ -69,10 +68,10 @@ DatasetParamTypeCheck, check_input_parameters_type, ) +from otx.mpa.utils.logger import get_logger from .training_base import BaseTask - logger = get_logger() @@ -84,9 +83,9 @@ def __init__(self, task_environment: TaskEnvironment, **kwargs): # Set default model attributes. check_nncf_is_enabled() self._nncf_data_to_build = None - self._nncf_state_dict_to_build = dict() + self._nncf_state_dict_to_build: Dict[str, torch.Tensor] = dict() self._nncf_preset = None - self._optimization_methods = [] # type: List + self._optimization_methods: List[OptimizationMethod] = [] self._precision = [ModelPrecision.FP32] self._scratch_space = tempfile.mkdtemp(prefix="otx-nncf-scratch-") @@ -127,18 +126,18 @@ def _set_attributes_by_hyperparams(self): def _init_train_data_cfg(self, dataset: DatasetEntity): logger.info("init data cfg.") - data_cfg = ConfigDict( - data=ConfigDict( - train=ConfigDict( - otx_dataset=dataset.get_subset(Subset.TRAINING), - labels=self._labels, - ), - val=ConfigDict( - otx_dataset=dataset.get_subset(Subset.VALIDATION), + data_cfg = ConfigDict(data=ConfigDict()) + + for cfg_key, subset in zip( + ["train", "val"], + [Subset.TRAINING, Subset.VALIDATION], + ): + subset = get_dataset(dataset, subset) + if subset: + data_cfg.data[cfg_key] = ConfigDict( + otx_dataset=subset, labels=self._labels, - ), - ) - ) + ) # Temparory remedy for cfg.pretty_text error for label in self._labels: @@ -152,13 +151,9 @@ def _init_nncf_cfg(self): with open(nncf_config_path, encoding="UTF-8") as nncf_config_file: common_nncf_config = json.load(nncf_config_file) - optimization_config = compose_nncf_config( - common_nncf_config, [self._nncf_preset] - ) + optimization_config = compose_nncf_config(common_nncf_config, [self._nncf_preset]) - max_acc_drop = ( - self._hyperparams.nncf_optimization.maximal_accuracy_degradation / 100 - ) + max_acc_drop = self._hyperparams.nncf_optimization.maximal_accuracy_degradation / 100 if "accuracy_aware_training" in optimization_config["nncf_config"]: # Update maximal_absolute_accuracy_degradation ( @@ -183,9 +178,7 @@ def _initialize_post_hook(self, options=dict()): # when initializing and training NNCF if self._data_cfg is not None: data_loader = self._recipe_cfg.data.get("train_dataloader", {}) - samples_per_gpu = data_loader.get( - "samples_per_gpu", self._recipe_cfg.data.get("samples_per_gpu") - ) + samples_per_gpu = data_loader.get("samples_per_gpu", self._recipe_cfg.data.get("samples_per_gpu")) otx_dataset = get_configs_by_keys(self._data_cfg.data.train, "otx_dataset") assert len(otx_dataset) == 1 otx_dataset = otx_dataset[0] @@ -210,10 +203,7 @@ def _initialize_post_hook(self, options=dict()): if isinstance(metric_name, list): metric_name = metric_name[0] nncf_config.target_metric_name = metric_name - logger.info( - "'target_metric_name' not found in nncf config. 
" - f"Using {metric_name} as target metric" - ) + logger.info(f"'target_metric_name' not found in nncf config. Using {metric_name} as target metric") if is_accuracy_aware_training_set(nncf_config): # Prepare runner for Accuracy Aware @@ -317,9 +307,7 @@ def optimize( } # update checkpoint to the newly trained model - self._model_ckpt = os.path.join( - os.path.dirname(results.get("final_ckpt")), "temporary.pth" - ) + self._model_ckpt = os.path.join(os.path.dirname(results.get("final_ckpt")), "temporary.pth") torch.save(model_ckpt, self._model_ckpt) self._optimize_post_hook(dataset, output_model) @@ -339,10 +327,11 @@ def _save_model_post_hook(self, modelinfo): @check_input_parameters_type() def save_model(self, output_model: ModelEntity): """Saving model function for NNCF Task.""" + assert self._recipe_cfg is not None + assert self._model_cfg is not None + buffer = io.BytesIO() - hyperparams_str = ids_to_strings( - cfg_helper.convert(self._hyperparams, dict, enum_to_str=True) - ) + hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) labels = {label.name: label.color.rgb_tuple for label in self._labels} # some custom hooks are not pickable diff --git a/otx/algorithms/common/tasks/training_base.py b/otx/algorithms/common/tasks/training_base.py index 896d4284140..280fad5eb4e 100644 --- a/otx/algorithms/common/tasks/training_base.py +++ b/otx/algorithms/common/tasks/training_base.py @@ -20,19 +20,19 @@ import os import shutil import tempfile -from typing import DefaultDict, Dict, List, Optional, Union from copy import deepcopy +from typing import DefaultDict, Dict, List, Optional, Union import numpy as np import torch from mmcv.utils.config import Config, ConfigDict from otx.algorithms.common.adapters.mmcv.hooks import OTXLoggerHook -from otx.algorithms.common.configs import TrainType from otx.algorithms.common.adapters.mmcv.utils import ( align_data_config_with_recipe, - get_configs_by_dict + get_configs_by_dict, ) +from otx.algorithms.common.configs import TrainType from otx.api.entities.datasets import DatasetEntity from otx.api.entities.label import LabelEntity from otx.api.entities.model import ModelEntity, ModelPrecision, OptimizationMethod @@ -47,9 +47,12 @@ from otx.mpa.builder import build from otx.mpa.modules.hooks.cancel_interface_hook import CancelInterfaceHook from otx.mpa.stage import Stage -from otx.mpa.utils.config_utils import remove_custom_hook, update_or_add_custom_hook +from otx.mpa.utils.config_utils import ( + MPAConfig, + remove_custom_hook, + update_or_add_custom_hook, +) from otx.mpa.utils.logger import get_logger -from otx.mpa.utils.config_utils import MPAConfig logger = get_logger() @@ -121,8 +124,8 @@ def _run_task(self, stage_module, mode=None, dataset=None, **kwargs): train_data_cfg["new_classes"] = new_classes logger.info( - "running task... kwargs = " + - str({k: v if k != "model_builder" else object.__repr__(v) for k, v in kwargs.items()}) + "running task... kwargs = " + + str({k: v if k != "model_builder" else object.__repr__(v) for k, v in kwargs.items()}) ) if self._recipe_cfg is None: raise RuntimeError("'recipe_cfg' is not initialized yet. 
call prepare() method before calling this method") @@ -137,7 +140,7 @@ def _run_task(self, stage_module, mode=None, dataset=None, **kwargs): deepcopy(self._recipe_cfg), self._mode, stage_type=stage_module, - common_cfg=common_cfg + common_cfg=common_cfg, ) # run workflow with task specific model config and data config @@ -270,11 +273,15 @@ def _initialize(self, options): # if num_workers is 0, persistent_workers must be False data_cfg = self._recipe_cfg.data - if data_cfg.get("workers_per_gpu", 0) == 0: - for subset in ["train", "val", "test"]: - dataloader_cfg = data_cfg.get( - f"{subset}_dataloader", ConfigDict() - ) + for subset in ["train", "val", "test", "unlabeled"]: + if subset not in data_cfg: + continue + dataloader_cfg = data_cfg.get(f"{subset}_dataloader", ConfigDict()) + workers_per_gpu = dataloader_cfg.get( + "workers_per_gpu", + data_cfg.get("workers_per_gpu", 0), + ) + if workers_per_gpu == 0: dataloader_cfg["persistent_workers"] = False data_cfg[f"{subset}_dataloader"] = dataloader_cfg @@ -329,6 +336,11 @@ def _init_recipe_hparam(self) -> dict: else: early_stop = False + if self._recipe_cfg.runner.get("type").startswith("IterBasedRunner"): # type: ignore + runner = ConfigDict(max_iters=int(params.num_iters)) + else: + runner = ConfigDict(max_epochs=int(params.num_iters)) + return ConfigDict( optimizer=ConfigDict(lr=params.learning_rate), lr_config=lr_config, @@ -337,7 +349,7 @@ def _init_recipe_hparam(self) -> dict: samples_per_gpu=int(params.batch_size), workers_per_gpu=int(params.num_workers), ), - runner=ConfigDict(max_epochs=int(params.num_iters)), + runner=runner, ) def _update_stage_module(self, stage_module: str): @@ -354,7 +366,7 @@ def patch_input_preprocessing(deploy_cfg): normalize_cfg = get_configs_by_dict( self._recipe_cfg.data.test.pipeline, - dict(type="Normalize") + dict(type="Normalize"), ) assert len(normalize_cfg) == 1 normalize_cfg = normalize_cfg[0] @@ -397,7 +409,7 @@ def patch_input_preprocessing(deploy_cfg): def patch_input_shape(deploy_cfg): resize_cfg = get_configs_by_dict( self._recipe_cfg.data.test.pipeline, - dict(type="Resize") + dict(type="Resize"), ) assert len(resize_cfg) == 1 resize_cfg = resize_cfg[0] @@ -407,9 +419,7 @@ def patch_input_shape(deploy_cfg): assert all(isinstance(i, int) and i > 0 for i in size) # default is static shape to prevent an unexpected error # when converting to OpenVINO IR - deploy_cfg.backend_config.model_inputs = [ - ConfigDict(opt_shapes=ConfigDict(input=[1, 3, *size])) - ] + deploy_cfg.backend_config.model_inputs = [ConfigDict(opt_shapes=ConfigDict(input=[1, 3, *size]))] patch_input_preprocessing(deploy_cfg) if not deploy_cfg.backend_config.get("model_inputs", []): diff --git a/otx/algorithms/common/utils/__init__.py b/otx/algorithms/common/utils/__init__.py index d1ee06274a5..421669cf205 100644 --- a/otx/algorithms/common/utils/__init__.py +++ b/otx/algorithms/common/utils/__init__.py @@ -20,7 +20,7 @@ TrainingProgressCallback, ) from .data import get_cls_img_indices, get_old_new_img_indices -from .utils import get_task_class, load_template, get_arg_spec +from .utils import get_arg_spec, get_task_class, load_template __all__ = [ "get_cls_img_indices", diff --git a/otx/algorithms/common/utils/data.py b/otx/algorithms/common/utils/data.py index 6bd89225300..399a0cbbaab 100644 --- a/otx/algorithms/common/utils/data.py +++ b/otx/algorithms/common/utils/data.py @@ -95,10 +95,9 @@ def load_unlabeled_dataset_items( return dataset_items -def get_unlabeled_dataset(dataset: DatasetEntity): - """Get unlabeled dataset 
from otx dataset.""" - unlabeled_data = dataset.get_subset(Subset.UNLABELED) - return unlabeled_data if len(unlabeled_data) > 0 else None +def get_dataset(dataset: DatasetEntity, subset: Subset): + data = dataset.get_subset(subset) + return data if len(data) > 0 else None def get_cls_img_indices(labels, dataset): diff --git a/otx/algorithms/common/utils/utils.py b/otx/algorithms/common/utils/utils.py index 2bcfa91d64e..cc0fed54778 100644 --- a/otx/algorithms/common/utils/utils.py +++ b/otx/algorithms/common/utils/utils.py @@ -16,7 +16,7 @@ import importlib import inspect -from typing import Optional, Tuple, Callable +from typing import Callable, Optional, Tuple import yaml @@ -40,7 +40,10 @@ def get_task_class(path: str): @check_input_parameters_type() -def get_arg_spec(fn: Callable, depth: Optional[int] = None) -> Tuple[str]: +def get_arg_spec( # noqa: C901 + fn: Callable, + depth: Optional[int] = None, +) -> Tuple[str, ...]: args = set() cls_obj = None @@ -56,7 +59,7 @@ def get_arg_spec(fn: Callable, depth: Optional[int] = None) -> Tuple[str]: cls_obj = globals()[".".join(names[:-1])] if cls_obj: - for obj in cls_obj.mro(): + for obj in cls_obj.mro(): # type: ignore fn_obj = cls_obj.__dict__.get(fn_name, None) if fn_obj is not None: if isinstance(fn_obj, staticmethod): @@ -69,7 +72,7 @@ def get_arg_spec(fn: Callable, depth: Optional[int] = None) -> Tuple[str]: args.update(spec.args) else: # method, classmethod - for i, obj in enumerate(cls_obj.mro()): + for i, obj in enumerate(cls_obj.mro()): # type: ignore if depth is not None and i == depth: break method = getattr(obj, fn_name, None) diff --git a/otx/algorithms/detection/adapters/mmdet/__init__.py b/otx/algorithms/detection/adapters/mmdet/__init__.py index 7a5545c232d..a69691e4de9 100644 --- a/otx/algorithms/detection/adapters/mmdet/__init__.py +++ b/otx/algorithms/detection/adapters/mmdet/__init__.py @@ -3,11 +3,16 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 + from .data import MPADetDataset +# fmt: off +# isort: off # FIXME: openvino pot library adds stream handlers to root logger # which makes annoying duplicated logging from mmdet.utils import get_root_logger get_root_logger().propagate = False +# isort:on +# fmt: on __all__ = ["MPADetDataset"] diff --git a/otx/algorithms/detection/adapters/mmdet/data/dataset.py b/otx/algorithms/detection/adapters/mmdet/data/dataset.py index 5635c633964..e5015c66bac 100644 --- a/otx/algorithms/detection/adapters/mmdet/data/dataset.py +++ b/otx/algorithms/detection/adapters/mmdet/data/dataset.py @@ -71,27 +71,14 @@ def get_annotation_mmdet_format( continue class_indices = [ - label_idx[label.id] - for label in annotation.get_labels(include_empty=False) - if label.domain == domain + label_idx[label.id] for label in annotation.get_labels(include_empty=False) if label.domain == domain ] n = len(class_indices) - gt_bboxes.extend( - [ - [box.x1 * width, box.y1 * height, box.x2 * width, box.y2 * height] - for _ in range(n) - ] - ) + gt_bboxes.extend([[box.x1 * width, box.y1 * height, box.x2 * width, box.y2 * height] for _ in range(n)]) if domain != Domain.DETECTION: polygon = ShapeFactory.shape_as_polygon(annotation.shape) - polygon = np.array( - [ - p - for point in polygon.points - for p in [point.x * width, point.y * height] - ] - ) + polygon = np.array([p for point in polygon.points for p in [point.x * width, point.y * height]]) gt_polygons.extend([[polygon] for _ in range(n)]) gt_labels.extend(class_indices) @@ -99,9 +86,7 @@ def get_annotation_mmdet_format( 
ann_info = dict( bboxes=np.array(gt_bboxes, dtype=np.float32).reshape(-1, 4), labels=np.array(gt_labels, dtype=int), - masks=PolygonMasks(gt_polygons, height=height, width=width) - if gt_polygons - else [], + masks=PolygonMasks(gt_polygons, height=height, width=width) if gt_polygons else [], ) else: ann_info = dict( @@ -152,9 +137,7 @@ def __getitem__(self, index): dataset = self.otx_dataset item = dataset[index] - ignored_labels = np.array( - [self.label_idx[lbs.id] for lbs in item.ignored_labels] - ) + ignored_labels = np.array([self.label_idx[lbs.id] for lbs in item.ignored_labels]) height, width = item.height, item.width @@ -198,9 +181,7 @@ def __init__( # small image size, since otherwise reading the whole dataset during initialization will be required. self.data_infos = MPADetDataset._DataInfoProxy(otx_dataset, labels) - self.proposals = ( - None # Attribute expected by mmdet but not used for OTX datasets - ) + self.proposals = None # Attribute expected by mmdet but not used for OTX datasets if not test_mode: self._set_group_flag() @@ -324,9 +305,7 @@ def evaluate( eval_results["mAP"] = sum(mean_aps) / len(mean_aps) elif metric == "recall": gt_bboxes = [ann["bboxes"] for ann in annotations] - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thr, logger=logger - ) + recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger) for i, num in enumerate(proposal_nums): for j, iou in enumerate(iou_thrs): eval_results[f"recall@{num}@{iou}"] = recalls[i, j] @@ -359,25 +338,17 @@ def evaluate( labels=self.CLASSES, ) eval_results["MAE best score"] = float(f"{mae.mae.value:.3f}") - eval_results["MAE conf thres"] = float( - f"{mae.best_confidence_threshold.value:.3f}" - ) + eval_results["MAE conf thres"] = float(f"{mae.best_confidence_threshold.value:.3f}") print(f"MAE best score = {mae.mae.value:.3f}") print(f"MAE conf thres = {mae.best_confidence_threshold.value:.3f}") for class_name, score_metric in mae.mae_per_label.items(): - eval_results[f"MAE:{class_name}"] = float( - f"{score_metric.value:.3f}" - ) + eval_results[f"MAE:{class_name}"] = float(f"{score_metric.value:.3f}") print(f"MAE:{class_name} = {score_metric.value:.3f}") - eval_results["Relative MAE best score"] = float( - f"{mae.relative_mae.value:.3f}" - ) + eval_results["Relative MAE best score"] = float(f"{mae.relative_mae.value:.3f}") print(f"Relative MAE best score = {mae.relative_mae.value:.3f}") for class_name, score_metric in mae.relative_mae_per_label.items(): - eval_results[f"Relative MAE:{class_name}"] = float( - f"{score_metric.value:.3f}" - ) + eval_results[f"Relative MAE:{class_name}"] = float(f"{score_metric.value:.3f}") print(f"Relative MAE:{class_name} = {score_metric.value:.3f}") eval_results["mae"] = eval_results["MAE best score"] eval_results["mae%"] = eval_results["Relative MAE best score"] diff --git a/otx/algorithms/detection/adapters/mmdet/evaluation/__init__.py b/otx/algorithms/detection/adapters/mmdet/evaluation/__init__.py index a36021e7539..745f2346a0a 100644 --- a/otx/algorithms/detection/adapters/mmdet/evaluation/__init__.py +++ b/otx/algorithms/detection/adapters/mmdet/evaluation/__init__.py @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # -from .mean_ap_seg import eval_segm from .mae import CustomMAE +from .mean_ap_seg import eval_segm __all__ = [ "eval_segm", diff --git a/otx/algorithms/detection/adapters/mmdet/evaluation/mae.py b/otx/algorithms/detection/adapters/mmdet/evaluation/mae.py index 9ca53bb0084..8b4ba8d1e5b 100644 --- 
a/otx/algorithms/detection/adapters/mmdet/evaluation/mae.py +++ b/otx/algorithms/detection/adapters/mmdet/evaluation/mae.py @@ -1,13 +1,19 @@ -import os +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + import math +import os +from collections import OrderedDict +from typing import Dict, List, Optional, Union + import numpy as np -from collections import defaultdict, OrderedDict -from typing import Dict, List, Optional, Tuple, Union + # from prettytable import PrettyTable class ScoreMetric: - """ Score Metric """ + """Score Metric""" def __init__(self, name: str, value: float): self.name = name @@ -30,16 +36,14 @@ def type(): class CurveMetric: - """ Curve Metric """ + """Curve Metric""" def __init__(self, name: str, ys: List[float], xs: Optional[List[float]] = None): self.name = name self.__ys = ys if xs is not None: if len(xs) != len(self.__ys): - raise ValueError( - f"Curve error must contain the same length for x and y: ({len(xs)} vs {len(self.ys)})" - ) + raise ValueError(f"Curve error must contain the same length for x and y: ({len(xs)} vs {len(self.ys)})") self.__xs = xs else: # if x values are not provided, set them to the 1-index of the y values @@ -124,13 +128,13 @@ def __init__( class MAE: - """ Mean Absolute Error Metric + """Mean Absolute Error Metric Returns: _type_: _description_ """ - def __init__(self, cocoDt, cocoGt, vary_confidence_threshold: bool = False, metric='mae', show_table=False): - assert metric in ['mae', 'mae%'] + def __init__(self, cocoDt, cocoGt, vary_confidence_threshold: bool = False, metric="mae", show_table=False): + assert metric in ["mae", "mae%"] self.metric = metric self.box_score_index = 4 self.box_class_index = 5 @@ -158,8 +162,7 @@ def __init__(self, cocoDt, cocoGt, vary_confidence_threshold: bool = False, metr for class_idx, class_name in classes.items(): mae_per_label[class_name] = ScoreMetric(name=class_name, value=result.best_mae_per_class[class_name]) relative_mae_per_label[class_name] = ScoreMetric( - name=class_name, - value=result.best_relative_mae_per_class[class_name] + name=class_name, value=result.best_relative_mae_per_class[class_name] ) self._mae_per_label = mae_per_label self._relative_mae_per_label = relative_mae_per_label @@ -180,16 +183,14 @@ def __init__(self, cocoDt, cocoGt, vary_confidence_threshold: bool = False, metr self._mae_per_confidence = mae_per_confidence self._best_confidence_threshold = best_confidence_threshold - def prepare(self, cocoAPI) -> OrderedDict: - new_annotations = OrderedDict() + def prepare(self, cocoAPI) -> Dict[str, list]: + new_annotations: Dict[str, list] = OrderedDict() for image_id, bboxes in cocoAPI.imgToAnns.items(): new_annotations[image_id] = [] for b in bboxes: x1, y1, w, h = b["bbox"] score = b["score"] if "score" in b else 1.0 - new_annotations[image_id].append( - [x1, y1, x1 + w, y1 + h, score, b["category_id"]] - ) + new_annotations[image_id].append([x1, y1, x1 + w, y1 + h, score, b["category_id"]]) for image_id in cocoAPI.getImgIds(): if image_id not in new_annotations: new_annotations[image_id] = [] @@ -222,9 +223,9 @@ def evaluate_detections( best_relative_mae = results_per_confidence.best_relative_mae for _, class_name in classes.items(): - if self.metric == 'mae': + if self.metric == "mae": curve = results_per_confidence.mae_curve[class_name] - elif self.metric == 'mae%': + elif self.metric == "mae%": curve = results_per_confidence.relative_mae_curve[class_name] idx = np.argmin(curve) best_mae_per_class[class_name] = 
results_per_confidence.mae_curve[class_name][idx] @@ -246,7 +247,7 @@ def get_results_per_confidence( classes: Dict[int, str], confidence_range: List[float], img_ids, - all_classes_name: str = "All Classes" + all_classes_name: str = "All Classes", ) -> _AggregatedResults: result = _AggregatedResults( @@ -256,9 +257,10 @@ def get_results_per_confidence( all_classes_relative_mae_curve=[], best_y_pred=[], best_y_true=[], - best_mae=float('inf'), - best_relative_mae=float('inf'), - best_threshold=0.1) + best_mae=float("inf"), + best_relative_mae=float("inf"), + best_threshold=0.1, + ) for confidence_threshold in np.arange(*confidence_range): result_point = self.evaluate_classes( @@ -266,7 +268,7 @@ def get_results_per_confidence( predicted_boxes_per_image=predicted_boxes_per_image, classes=classes, img_ids=img_ids, - conf_thresold=confidence_threshold + conf_thresold=confidence_threshold, ) all_classes_mae = result_point[all_classes_name].mae all_classes_relative_mae = result_point[all_classes_name].relative_mae @@ -278,10 +280,10 @@ def get_results_per_confidence( result.mae_curve[class_name].append(result_point[class_name].mae) result.relative_mae_curve[class_name].append(result_point[class_name].relative_mae) - if self.metric == 'mae': + if self.metric == "mae": global_best = all_classes_mae curr_best = result.best_mae - elif self.metric == 'mae%': + elif self.metric == "mae%": global_best = all_classes_relative_mae curr_best = result.best_relative_mae @@ -293,9 +295,14 @@ def get_results_per_confidence( result.best_y_true = y_true return result - def evaluate_classes(self, ground_truth_boxes_per_image: Dict, predicted_boxes_per_image: Dict, - classes: Dict[int, str], img_ids: List[Union[int, str]], conf_thresold: float - ) -> Dict[str, _Metrics]: + def evaluate_classes( + self, + ground_truth_boxes_per_image: Dict, + predicted_boxes_per_image: Dict, + classes: Dict[int, str], + img_ids: List[Union[int, str]], + conf_thresold: float, + ) -> Dict[str, _Metrics]: all_classes_name = "All Classes" result: Dict[str, _Metrics] = {} @@ -329,7 +336,7 @@ def evaluate_classes(self, ground_truth_boxes_per_image: Dict, predicted_boxes_p # for all classes result[all_classes_name] = _Metrics( mae=np.average(diffs), - relative_mae=np.average(np.sum(diffs)/(np.sum(y_trues) + 1e-16)), + relative_mae=np.average(np.sum(diffs) / (np.sum(y_trues) + 1e-16)), y_pred=y_preds, y_true=y_trues, ) @@ -340,13 +347,13 @@ def get_mae( class_ground_truth_boxes_per_image: Dict, class_predicted_boxes_per_image: Dict, img_ids: List[Union[int, str]], - ) -> Tuple[_Metrics, _ResultCounters]: + ) -> _Metrics: y_pred = np.array([len(class_predicted_boxes_per_image[idx]) for idx in img_ids]) y_true = np.array([len(class_ground_truth_boxes_per_image[idx]) for idx in img_ids]) diff = np.abs(y_pred - y_true) - relative_ae = np.sum(diff)/(np.sum(y_true) + 1e-16) + relative_ae = np.sum(diff) / (np.sum(y_true) + 1e-16) results = _Metrics(mae=np.average(diff), relative_mae=np.average(relative_ae), y_pred=y_pred, y_true=y_true) return results @@ -407,10 +414,17 @@ def best_confidence_threshold(self) -> Optional[ScoreMetric]: class CustomMAE(MAE): - - def __init__(self, ote_dataset, prediction, ground_truth, vary_confidence_threshold: bool = False, - labels: list = [], metric='mae', show_table=False): - assert metric in ['mae', 'mae%'] + def __init__( + self, + ote_dataset, + prediction, + ground_truth, + vary_confidence_threshold: bool = False, + labels: list = [], + metric="mae", + show_table=False, + ): + assert metric in ["mae", 
"mae%"] self.metric = metric self.box_score_index = 0 self.box_class_index = 1 @@ -438,7 +452,8 @@ def __init__(self, ote_dataset, prediction, ground_truth, vary_confidence_thresh for class_idx, class_name in classes.items(): mae_per_label[class_name] = ScoreMetric(name=class_name, value=result.best_mae_per_class[class_name]) relative_mae_per_label[class_name] = ScoreMetric( - name=class_name, value=result.best_relative_mae_per_class[class_name]) + name=class_name, value=result.best_relative_mae_per_class[class_name] + ) self._mae_per_label = mae_per_label self._relative_mae_per_label = relative_mae_per_label @@ -478,8 +493,8 @@ def prepare_gt(self, annotation, img_names): for img_name, anno in zip(img_names, annotation): if img_name not in ground_truth_per_image: ground_truth_per_image[img_name] = [] - if len(anno['labels']) == 0: + if len(anno["labels"]) == 0: continue - for label in anno['labels']: + for label in anno["labels"]: ground_truth_per_image[img_name].append((1.0, label)) return ground_truth_per_image diff --git a/otx/algorithms/detection/adapters/mmdet/evaluation/mean_ap_seg.py b/otx/algorithms/detection/adapters/mmdet/evaluation/mean_ap_seg.py index ebbe66033f3..fb8d2a56f24 100644 --- a/otx/algorithms/detection/adapters/mmdet/evaluation/mean_ap_seg.py +++ b/otx/algorithms/detection/adapters/mmdet/evaluation/mean_ap_seg.py @@ -1,21 +1,22 @@ +"""Evaluate mean AP for segmentation.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from multiprocessing import Pool +from typing import Dict, List + import mmcv import numpy as np import pycocotools.mask as mask_util from mmcv.utils import print_log -from multiprocessing import Pool -from terminaltables import AsciiTable -from typing import Dict, List - -from mmdet.core.mask.structures import PolygonMasks from mmdet.core.evaluation.class_names import get_classes from mmdet.core.evaluation.mean_ap import average_precision +from mmdet.core.mask.structures import PolygonMasks +from terminaltables import AsciiTable -def print_map_summary(mean_ap, - results, - dataset=None, - scale_ranges=None, - logger=None): +def print_map_summary(mean_ap, results, dataset=None, scale_ranges=None, logger=None): """Print mAP/mIoU and results of each class. A table will be printed to show the gts/dets/recall/AP/IoU of each class @@ -30,11 +31,11 @@ def print_map_summary(mean_ap, summary. See `mmcv.utils.print_log()` for details. Default: None. 
""" - if logger == 'silent': + if logger == "silent": return - if isinstance(results[0]['ap'], np.ndarray): - num_scales = len(results[0]['ap']) + if isinstance(results[0]["ap"], np.ndarray): + num_scales = len(results[0]["ap"]) else: num_scales = 1 @@ -48,11 +49,11 @@ def print_map_summary(mean_ap, num_gts = np.zeros((num_scales, num_classes), dtype=int) mious = np.zeros((num_scales, num_classes), dtype=np.float32) for i, cls_result in enumerate(results): - if cls_result['recall'].size > 0: - recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] - aps[:, i] = cls_result['ap'] - mious[:, i] = cls_result['miou'] - num_gts[:, i] = cls_result['num_gts'] + if cls_result["recall"].size > 0: + recalls[:, i] = np.array(cls_result["recall"], ndmin=2)[:, -1] + aps[:, i] = cls_result["ap"] + mious[:, i] = cls_result["miou"] + num_gts[:, i] = cls_result["num_gts"] if dataset is None: label_names = [str(i) for i in range(num_classes)] @@ -64,30 +65,29 @@ def print_map_summary(mean_ap, if not isinstance(mean_ap, list): mean_ap = [mean_ap] - header = ['class', 'gts', 'dets', 'recall', 'ap', 'miou'] + header = ["class", "gts", "dets", "recall", "ap", "miou"] for i in range(num_scales): if scale_ranges is not None: - print_log(f'Scale range {scale_ranges[i]}', logger=logger) + print_log(f"Scale range {scale_ranges[i]}", logger=logger) table_data = [header] for j in range(num_classes): row_data = [ - label_names[j], num_gts[i, j], results[j]['num_dets'], - f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}', f'{mious[i, j]:.3f}' + label_names[j], + num_gts[i, j], + results[j]["num_dets"], + f"{recalls[i, j]:.3f}", + f"{aps[i, j]:.3f}", + f"{mious[i, j]:.3f}", ] table_data.append(row_data) - table_data.append( - ['mAP', '', '', '', f'{mean_ap[i]:.3f}', f'{np.mean(mious[i]):.3f}']) + table_data.append(["mAP", "", "", "", f"{mean_ap[i]:.3f}", f"{np.mean(mious[i]):.3f}"]) table = AsciiTable(table_data) table.inner_footing_row_border = True - print_log('\n' + table.table, logger=logger) + print_log("\n" + table.table, logger=logger) -def tpfpmiou_func( - det_masks: List[Dict], - gt_masks: List[Dict], - cls_scores, - iou_thr=0.5): - """ Calculate Mean Intersection and Union (mIoU) and AP across +def tpfpmiou_func(det_masks: List[Dict], gt_masks: List[Dict], cls_scores, iou_thr=0.5): + """Calculate Mean Intersection and Union (mIoU) and AP across predicted masks and GT masks. 
Args: @@ -163,24 +163,20 @@ def get_cls_results(det_results, annotations, class_id): cls_dets.append([]) for det_mask in det_masks: if isinstance(det_mask, np.ndarray): - cls_dets[i].append( - mask_util.encode( - np.array( - det_mask[:, :, np.newaxis], order='F', dtype='uint8'))[0]) + cls_dets[i].append(mask_util.encode(np.array(det_mask[:, :, np.newaxis], order="F", dtype="uint8"))[0]) else: cls_dets[i].append(det_mask) cls_gts = [] for ann in annotations: - gt_inds = ann['labels'] == class_id - if isinstance(ann['masks'], PolygonMasks): - masks = ann['masks'].to_ndarray()[gt_inds] + gt_inds = ann["labels"] == class_id + if isinstance(ann["masks"], PolygonMasks): + masks = ann["masks"].to_ndarray()[gt_inds] encoded_masks = [ - mask_util.encode( - np.array(m[:, :, np.newaxis], order='F', dtype='uint8') - )[0] for m in masks] + mask_util.encode(np.array(m[:, :, np.newaxis], order="F", dtype="uint8"))[0] for m in masks + ] cls_gts.append(encoded_masks) - elif isinstance(ann['masks'], list): + elif isinstance(ann["masks"], list): cls_gts.append([]) else: raise RuntimeError("Unknown annotation format") @@ -188,14 +184,7 @@ def get_cls_results(det_results, annotations, class_id): return cls_dets, cls_gts, cls_scores -def eval_segm( - det_results, - annotations, - iou_thr=0.5, - dataset=None, - logger=None, - nproc=4, - metric='mAP'): +def eval_segm(det_results, annotations, iou_thr=0.5, dataset=None, logger=None, nproc=4, metric="mAP"): """Evaluate mAP/mIoU of a dataset. Args: @@ -233,10 +222,7 @@ def eval_segm( cls_dets, cls_gts, cls_scores = cls_results # compute tp and fp for each image with multiple processes - tpfpmiou = pool.starmap( - tpfpmiou_func, - zip(cls_dets, cls_gts, cls_scores, - [iou_thr for _ in range(num_imgs)])) + tpfpmiou = pool.starmap(tpfpmiou_func, zip(cls_dets, cls_gts, cls_scores, [iou_thr for _ in range(num_imgs)])) tp, fp, miou = tuple(zip(*tpfpmiou)) # sort all det bboxes by score, also sort tp and fp @@ -254,30 +240,31 @@ def eval_segm( precisions = tp / np.maximum((tp + fp), eps) miou = np.mean(np.stack(miou)) # calculate AP - mode = 'area' if dataset != 'voc07' else '11points' + mode = "area" if dataset != "voc07" else "11points" ap = average_precision(recalls, precisions, mode) - eval_results.append({ - 'num_gts': num_gts, - 'num_dets': num_dets, - 'recall': recalls, - 'precision': precisions, - 'ap': ap, - 'miou': miou - }) + eval_results.append( + { + "num_gts": num_gts, + "num_dets": num_dets, + "recall": recalls, + "precision": precisions, + "ap": ap, + "miou": miou, + } + ) pool.close() - metrics = {'mAP': 0.0, 'mIoU': 0.0} + metrics = {"mAP": 0.0, "mIoU": 0.0} mious, aps = [], [] for cls_result in eval_results: - if cls_result['num_gts'] > 0: - aps.append(cls_result['ap']) - mious.append(cls_result['miou']) + if cls_result["num_gts"] > 0: + aps.append(cls_result["ap"]) + mious.append(cls_result["miou"]) mean_ap = np.array(aps).mean().item() if aps else 0.0 mean_miou = np.array(mious).mean().item() if mious else 0.0 - metrics['mAP'] = mean_ap - metrics['mIoU'] = mean_miou + metrics["mAP"] = mean_ap + metrics["mIoU"] = mean_miou - print_map_summary( - mean_ap, eval_results, dataset, None, logger=logger) + print_map_summary(mean_ap, eval_results, dataset, None, logger=logger) return metrics[metric], eval_results diff --git a/otx/algorithms/detection/adapters/mmdet/nncf/__init__.py b/otx/algorithms/detection/adapters/mmdet/nncf/__init__.py index 32d95b31698..e654b9660e1 100644 --- a/otx/algorithms/detection/adapters/mmdet/nncf/__init__.py +++ 
b/otx/algorithms/detection/adapters/mmdet/nncf/__init__.py @@ -2,10 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 # -from .builder import ( - build_nncf_detector, -) +# flake8: noqa +from .builder import build_nncf_detector from .patches import * __all__ = [ diff --git a/otx/algorithms/detection/adapters/mmdet/nncf/builder.py b/otx/algorithms/detection/adapters/mmdet/nncf/builder.py index 593ddf407c5..0417b119320 100644 --- a/otx/algorithms/detection/adapters/mmdet/nncf/builder.py +++ b/otx/algorithms/detection/adapters/mmdet/nncf/builder.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -from copy import deepcopy from functools import partial from typing import Optional, Union @@ -25,7 +24,6 @@ from otx.algorithms.common.adapters.nncf.utils import no_nncf_trace from otx.algorithms.detection.adapters.mmdet.utils import build_detector - logger = get_root_logger() @@ -41,16 +39,19 @@ def build_nncf_detector( from mmdet.apis import multi_gpu_test, single_gpu_test from mmdet.apis.inference import LoadImage from mmdet.datasets import build_dataloader as mmdet_build_dataloader - from mmdet.datasets import build_dataset + from mmdet.datasets import build_dataset as mmdet_build_dataset from mmdet.datasets.pipelines import Compose from nncf.torch.dynamic_graph.io_handling import nncf_model_input from otx.algorithms.common.adapters.mmcv.nncf import ( - build_dataloader, get_fake_input, model_eval, wrap_nncf_model, ) + from otx.algorithms.common.adapters.mmcv.utils.builder import ( + build_dataloader, + build_dataset, + ) if cfg_options is not None: config.merge_from_dict(cfg_options) @@ -60,9 +61,7 @@ def build_nncf_detector( config.load_from = None assert checkpoint is not None - model = build_detector( - config, train_cfg=train_cfg, test_cfg=test_cfg, from_scratch=True - ) + model = build_detector(config, train_cfg=train_cfg, test_cfg=test_cfg, from_scratch=True) model = model.to(device) state_dict = CheckpointLoader.load_checkpoint(checkpoint, map_location=device) @@ -87,21 +86,29 @@ def build_nncf_detector( data_to_build_nncf = datasets[0][0].numpy init_dataloader = build_dataloader( + build_dataset( + config, + subset="train", + dataset_builder=mmdet_build_dataset, + ), config, subset="train", - distributed=distributed, dataloader_builder=mmdet_build_dataloader, - dataset_builder=build_dataset, + distributed=distributed, ) val_dataloader = None if is_acc_aware: val_dataloader = build_dataloader( + build_dataset( + config, + subset="val", + dataset_builder=mmdet_build_dataset, + ), config, subset="val", - distributed=distributed, dataloader_builder=mmdet_build_dataloader, - dataset_builder=build_dataset, + distributed=distributed, ) model_eval_fn = partial( @@ -133,9 +140,7 @@ def build_nncf_detector( # When we manage to enable NNCF compression for sufficiently many models, # we should keep one choice only. nncf_compress_postprocessing = config.get("nncf_compress_postprocessing") - logger.debug( - "set should_compress_postprocessing=" f"{nncf_compress_postprocessing}" - ) + logger.debug(f"set should_compress_postprocessing={nncf_compress_postprocessing}") else: # TODO: Do we have to keep this configuration? 
# This configuration is not enabled in forked mmdetection library in the first place @@ -159,13 +164,9 @@ def _get_fake_data_for_forward(nncf_config): # Marking data as NNCF network input must be after device movement img = [nncf_model_input(i) for i in img] if nncf_compress_postprocessing: - logger.debug( - "NNCF will try to compress a postprocessing part of the model" - ) + logger.debug("NNCF will try to compress a postprocessing part of the model") else: - logger.debug( - "NNCF will NOT compress a postprocessing part of the model" - ) + logger.debug("NNCF will NOT compress a postprocessing part of the model") img = img[0] model(img) @@ -181,9 +182,7 @@ def _get_fake_data_for_forward(nncf_config): # update custom hooks custom_hooks = config.get("custom_hooks", []) - custom_hooks.append( - ConfigDict(type="CompressionHook", compression_ctrl=compression_ctrl) - ) + custom_hooks.append(ConfigDict(type="CompressionHook", compression_ctrl=compression_ctrl)) custom_hooks.append(ConfigDict({"type": "CancelTrainingHook"})) custom_hooks.append( ConfigDict( @@ -203,10 +202,7 @@ def _get_fake_data_for_forward(nncf_config): for hook in get_configs_by_dict(custom_hooks, dict(type="OTXProgressHook")): time_monitor = hook.get("time_monitor", None) - if ( - time_monitor - and getattr(time_monitor, "on_initialization_end", None) is not None - ): + if time_monitor and getattr(time_monitor, "on_initialization_end", None) is not None: time_monitor.on_initialization_end() return compression_ctrl, model diff --git a/otx/algorithms/detection/adapters/mmdet/nncf/patches.py b/otx/algorithms/detection/adapters/mmdet/nncf/patches.py index 5a52e7625c5..04b7e85b1f7 100644 --- a/otx/algorithms/detection/adapters/mmdet/nncf/patches.py +++ b/otx/algorithms/detection/adapters/mmdet/nncf/patches.py @@ -22,8 +22,7 @@ no_nncf_trace_wrapper, ) from otx.algorithms.common.adapters.nncf.patches import nncf_trace_context -from mpa.deploy.utils import is_mmdeploy_enabled - +from otx.mpa.deploy.utils import is_mmdeploy_enabled HEADS_TARGETS = dict( classes=( @@ -94,9 +93,7 @@ def wrap_register_module(self, fn, *args, **kwargs): # for mmdet defined heads -for head_cls in [BaseDenseHead, BaseMaskHead, BaseRoIHead] + list( - HEADS.module_dict.values() -): +for head_cls in [BaseDenseHead, BaseMaskHead, BaseRoIHead] + list(HEADS.module_dict.values()): wrap_mmdet_head(head_cls) # for mmdet defined bbox assigners @@ -124,7 +121,7 @@ def wrap_register_module(self, fn, *args, **kwargs): if is_mmdeploy_enabled(): - import mmdeploy.codebase.mmdet + import mmdeploy.codebase.mmdet # noqa: F401 from mmdeploy.core import FUNCTION_REWRITER from mmdeploy.core.rewriters.rewriter_utils import import_function @@ -135,6 +132,4 @@ def wrap_register_module(self, fn, *args, **kwargs): fn_name = fn_path.split(".")[-1] if should_wrap(obj_cls, fn_name, HEADS_TARGETS): for record_dict in record_dicts: - record_dict["_object"] = partial( - no_nncf_trace_wrapper, None, record_dict["_object"] - ) + record_dict["_object"] = partial(no_nncf_trace_wrapper, None, record_dict["_object"]) diff --git a/otx/algorithms/detection/adapters/mmdet/utils/__init__.py b/otx/algorithms/detection/adapters/mmdet/utils/__init__.py index 259c2b97a7f..a8eb263a1a1 100644 --- a/otx/algorithms/detection/adapters/mmdet/utils/__init__.py +++ b/otx/algorithms/detection/adapters/mmdet/utils/__init__.py @@ -3,15 +3,15 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from .builder import build_detector from .config_utils import ( cluster_anchors, - 
patch_recipe_config, patch_datasets, patch_evaluation, + patch_recipe_config, prepare_for_training, set_hyperparams, ) -from .builder import build_detector __all__ = [ "cluster_anchors", diff --git a/otx/algorithms/detection/adapters/mmdet/utils/builder.py b/otx/algorithms/detection/adapters/mmdet/utils/builder.py index 2af479f3c97..1572bcaee57 100644 --- a/otx/algorithms/detection/adapters/mmdet/utils/builder.py +++ b/otx/algorithms/detection/adapters/mmdet/utils/builder.py @@ -18,12 +18,13 @@ def build_detector( device: Union[str, torch.device] = "cpu", cfg_options: Optional[Union[Config, ConfigDict]] = None, from_scratch: bool = False, -): +) -> torch.nn.Module: """Creates a model, based on the configuration in config. Note that this function consumes/updates 'load_from' attribute of 'config'. """ from mmdet.models import build_detector as origin_build_detector + from mmdet.utils import get_root_logger model_cfg = deepcopy(config.model) @@ -36,7 +37,7 @@ def build_detector( checkpoint = checkpoint if checkpoint else config.pop("load_from", None) if checkpoint is not None and not from_scratch: - load_checkpoint(model, checkpoint, map_location=device) + load_checkpoint(model, checkpoint, map_location=device, logger=get_root_logger()) config.load_from = None else: config.load_from = checkpoint diff --git a/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py b/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py index 32498c3f906..de8ec59d6a5 100644 --- a/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py +++ b/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py @@ -15,23 +15,19 @@ # and limitations under the License. import math -from collections import defaultdict -from collections.abc import Mapping -from typing import List, Optional, Union +from typing import List, Union -import torch from mmcv import Config, ConfigDict -from mmdet.models.detectors import BaseDetector from otx.algorithms.common.adapters.mmcv.utils import ( + get_configs_by_keys, + get_dataset_configs, get_meta_keys, is_epoch_based_runner, patch_color_conversion, prepare_work_dir, - get_dataset_configs, - get_configs_by_keys, - update_config, remove_from_config, + update_config, ) from otx.algorithms.detection.configs.base import DetectionConfig from otx.algorithms.detection.utils.data import ( @@ -96,6 +92,7 @@ def patch_model_config( config: Config, labels: List[LabelEntity], ): + """Patch model config""" set_num_classes(config, len(labels)) @@ -194,6 +191,7 @@ def set_data_classes(config: Config, labels: List[LabelEntity]): @check_input_parameters_type() def set_num_classes(config: Config, num_classes: int): + """Set num classes""" # Set proper number of classes in model's detection heads. 
head_names = ("mask_head", "bbox_head", "segm_head") if "roi_head" in config.model: @@ -217,7 +215,7 @@ def patch_datasets( config: Config, domain: Domain = Domain.DETECTION, subsets: List[str] = ["train", "val", "test", "unlabeled"], - **kwargs + **kwargs, ): """Update dataset configs.""" @@ -237,9 +235,7 @@ def update_pipeline(cfg): for subset in subsets: if subset not in config.data: continue - config.data[f"{subset}_dataloader"] = config.data.get( - f"{subset}_dataloader", ConfigDict() - ) + config.data[f"{subset}_dataloader"] = config.data.get(f"{subset}_dataloader", ConfigDict()) cfgs = get_dataset_configs(config, subset) for cfg in cfgs: @@ -274,6 +270,7 @@ def patch_evaluation(config: Config): def should_cluster_anchors(model_cfg: Config): + """Check whether to cluster anchors or not.""" if ( hasattr(model_cfg.model, "bbox_head") and hasattr(model_cfg.model.bbox_head, "anchor_generator") diff --git a/otx/algorithms/detection/adapters/openvino/model_wrappers/openvino_models.py b/otx/algorithms/detection/adapters/openvino/model_wrappers/openvino_models.py index b20e5612aae..ad0425fe8ad 100644 --- a/otx/algorithms/detection/adapters/openvino/model_wrappers/openvino_models.py +++ b/otx/algorithms/detection/adapters/openvino/model_wrappers/openvino_models.py @@ -103,7 +103,7 @@ def __init__(self, model_adapter, configuration=None, preload=False): self.image_info_blob_name = self.image_info_blob_names[0] if len(self.image_info_blob_names) == 1 else None self.output_parser = BatchBoxesLabelsParser( self.outputs, - self.inputs[self.image_blob_name].shape[2:][::-1] + self.inputs[self.image_blob_name].shape[2:][::-1], ) def _get_outputs(self) -> Dict: @@ -119,7 +119,7 @@ class BatchBoxesLabelsParser: - def __init__(self, layers, input_size, labels_layer='labels', default_label=0): + def __init__(self, layers, input_size, labels_layer="labels", default_label=0): try: self.labels_layer = find_layer_by_name(labels_layer, layers) except ValueError: @@ -133,9 +133,9 @@ def __init__(self, layers, input_size, labels_layer='labels', default_label=0): def find_layer_bboxes_output(layers): filter_outputs = [name for name, data in layers.items() if len(data.shape) == 3 and data.shape[-1] == 5] if not filter_outputs: - raise ValueError('Suitable output with bounding boxes is not found') + raise ValueError("Suitable output with bounding boxes is not found") if len(filter_outputs) > 1: - raise ValueError('More than 1 candidate for output with bounding boxes.') + raise ValueError("More than 1 candidate for output with bounding boxes.") return filter_outputs[0] def __call__(self, outputs): @@ -153,9 +153,5 @@ def __call__(self, outputs): labels = np.full(len(bboxes), self.default_label, dtype=bboxes.dtype) labels = labels.squeeze(0) - detections = [ - Detection(*bbox, score, label) for label, - score, - bbox in zip(labels, scores, bboxes) - ] + detections = [Detection(*bbox, score, label) for label, score, bbox in zip(labels, scores, bboxes)] return detections diff --git a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/data_pipeline.py b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/data_pipeline.py index d3a5fd5c844..811445ef098 100644 --- a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/data_pipeline.py +++ b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/data_pipeline.py @@ -34,7 +34,7 @@ hue_delta=18, ), dict(type="RandomFlip", flip_ratio=0.5), - dict(type="Resize", img_scale=img_scale, keep_ratio=True), + 
dict(type="Resize", img_scale=__img_size, keep_ratio=True), dict(type="Pad", pad_to_square=True, pad_val=114.0), dict(type="Normalize", **__img_norm_cfg), dict(type="DefaultFormatBundle"), @@ -65,7 +65,6 @@ data = dict( samples_per_gpu=__samples_per_gpu, workers_per_gpu=4, - num_classes=2, train=dict( # make sure to clean up recipe dataset _delete_=True, @@ -80,7 +79,6 @@ ], ), pipeline=train_pipeline, - dynamic_scale=__img_size, ), val=dict( type=__dataset_type, diff --git a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/deployment.py b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/deployment.py index 6e667502cea..b7f3953101d 100644 --- a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/deployment.py +++ b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of YOLOX model for Detection Task.""" + _base_ = ["../../base/deployments/base_detection_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/detection/configs/detection/mobilenetv2_atss/deployment.py b/otx/algorithms/detection/configs/detection/mobilenetv2_atss/deployment.py index 9d4b4da4db7..ce46c74465f 100644 --- a/otx/algorithms/detection/configs/detection/mobilenetv2_atss/deployment.py +++ b/otx/algorithms/detection/configs/detection/mobilenetv2_atss/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of ATSS model for Detection Task.""" + _base_ = ["../../base/deployments/base_detection_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/deployment.py b/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/deployment.py index 0bc6f75ab5f..c11deeb7db3 100644 --- a/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/deployment.py +++ b/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of SSD model for Detection Task.""" + _base_ = ["../../base/deployments/base_detection_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/deployment.py b/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/deployment.py index bfa20d3ce93..5992c36b43a 100644 --- a/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/deployment.py +++ b/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of EfficientNetB2B model for Instance-Seg Task.""" + _base_ = ["../../base/deployments/base_instance_segmentation_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml b/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml index d01b363df06..9447bab4d8d 100644 --- a/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml +++ b/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml @@ -14,6 +14,7 @@ framework: OTEDetection v2.9.1 entrypoints: base: otx.algorithms.detection.tasks.DetectionTrainTask openvino: otx.algorithms.detection.tasks.OpenVINODetectionTask + nncf: otx.algorithms.detection.tasks.DetectionNNCFTask data_pipeline_path: ./data_pipeline.py # Capabilities. 
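# Note (illustrative sketch, not part of this patch): with the "nncf" entrypoint
# registered above, the optimization task class can be resolved from the template
# string like the other entrypoints. Roughly, assuming an already-built
# TaskEnvironment instance named `environment`:
#
#     from otx.algorithms.common.utils import get_task_class
#
#     # the dotted path comes straight from entrypoints.nncf in this template
#     nncf_task_cls = get_task_class("otx.algorithms.detection.tasks.DetectionNNCFTask")
#     nncf_task = nncf_task_cls(task_environment=environment)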
@@ -40,7 +41,7 @@ hyper_parameters: default_value: 2 nncf_optimization: enable_quantization: - default_value: false + default_value: true enable_pruning: default_value: false pruning_supported: diff --git a/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/deployment.py b/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/deployment.py index fbde6cf904f..2b5a3b79a35 100644 --- a/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/deployment.py +++ b/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of ResNet model for Instance-Seg Task.""" + _base_ = ["../../base/deployments/base_instance_segmentation_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py b/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py index be11331a731..87cf62ac18c 100644 --- a/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py +++ b/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/model.py @@ -26,7 +26,10 @@ model = dict( type="CustomMaskRCNN", # Use CustomMaskRCNN for Incremental Learning neck=dict( - type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5, + type="FPN", + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5, ), rpn_head=dict( type="RPNHead", diff --git a/otx/algorithms/detection/tasks/inference.py b/otx/algorithms/detection/tasks/inference.py index 9911893ae78..cab6d8e29e1 100644 --- a/otx/algorithms/detection/tasks/inference.py +++ b/otx/algorithms/detection/tasks/inference.py @@ -14,32 +14,27 @@ # See the License for the specific language governing permissions # and limitations under the License. 
-import errno import os -from collections.abc import Mapping -from typing import Iterable, Optional, Tuple, Union +from typing import Iterable, Optional, Tuple import cv2 import numpy as np from mmcv.utils import ConfigDict from otx.algorithms.common.adapters.mmcv.utils import ( + get_configs_by_keys, + patch_data_pipeline, patch_default_config, patch_runner, - patch_data_pipeline, - get_configs_by_keys, ) from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.tasks.training_base import BaseTask from otx.algorithms.common.utils.callback import InferenceProgressCallback from otx.algorithms.detection.adapters.mmdet.utils import ( -from otx.algorithms.detection.adapters.mmdet.utils.config_utils import ( patch_datasets, patch_evaluation, ) -from otx.algorithms.detection.adapters.mmdet.utils.builder import ( - build_detector -) +from otx.algorithms.detection.adapters.mmdet.utils.builder import build_detector from otx.algorithms.detection.adapters.mmdet.utils.config_utils import ( cluster_anchors, should_cluster_anchors, @@ -168,7 +163,7 @@ def _infer_detector( self._data_cfg = self._init_test_data_cfg(dataset) # Temporary disable dump (will be handled by 'otx explain') dump_features = False - dump_saliency_map = False # not inference_parameters.is_evaluation if inference_parameters else True + dump_saliency_map = False # not inference_parameters.is_evaluation if inference_parameters else True results = self._run_task( stage_module, @@ -248,9 +243,7 @@ def export(self, export_type: ExportType, output_model: ModelEntity): outputs = results.get("outputs") logger.debug(f"results of run_task = {outputs}") if outputs is None: - logger.error( - f"error while exporting model, result is None: {results.get('msg')}" - ) + logger.error(f"error while exporting model, result is None: {results.get('msg')}") else: bin_file = outputs.get("bin") xml_file = outputs.get("xml") @@ -463,6 +456,4 @@ def _initialize_post_hook(self, options=dict()): self._recipe_cfg, otx_dataset, ) - self._update_anchors( - self._anchors, self._model_cfg.model.bbox_head.anchor_generator - ) + self._update_anchors(self._anchors, self._model_cfg.model.bbox_head.anchor_generator) diff --git a/otx/algorithms/detection/tasks/nncf.py b/otx/algorithms/detection/tasks/nncf.py index bb1a7eddfb1..20dc77b305a 100644 --- a/otx/algorithms/detection/tasks/nncf.py +++ b/otx/algorithms/detection/tasks/nncf.py @@ -18,7 +18,6 @@ from functools import partial from typing import Optional - from otx.algorithms.common.adapters.mmcv.utils import remove_from_config from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask from otx.algorithms.detection.adapters.mmdet.nncf import build_nncf_detector @@ -33,7 +32,6 @@ from .inference import DetectionInferenceTask - logger = get_logger() @@ -50,10 +48,7 @@ def _initialize_post_hook(self, options=dict()): ) # do not configure regularization - if ( - "l2sp_weight" in self._recipe_cfg.model - or "l2sp_weight" in self._model_cfg.model - ): + if "l2sp_weight" in self._recipe_cfg.model or "l2sp_weight" in self._model_cfg.model: remove_from_config(self._recipe_cfg.model, "l2sp_weight") remove_from_config(self._model_cfg.model, "l2sp_weight") @@ -78,9 +73,7 @@ def _optimize_post_hook( ): # get prediction on validation set val_dataset = dataset.get_subset(Subset.VALIDATION) - val_preds, val_map = self._infer_detector( - val_dataset, InferenceParameters(is_evaluation=True) - ) + val_preds, val_map = self._infer_detector(val_dataset, InferenceParameters(is_evaluation=True)) 
preds_val_dataset = val_dataset.with_empty_annotations() self._add_predictions_to_dataset(val_preds, preds_val_dataset, 0.0) @@ -95,29 +88,19 @@ def _optimize_post_hook( if self._hyperparams.postprocessing.result_based_confidence_threshold: best_confidence_threshold = None logger.info("Adjusting the confidence threshold") - metric = MetricsHelper.compute_f_measure( - result_set, vary_confidence_threshold=True - ) + metric = MetricsHelper.compute_f_measure(result_set, vary_confidence_threshold=True) if metric.best_confidence_threshold: best_confidence_threshold = metric.best_confidence_threshold.value if best_confidence_threshold is None: - raise ValueError( - "Cannot compute metrics: Invalid confidence threshold!" - ) - logger.info( - f"Setting confidence threshold to {best_confidence_threshold} based on results" - ) + raise ValueError("Cannot compute metrics: Invalid confidence threshold!") + logger.info(f"Setting confidence threshold to {best_confidence_threshold} based on results") self.confidence_threshold = best_confidence_threshold else: - metric = MetricsHelper.compute_f_measure( - result_set, vary_confidence_threshold=False - ) + metric = MetricsHelper.compute_f_measure(result_set, vary_confidence_threshold=False) def _save_model_post_hook(self, modelinfo): config = modelinfo["meta"]["config"] - if hasattr(config.model, "bbox_head") and hasattr( - config.model.bbox_head, "anchor_generator" - ): + if hasattr(config.model, "bbox_head") and hasattr(config.model.bbox_head, "anchor_generator"): if getattr( config.model.bbox_head.anchor_generator, "reclustering_anchors", diff --git a/otx/algorithms/detection/tasks/train.py b/otx/algorithms/detection/tasks/train.py index eb6c3943528..5248f16a873 100644 --- a/otx/algorithms/detection/tasks/train.py +++ b/otx/algorithms/detection/tasks/train.py @@ -23,9 +23,8 @@ from otx.algorithms.common.adapters.mmcv.hooks import OTXLoggerHook from otx.algorithms.common.utils.callback import TrainingProgressCallback -from otx.algorithms.common.utils.data import get_unlabeled_dataset +from otx.algorithms.common.utils.data import get_dataset from otx.algorithms.detection.adapters.mmdet.utils.config_utils import ( - cluster_anchors, should_cluster_anchors, ) from otx.api.configuration import cfg_helper @@ -62,6 +61,7 @@ class DetectionTrainTask(DetectionInferenceTask, ITrainingTask): def save_model(self, output_model: ModelEntity): """Save best model weights in DetectionTrainTask.""" + assert self._model_cfg is not None logger.info("called save_model") buffer = io.BytesIO() hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) @@ -192,25 +192,18 @@ def train( def _init_train_data_cfg(self, dataset: DatasetEntity): logger.info("init data cfg.") - data_cfg = ConfigDict( - data=ConfigDict( - train=ConfigDict( - otx_dataset=dataset.get_subset(Subset.TRAINING), + data_cfg = ConfigDict(data=ConfigDict()) + + for cfg_key, subset in zip( + ["train", "val", "unlabeled"], + [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED], + ): + subset = get_dataset(dataset, subset) + if subset: + data_cfg.data[cfg_key] = ConfigDict( + otx_dataset=subset, labels=self._labels, - ), - val=ConfigDict( - otx_dataset=dataset.get_subset(Subset.VALIDATION), - labels=self._labels, - ), - ) - ) - - unlabeled_dataset = get_unlabeled_dataset(dataset) - if unlabeled_dataset: - data_cfg.data.unlabeled = ConfigDict( - otx_dataset=unlabeled_dataset, - labels=self._labels, - ) + ) # Temporary remedy for cfg.pretty_text error for label in self._labels:
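Both train tasks in this patch now assemble their data config by looping over subsets through the shared `get_dataset` helper imported from `otx.algorithms.common.utils.data`, instead of hard-coding train/val entries and special-casing the unlabeled set. The helper's implementation is not part of these hunks; the sketch below is a minimal illustration of its assumed contract (return the subset when it contains items, otherwise None, so the loop silently skips empty subsets):

    from typing import Optional

    from otx.api.entities.datasets import DatasetEntity
    from otx.api.entities.subset import Subset


    def get_dataset(dataset: DatasetEntity, subset: Subset) -> Optional[DatasetEntity]:
        """Assumed behavior: hand back the requested subset, or None when it is empty."""
        data = dataset.get_subset(subset)
        return data if len(data) > 0 else None

With that contract, `data_cfg.data` only ever carries keys for subsets that actually exist, which is what lets the same loop serve supervised and semi-supervised (unlabeled) runs alike.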
diff --git a/otx/algorithms/detection/utils/data.py b/otx/algorithms/detection/utils/data.py index df5df92d1f4..dc9720becaf 100644 --- a/otx/algorithms/detection/utils/data.py +++ b/otx/algorithms/detection/utils/data.py @@ -20,6 +20,7 @@ from typing import Any, Dict, List, Optional, Sequence import numpy as np + # from pycocotools.coco import COCO from mmdet.datasets.api_wrappers.coco_api import COCO diff --git a/otx/algorithms/segmentation/adapters/mmseg/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/__init__.py index d6280f14840..5da6c120629 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/__init__.py @@ -6,9 +6,14 @@ from .data import MPASegDataset from .models import DetConB, DetConLoss, SelfSLMLP +# fmt: off +# isort: off # FIXME: openvino pot library adds stream handlers to root logger # which makes annoying duplicated logging -from mmseg.utils import get_root_logger +# pylint: disable=no-name-in-module, wrong-import-order +from mmseg.utils import get_root_logger # type: ignore get_root_logger().propagate = False +# fmt: on +# isort: on __all__ = ["MPASegDataset", "DetConLoss", "SelfSLMLP", "DetConB"] diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py index 3f084ac2a14..468f9062a26 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py @@ -2,12 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 # -from .builder import ( - build_nncf_segmentor, -) +# flake8: noqa +from .builder import build_nncf_segmentor from .hooks import CustomstepLrUpdaterHook - from .patches import * __all__ = [ diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py index 1fd685a59ec..af8c98f9b82 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py @@ -1,15 +1,17 @@ +"""NNCF optimized segmentor builder.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from copy import deepcopy from functools import partial from typing import Optional, Union import torch from mmcv.runner import CheckpointLoader, load_state_dict from mmcv.utils import Config, ConfigDict -from mmseg.utils import get_root_logger + +# pylint: disable=no-name-in-module +from mmseg.utils import get_root_logger # type: ignore from otx.algorithms.common.adapters.mmcv.utils import ( get_configs_by_dict, @@ -22,14 +24,12 @@ NNCF_STATE_NAME, STATE_TO_BUILD_NAME, ) -from otx.algorithms.common.adapters.nncf.utils import load_checkpoint from otx.algorithms.segmentation.adapters.mmseg.utils import build_segmentor - logger = get_root_logger() -def build_nncf_segmentor( +def build_nncf_segmentor( # noqa: C901 # pylint: disable=too-many-locals config: Config, train_cfg: Optional[Union[Config, ConfigDict]] = None, test_cfg: Optional[Union[Config, ConfigDict]] = None, @@ -38,18 +38,23 @@ def build_nncf_segmentor( cfg_options: Optional[Union[Config, ConfigDict]] = None, distributed=False, ): + """NNCF optimized segmentor builder.""" + from mmseg.apis import multi_gpu_test, single_gpu_test from mmseg.apis.inference import LoadImage from mmseg.datasets import build_dataloader as mmseg_build_dataloader - from mmseg.datasets import build_dataset + from mmseg.datasets import build_dataset as mmseg_build_dataset from mmseg.datasets.pipelines 
import Compose from otx.algorithms.common.adapters.mmcv.nncf import ( - build_dataloader, get_fake_input, model_eval, wrap_nncf_model, ) + from otx.algorithms.common.adapters.mmcv.utils.builder import ( + build_dataloader, + build_dataset, + ) if cfg_options is not None: config.merge_from_dict(cfg_options) @@ -59,9 +64,7 @@ def build_nncf_segmentor( config.load_from = None assert checkpoint is not None - model = build_segmentor( - config, train_cfg=train_cfg, test_cfg=test_cfg, from_scratch=True - ) + model = build_segmentor(config, train_cfg=train_cfg, test_cfg=test_cfg, from_scratch=True) model = model.to(device) state_dict = CheckpointLoader.load_checkpoint(checkpoint, map_location=device) @@ -86,21 +89,31 @@ def build_nncf_segmentor( data_to_build_nncf = datasets[0][0].numpy init_dataloader = build_dataloader( + build_dataset( + config, + subset="train", + dataset_builder=mmseg_build_dataset, + ), config, subset="train", - distributed=distributed, dataloader_builder=mmseg_build_dataloader, - dataset_builder=build_dataset, + distributed=distributed, ) val_dataloader = None if is_acc_aware: val_dataloader = build_dataloader( + build_dataset( + config, + subset="val", + dataset_builder=mmseg_build_dataset, + ), config, subset="val", - distributed=distributed, dataloader_builder=mmseg_build_dataloader, - dataset_builder=build_dataset, + distributed=distributed, + # segmentor does not support variously sized batch images + samples_per_gpu=1, ) model_eval_fn = partial( @@ -133,9 +146,7 @@ def build_nncf_segmentor( # update custom hooks custom_hooks = config.get("custom_hooks", []) - custom_hooks.append( - ConfigDict(type="CompressionHook", compression_ctrl=compression_ctrl) - ) + custom_hooks.append(ConfigDict(type="CompressionHook", compression_ctrl=compression_ctrl)) custom_hooks.append(ConfigDict({"type": "CancelTrainingHook"})) custom_hooks.append( ConfigDict( @@ -153,10 +164,7 @@ def build_nncf_segmentor( for hook in get_configs_by_dict(custom_hooks, dict(type="OTXProgressHook")): time_monitor = hook.get("time_monitor", None) - if ( - time_monitor - and getattr(time_monitor, "on_initialization_end", None) is not None - ): + if time_monitor and getattr(time_monitor, "on_initialization_end", None) is not None: time_monitor.on_initialization_end() return compression_ctrl, model diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py index 2577c6d6fc4..cef09192ed7 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py @@ -3,23 +3,16 @@ # import math -from abc import abstractmethod -from mmcv.runner.hooks import HOOKS -from mmcv.runner.hooks import LrUpdaterHook +from mmcv.runner.hooks import HOOKS, LrUpdaterHook class BaseLrUpdaterHook(LrUpdaterHook): - schedulers = ['constant', 'semi-constant', 'linear', 'cos'] - - def __init__(self, - by_epoch=True, - fixed=None, - fixed_iters=0, - fixed_ratio=1.0, - warmup=None, - warmup_iters=0, - warmup_ratio=0.1): + schedulers = ["constant", "semi-constant", "linear", "cos"] + + def __init__( + self, by_epoch=True, fixed=None, fixed_iters=0, fixed_ratio=1.0, warmup=None, warmup_iters=0, warmup_ratio=0.1 + ): super().__init__(by_epoch, warmup, warmup_iters, warmup_ratio) if fixed is not None: @@ -45,21 +38,21 @@ def __init__(self, @staticmethod def _get_lr(policy, cur_iters, regular_lr, max_iters, start_scale, end_scale): progress = float(cur_iters) / float(max_iters) - if policy 
== "constant": k = start_scale - elif policy == 'semi-constant': + elif policy == "semi-constant": threshold = 0.8 if progress < threshold: k = start_scale else: progress = (progress - threshold) / (1.0 - threshold) k = (end_scale - start_scale) * progress + start_scale - elif policy == 'linear': + elif policy == "linear": k = (end_scale - start_scale) * progress + start_scale - elif policy == 'cos': + elif policy == "cos": k = end_scale + 0.5 * (start_scale - end_scale) * (math.cos(math.pi * progress) + 1.0) else: - raise ValueError(f'Unknown policy: {policy}') + raise ValueError(f"Unknown policy: {policy}") return [_lr * k for _lr in regular_lr] @@ -67,10 +60,7 @@ def get_regular_lr(self, runner): if isinstance(runner.optimizer, dict): lr_groups = {} for k in runner.optimizer.keys(): - _lr_group = [ - self.get_lr(runner, _base_lr) - for _base_lr in self.base_lr[k] - ] + _lr_group = [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr[k]] lr_groups.update({k: _lr_group}) return lr_groups @@ -79,23 +69,11 @@ def get_regular_lr(self, runner): def get_fixed_lr(self, cur_iters, regular_lr): return self._get_lr( - self.fixed_policy, - cur_iters, - regular_lr, - self.fixed_iters, - self.fixed_start_ratio, - self.fixed_end_ratio + self.fixed_policy, cur_iters, regular_lr, self.fixed_iters, self.fixed_start_ratio, self.fixed_end_ratio ) def get_warmup_lr(self, cur_iters, regular_lr): - return self._get_lr( - self.warmup, - cur_iters, - regular_lr, - self.warmup_iters, - self.warmup_ratio, - 1.0 - ) + return self._get_lr(self.warmup, cur_iters, regular_lr, self.warmup_iters, self.warmup_ratio, 1.0) def _init_states(self, runner): if self.by_epoch: @@ -148,9 +126,7 @@ def _init_states(self, runner): super(CustomstepLrUpdaterHook, self)._init_states(runner) if self.by_epoch: - self.steps = [ - step * self.epoch_len for step in self.steps - ] + self.steps = [step * self.epoch_len for step in self.steps] def get_lr(self, runner, base_lr): progress = runner.iter diff --git a/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py index e104081d946..993addde41f 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py @@ -3,15 +3,15 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from .builder import build_segmentor from .config_utils import ( - patch_recipe_config, patch_datasets, patch_evaluation, + patch_recipe_config, prepare_for_training, set_hyperparams, ) from .data_utils import load_dataset_items -from .builder import build_segmentor __all__ = [ "patch_recipe_config", diff --git a/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py b/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py index 6154eebf607..7683bfbf698 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py @@ -18,12 +18,13 @@ def build_segmentor( device: Union[str, torch.device] = "cpu", cfg_options: Optional[Union[Config, ConfigDict]] = None, from_scratch: bool = False, -): +) -> torch.nn.Module: """Creates a model, based on the configuration in config. Note that this function consumes/updates 'load_from' attribute of 'config'. 
""" from mmseg.models import build_segmentor as origin_build_segmentor + from mmseg.utils import get_root_logger # type: ignore model_cfg = deepcopy(config.model) @@ -36,10 +37,9 @@ def build_segmentor( checkpoint = checkpoint if checkpoint else config.pop("load_from", None) if checkpoint is not None and not from_scratch: - load_checkpoint(model, checkpoint, map_location=device) + load_checkpoint(model, checkpoint, map_location=device, logger=get_root_logger()) config.load_from = None else: config.load_from = checkpoint return model - diff --git a/otx/algorithms/segmentation/adapters/mmseg/utils/config_utils.py b/otx/algorithms/segmentation/adapters/mmseg/utils/config_utils.py index e5b8c13a16d..183c1221509 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/utils/config_utils.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/config_utils.py @@ -15,28 +15,25 @@ # and limitations under the License. -import copy import logging import math -from collections import defaultdict -from typing import List, Optional, Union +from typing import List, Union from mmcv import Config, ConfigDict from otx.algorithms.common.adapters.mmcv.utils import ( + get_configs_by_dict, + get_configs_by_keys, + get_dataset_configs, get_meta_keys, is_epoch_based_runner, patch_color_conversion, prepare_work_dir, remove_from_config, remove_from_configs_by_type, - get_configs_by_dict, - get_dataset_configs, - get_configs_by_keys, update_config, ) from otx.algorithms.segmentation.configs.base import SegmentationConfig -from otx.api.entities.datasets import DatasetEntity from otx.api.entities.label import Domain, LabelEntity from otx.api.utils.argument_checks import ( DirectoryPathCheck, @@ -111,10 +108,7 @@ def set_hyperparams(config: Config, hyperparams: SegmentationConfig): main_iters = int(hyperparams.learning_parameters.num_iters) total_iterations = fixed_iters + warmup_iters + main_iters - freeze_layer_config = get_configs_by_dict( - config.custom_hooks, - dict(type="FreezeLayers") - ) + freeze_layer_config = get_configs_by_dict(config.custom_hooks, dict(type="FreezeLayers")) assert len(freeze_layer_config) == 1 freeze_layer_config = freeze_layer_config[0] freeze_layer_config.iters = fixed_iters @@ -302,8 +296,8 @@ def set_num_classes(config: Config, num_classes: int): def patch_datasets( config: Config, domain: Domain = Domain.SEGMENTATION, - subsets: List[str] = ["train", "val", "test"], - **kwargs + subsets: List[str] = ["train", "val", "test", "unlabeled"], + **kwargs, ): """Update dataset configs.""" @@ -320,9 +314,7 @@ def update_pipeline(cfg): for subset in subsets: if subset not in config.data: continue - config.data[f"{subset}_dataloader"] = config.data.get( - f"{subset}_dataloader", ConfigDict() - ) + config.data[f"{subset}_dataloader"] = config.data.get(f"{subset}_dataloader", ConfigDict()) cfgs = get_dataset_configs(config, subset) for cfg in cfgs: diff --git a/otx/algorithms/segmentation/configs/base/models/mean_teacher.py b/otx/algorithms/segmentation/configs/base/models/mean_teacher.py index 963bac4fd9c..ea27f96d738 100644 --- a/otx/algorithms/segmentation/configs/base/models/mean_teacher.py +++ b/otx/algorithms/segmentation/configs/base/models/mean_teacher.py @@ -1,8 +1,8 @@ """Segmentor config for semi-supervised learning.""" model = dict( - type="MeanTeacher", - orig_type="EncoderDecoder", + type="MeanTeacherSegmentor", + orig_type="OTXEncoderDecoder", unsup_weight=0.1, train_cfg=dict(mix_loss=dict(enable=False, weight=0.1)), test_cfg=dict(mode="whole", output_scale=5.0), diff --git 
a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/deployment.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/deployment.py index 891bf5118be..9dd0a21c6ae 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/deployment.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of OCR-Lite-HRnet-18 model for Segmentation Task.""" + _base_ = ["../base/deployments/base_segmentation_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/model.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/model.py index ec9e96aadbd..3799b09a4cd 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/model.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/model.py @@ -21,7 +21,7 @@ ] model = dict( - type="ClassIncrSegmentor", + type="ClassIncrEncoderDecoder", pretrained=None, decode_head=dict( type="CustomFCNHead", diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/deployment.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/deployment.py index 891bf5118be..0151b957607 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/deployment.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of OCR-Lite-HRnet-18-mod2 model for Segmentation Task.""" + _base_ = ["../base/deployments/base_segmentation_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/model.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/model.py index 7d0d5d39433..514b91d357b 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/model.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/model.py @@ -21,7 +21,7 @@ ] model = dict( - type="ClassIncrSegmentor", + type="ClassIncrEncoderDecoder", pretrained=None, decode_head=dict( type="CustomFCNHead", diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/deployment.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/deployment.py index 891bf5118be..95a821f783e 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/deployment.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of OCR-Lite-HRnet-s-mod2 model for Segmentation Task.""" + _base_ = ["../base/deployments/base_segmentation_dynamic.py"] ir_config = dict( diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/model.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/model.py index b7b2aedc631..2696ae85bdd 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/model.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/model.py @@ -21,7 +21,7 @@ ] model = dict( - type="ClassIncrSegmentor", + type="ClassIncrEncoderDecoder", pretrained=None, decode_head=dict( type="CustomFCNHead", diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/deployment.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/deployment.py index 9dbf96904c6..ee73b0f0dd4 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/deployment.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/deployment.py @@ -1,3 +1,5 @@ +"""MMDeploy config of OCR-Lite-HRnet-x-mod3 model for Segmentation Task.""" + _base_ = ["../base/deployments/base_segmentation_dynamic.py"] ir_config = dict( diff --git 
a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/model.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/model.py index 1a128ebf6ee..0ef49bc4764 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/model.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/model.py @@ -21,7 +21,7 @@ ] model = dict( - type="ClassIncrSegmentor", + type="ClassIncrEncoderDecoder", pretrained=None, decode_head=dict( type="CustomFCNHead", diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml index 8aa443353fd..1f04d2f0fd7 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml @@ -14,6 +14,7 @@ framework: OTESegmentation v0.14.0 entrypoints: base: otx.algorithms.segmentation.tasks.SegmentationTrainTask openvino: otx.algorithms.segmentation.tasks.OpenVINOSegmentationTask + nncf: otx.algorithms.segmentation.tasks.SegmentationNNCFTask data_pipeline_path: ../base/data/data_pipeline.py # Capabilities. @@ -39,7 +40,7 @@ hyper_parameters: default_value: 300 nncf_optimization: enable_quantization: - default_value: false + default_value: true enable_pruning: default_value: false pruning_supported: diff --git a/otx/algorithms/segmentation/tasks/inference.py b/otx/algorithms/segmentation/tasks/inference.py index 6b816465cfe..e214335b677 100644 --- a/otx/algorithms/segmentation/tasks/inference.py +++ b/otx/algorithms/segmentation/tasks/inference.py @@ -15,32 +15,25 @@ # and limitations under the License. import os -from typing import Dict, Optional, Union +from typing import Dict, Optional import numpy as np from mmcv.utils import ConfigDict from otx.algorithms.common.adapters.mmcv.utils import ( patch_data_pipeline, -) -from otx.algorithms.common.adapters.mmcv.utils import ( + patch_default_config, + patch_runner, remove_from_configs_by_type, - get_configs_by_dict, ) from otx.algorithms.common.configs import TrainType from otx.algorithms.common.tasks import BaseTask from otx.algorithms.common.utils.callback import InferenceProgressCallback -from otx.algorithms.common.adapters.mmcv.utils import ( - patch_default_config, - patch_runner, -) +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor from otx.algorithms.segmentation.adapters.mmseg.utils.config_utils import ( patch_datasets, patch_evaluation, ) -from otx.algorithms.segmentation.adapters.mmseg.utils.builder import ( - build_segmentor, -) from otx.algorithms.segmentation.adapters.openvino.model_wrappers.blur import ( get_activation_map, ) @@ -161,9 +154,7 @@ def export(self, export_type: ExportType, output_model: ModelEntity): outputs = results.get("outputs") logger.debug(f"results of run_task = {outputs}") if outputs is None: - logger.error( - f"error while exporting model, result is None: {results.get('msg')}" - ) + logger.error(f"error while exporting model, result is None: {results.get('msg')}") # output_model.model_status = ModelStatus.FAILED else: bin_file = outputs.get("bin") @@ -180,31 +171,37 @@ def export(self, export_type: ExportType, output_model: ModelEntity): logger.info("Exporting completed") def _init_recipe_hparam(self) -> dict: - warmup_iters = int(self._hyperparams.learning_parameters.learning_rate_warmup_iters) + params = self._hyperparams.learning_parameters + warmup_iters = int(params.learning_rate_warmup_iters) lr_config = ( 
ConfigDict(warmup_iters=warmup_iters) if warmup_iters > 0 else ConfigDict(warmup_iters=warmup_iters, warmup=None) ) - if self._hyperparams.learning_parameters.enable_early_stopping: + if params.enable_early_stopping: early_stop = ConfigDict( - start=int(self._hyperparams.learning_parameters.early_stop_start), - patience=int(self._hyperparams.learning_parameters.early_stop_patience), - iteration_patience=int(self._hyperparams.learning_parameters.early_stop_iteration_patience), + start=int(params.early_stop_start), + patience=int(params.early_stop_patience), + iteration_patience=int(params.early_stop_iteration_patience), ) else: early_stop = False + if self._recipe_cfg.runner.get("type").startswith("IterBasedRunner"): # type: ignore + runner = ConfigDict(max_iters=int(params.num_iters)) + else: + runner = ConfigDict(max_epochs=int(params.num_iters)) + return ConfigDict( - optimizer=ConfigDict(lr=self._hyperparams.learning_parameters.learning_rate), + optimizer=ConfigDict(lr=params.learning_rate), lr_config=lr_config, early_stop=early_stop, data=ConfigDict( - samples_per_gpu=int(self._hyperparams.learning_parameters.batch_size), - workers_per_gpu=int(self._hyperparams.learning_parameters.num_workers), + samples_per_gpu=int(params.batch_size), + workers_per_gpu=int(params.num_workers), ), - runner=ConfigDict(max_epochs=int(self._hyperparams.learning_parameters.num_iters)), + runner=runner, ) def _init_recipe(self): diff --git a/otx/algorithms/segmentation/tasks/nncf.py b/otx/algorithms/segmentation/tasks/nncf.py index 558a6307291..3a688e1b745 100644 --- a/otx/algorithms/segmentation/tasks/nncf.py +++ b/otx/algorithms/segmentation/tasks/nncf.py @@ -17,18 +17,14 @@ from functools import partial from typing import Optional - from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask from otx.algorithms.segmentation.adapters.mmseg.nncf import build_nncf_segmentor from otx.api.entities.datasets import DatasetEntity from otx.api.entities.optimization_parameters import OptimizationParameters -from otx.api.entities.task_environment import TaskEnvironment -from otx.api.utils.argument_checks import check_input_parameters_type from otx.mpa.utils.logger import get_logger from .inference import SegmentationInferenceTask - logger = get_logger() @@ -56,4 +52,3 @@ def _optimize( parameters=optimization_parameters, ) return results - diff --git a/otx/algorithms/segmentation/tasks/train.py b/otx/algorithms/segmentation/tasks/train.py index 933fb2cd727..bb6614336b0 100644 --- a/otx/algorithms/segmentation/tasks/train.py +++ b/otx/algorithms/segmentation/tasks/train.py @@ -23,7 +23,7 @@ from otx.algorithms.common.adapters.mmcv import OTXLoggerHook from otx.algorithms.common.configs import TrainType from otx.algorithms.common.utils.callback import TrainingProgressCallback -from otx.algorithms.common.utils.data import get_unlabeled_dataset +from otx.algorithms.common.utils.data import get_dataset from otx.algorithms.segmentation.tasks import SegmentationInferenceTask from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings @@ -140,25 +140,18 @@ def train( def _init_train_data_cfg(self, dataset: DatasetEntity): logger.info("init data cfg.") - data_cfg = ConfigDict( - data=ConfigDict( - train=ConfigDict( - otx_dataset=dataset.get_subset(Subset.TRAINING), + data_cfg = ConfigDict(data=ConfigDict()) + + for cfg_key, subset in zip( + ["train", "val", "unlabeled"], + [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED], + ): + subset = get_dataset(dataset, subset) + 
if subset: + data_cfg.data[cfg_key] = ConfigDict( + otx_dataset=subset, labels=self._labels, - ), - val=ConfigDict( - otx_dataset=dataset.get_subset(Subset.VALIDATION), - labels=self._labels, - ), - ) - ) - - unlabeled_dataset = get_unlabeled_dataset(dataset) - if unlabeled_dataset: - data_cfg.data.unlabeled = ConfigDict( - otx_dataset=unlabeled_dataset, - labels=self._labels, - ) + ) # Temporary remedy for cfg.pretty_text error for label in self._labels: diff --git a/otx/cli/tools/eval.py b/otx/cli/tools/eval.py index 9b863e2ae50..604f6718020 100644 --- a/otx/cli/tools/eval.py +++ b/otx/cli/tools/eval.py @@ -58,6 +58,7 @@ def parse_args(): parser.add_argument("template") parser.add_argument("--data", required=False, default="./data.yaml") required = not os.path.exists("./data.yaml") + parser.add_argument( "--test-ann-files", required=required, diff --git a/otx/cli/utils/tests.py b/otx/cli/utils/tests.py index 7907c2195f2..b6b2ba0e22a 100644 --- a/otx/cli/utils/tests.py +++ b/otx/cli/utils/tests.py @@ -16,6 +16,7 @@ import os import shutil import subprocess # nosec +import sys import pytest @@ -49,8 +50,11 @@ def get_template_dir(template, root) -> str: def check_run(cmd, **kwargs): - result = subprocess.run(cmd, stderr=subprocess.PIPE, **kwargs) - assert result.returncode == 0, result.stderr.decode("utf=8") + p = subprocess.Popen(cmd, stderr=subprocess.PIPE, **kwargs) + for c in iter(lambda: p.stderr.read(1), b""): + sys.stderr.buffer.write(c) + _, _ = p.communicate() + assert p.returncode == 0, "The process returned non-zero." def otx_train_testing(template, root, otx_dir, args): @@ -147,6 +151,7 @@ def otx_eval_testing(template, root, otx_dir, args): "--save-performance", f"{template_work_dir}/trained_{template.model_template_id}/performance.json", ] + command_line.extend(args.get("eval_params", [])) check_run(command_line) assert os.path.exists(f"{template_work_dir}/trained_{template.model_template_id}/performance.json") diff --git a/otx/mpa/modules/models/detectors/unbiased_teacher.py b/otx/mpa/modules/models/detectors/unbiased_teacher.py index 4d73b58487b..c72c2d4a296 100644 --- a/otx/mpa/modules/models/detectors/unbiased_teacher.py +++ b/otx/mpa/modules/models/detectors/unbiased_teacher.py @@ -165,15 +165,20 @@ def eval_pseudo_label_recall(self, all_pseudo_bboxes, all_gt_bboxes): return torch.Tensor(recall) @staticmethod - def state_dict_hook(module, state_dict, *args, **kwargs): + def state_dict_hook(module, state_dict, prefix, *args, **kwargs): """Redirect teacher model as output state_dict (student as auxiliary)""" logger.info("----------------- UnbiasedTeacher.state_dict_hook() called") - output = OrderedDict() - for k, v in state_dict.items(): - if "model_t." 
in k: - k = k.replace("model_t.", "") - output[k] = v - return output + for k in list(state_dict.keys()): + v = state_dict.pop(k) + if not prefix or k.startswith(prefix): + k = k.replace(prefix, "", 1) + if k.startswith("model_t."): + k = k.replace("model_t.", "", 1) + elif k.startswith("model_s."): + continue + k = prefix + k + state_dict[k] = v + return state_dict @staticmethod def load_state_dict_pre_hook(module, state_dict, *args, **kwargs): diff --git a/otx/mpa/modules/models/segmentors/mean_teacher_segmentor.py b/otx/mpa/modules/models/segmentors/mean_teacher_segmentor.py index b6232cf7c2d..cf3eb65c8d3 100644 --- a/otx/mpa/modules/models/segmentors/mean_teacher_segmentor.py +++ b/otx/mpa/modules/models/segmentors/mean_teacher_segmentor.py @@ -80,15 +80,20 @@ def forward_train(self, img, img_metas, gt_semantic_seg, **kwargs): return losses @staticmethod - def state_dict_hook(module, state_dict, *args, **kwargs): + def state_dict_hook(module, state_dict, prefix, *args, **kwargs): """Redirect student model as output state_dict (teacher as auxiliary)""" logger.info("----------------- MeanTeacherSegmentor.state_dict_hook() called") - output = OrderedDict() - for k, v in state_dict.items(): - if k.startswith("model_s."): - k = k.replace("model_s.", "") - output[k] = v - return output + for k in list(state_dict.keys()): + v = state_dict.pop(k) + if not prefix or k.startswith(prefix): + k = k.replace(prefix, "", 1) + if k.startswith("model_s."): + k = k.replace("model_s.", "", 1) + elif k.startswith("model_t."): + continue + k = prefix + k + state_dict[k] = v + return state_dict @staticmethod def load_state_dict_pre_hook(module, state_dict, *args, **kwargs): diff --git a/otx/mpa/utils/data_cpu.py b/otx/mpa/utils/data_cpu.py deleted file mode 100644 index 99ecee41ed6..00000000000 --- a/otx/mpa/utils/data_cpu.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2021 OpenMMLab -# SPDX-License-Identifier: Apache-2.0 -# -# Copyright (C) 2016-2021 Facebook, Inc -# SPDX-License-Identifier: BSD-3-Clause -# -# Copyright (C) 2020-2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# -import torch -from mmcv.parallel import MMDataParallel -from mmcv.parallel.data_container import DataContainer - - -def scatter_cpu(inputs): - """Scatter inputs to cpu. - :type:`~mmcv.parallel.DataContainer`. - """ - - def scatter_map(obj): - if isinstance(obj, torch.Tensor): - return [obj] - if isinstance(obj, DataContainer): - return obj.data - if isinstance(obj, tuple) and len(obj) > 0: - return list(zip(*map(scatter_map, obj))) - if isinstance(obj, list) and len(obj) > 0: - out = list(map(list, zip(*map(scatter_map, obj)))) - return out - if isinstance(obj, dict) and len(obj) > 0: - out = list(map(type(obj), zip(*map(scatter_map, obj.items())))) - return out - return [obj] - - # After scatter_map is called, a scatter_map cell will exist. This cell - # has a reference to the actual function scatter_map, which has references - # to a closure that has a reference to the scatter_map cell (because the - # fn is recursive). 
To avoid this reference cycle, we set the function to - # None, clearing the cell - try: - return scatter_map(inputs) - finally: - scatter_map = None - - -def scatter_kwargs(inputs, kwargs): - """Scatter with support for kwargs dictionary""" - inputs = scatter_cpu(inputs) if inputs else [] - kwargs = scatter_cpu(kwargs) if kwargs else [] - if len(inputs) < len(kwargs): - inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) - elif len(kwargs) < len(inputs): - kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) - inputs = tuple(inputs) - kwargs = tuple(kwargs) - return inputs, kwargs - - -class MMDataCPU(MMDataParallel): - """Implementation of MMDataParallel to use CPU for training""" - - def scatter(self, inputs, kwargs): - return scatter_kwargs(inputs, kwargs) - - def train_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs) - return self.module.train_step(*inputs[0], **kwargs[0]) - - def val_step(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs) - return self.module.val_step(*inputs[0], **kwargs[0]) - - def forward(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs) - return self.module(*inputs[0], **kwargs[0]) diff --git a/tests/integration/cli/classification/test_classification.py b/tests/integration/cli/classification/test_classification.py index 91edd7a298b..bb8938077a4 100644 --- a/tests/integration/cli/classification/test_classification.py +++ b/tests/integration/cli/classification/test_classification.py @@ -617,7 +617,7 @@ class TestToolsMPASelfSLClassification: @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) @set_dummy_data - def test_otx_selfsl_train(self, template, tmp_dir_path): + def test_otx_train(self, template, tmp_dir_path): otx_train_testing(template, tmp_dir_path, otx_dir, args_selfsl) template_work_dir = get_template_dir(template, tmp_dir_path) args1 = args.copy() diff --git a/tests/integration/cli/detection/test_detection.py b/tests/integration/cli/detection/test_detection.py index 2cd76976f46..59139d71dfe 100644 --- a/tests/integration/cli/detection/test_detection.py +++ b/tests/integration/cli/detection/test_detection.py @@ -235,4 +235,5 @@ def test_otx_train(self, template, tmp_dir_path): @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_eval(self, template, tmp_dir_path): - otx_eval_testing(template, tmp_dir_path, otx_dir, args) + args_semisl = args.copy() + otx_eval_testing(template, tmp_dir_path, otx_dir, args_semisl) diff --git a/tests/integration/cli/segmentation/test_segmentation.py b/tests/integration/cli/segmentation/test_segmentation.py index c5a8e73b8a2..043608fdc9c 100644 --- a/tests/integration/cli/segmentation/test_segmentation.py +++ b/tests/integration/cli/segmentation/test_segmentation.py @@ -92,7 +92,8 @@ def test_otx_train(self, template, tmp_dir_path): @pytest.mark.skipif(TT_STABILITY_TESTS, reason="This is TT_STABILITY_TESTS") @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_otx_eval(self, template, tmp_dir_path): - otx_eval_testing(template, tmp_dir_path, otx_dir, args) + args_semisl = args.copy() + otx_eval_testing(template, tmp_dir_path, otx_dir, args_semisl) class TestToolsMPASegmentation:
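The `state_dict_hook` changes in `unbiased_teacher.py` and `mean_teacher_segmentor.py` above switch from copying selected keys into a fresh `OrderedDict` to rewriting the passed-in state dict in place, and they now honor the `prefix` argument that PyTorch supplies to state-dict hooks; handling the prefix matters once the model is wrapped (for example by `MMDataParallel`), since keys then arrive as `module.model_t....`. Below is a standalone sketch of the same key-rewriting pattern on a toy dict; the function name and the `keep`/`drop` parameters are illustrative only, not part of the patch:

    from collections import OrderedDict


    def redirect_submodule(state_dict, prefix, keep="model_t.", drop="model_s."):
        """Expose the `keep` submodule as the top-level model; discard `drop` keys."""
        for k in list(state_dict.keys()):
            v = state_dict.pop(k)
            if not prefix or k.startswith(prefix):
                k = k.replace(prefix, "", 1)  # peel the outer wrapper prefix
                if k.startswith(keep):
                    k = k.replace(keep, "", 1)  # promote the kept submodule's keys
                elif k.startswith(drop):
                    continue  # entry was already popped, so it is simply dropped
                k = prefix + k  # restore the outer wrapper prefix
            state_dict[k] = v
        return state_dict


    sd = OrderedDict([("model_t.conv.weight", 1), ("model_s.conv.weight", 2), ("head.bias", 3)])
    print(redirect_submodule(sd, prefix=""))
    # OrderedDict([('conv.weight', 1), ('head.bias', 3)])

Rewriting in place (pop and re-insert) rather than returning a new mapping keeps the hook safe regardless of whether the caller uses the hook's return value, which is the apparent motivation for the change.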