diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 39e05772c3a..50ad5eb3d04 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -22,20 +22,20 @@ not fully covered by unit tests or manual testing can be complicated. --> -- [ ] I submit my changes into the `develop` branch -- [ ] I have added description of my changes into [CHANGELOG](https://github.com/openvinotoolkit/training_extensions/blob/develop/CHANGELOG.md) -- [ ] I have updated the [documentation](https://github.com/openvinotoolkit/training_extensions/tree/develop/docs) accordingly -- [ ] I have added tests to cover my changes -- [ ] I have [linked related issues](https://help.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) +- [ ] I have added unit tests to cover my changes. +- [ ] I have added integration tests to cover my changes. +- [ ] I have added e2e tests for validation. +- [ ] I have added the description of my changes into CHANGELOG in my target branch (e.g., [CHANGELOG](https://github.com/openvinotoolkit/training_extensions/blob/develop/CHANGELOG.md) in develop). +- [ ] I have updated the documentation in my target branch accordingly (e.g., [documentation](https://github.com/openvinotoolkit/training_extensions/tree/develop/docs) in develop). +- [ ] I have [linked related issues](https://help.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword). ### License -- [ ] I submit _my code changes_ under the same [MIT License](https://github.com/openvinotoolkit/training_extensions/blob/develop/LICENSE) that covers the project. +- [ ] I submit _my code changes_ under the same [Apache License](https://github.com/openvinotoolkit/training_extensions/blob/develop/LICENSE) that covers the project. Feel free to contact the maintainers if that's a concern. -- [ ] I have updated the license header for each file (see an example below) +- [ ] I have updated the license header for each file (see an example below). ```python -# Copyright (C) 2023 Intel Corporation -# -# SPDX-License-Identifier: MIT +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 ``` diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml index f90e9911ac3..05b5011a526 100644 --- a/.github/workflows/pre_merge.yml +++ b/.github/workflows/pre_merge.yml @@ -43,7 +43,7 @@ jobs: - name: Install dependencies run: python -m pip install -r requirements/dev.txt - name: Unit-testing - run: tox -re pre-merge -- tests/unit + run: tox -e pre-merge -- tests/unit - name: Upload coverage reports to Codecov run: | # If the workflow is triggered from PR then it gets the commit id from the PR.
@@ -72,7 +72,7 @@ jobs: - name: Install dependencies run: python -m pip install -r requirements/dev.txt - name: Integration-testing - run: tox -re pre-merge -- tests/integration/cli/test_cli.py + run: tox -e pre-merge -- tests/integration/cli/test_cli.py Pre-Merge-Integration-Cls-Test: runs-on: [self-hosted, linux, x64, dev] needs: Pre-Merge-Unit-Test @@ -83,7 +83,7 @@ jobs: - name: Install dependencies run: python -m pip install -r requirements/dev.txt - name: Integration-testing - run: tox -re pre-merge-cls + run: tox -e pre-merge-cls Pre-Merge-Integration-Det-Test: runs-on: [self-hosted, linux, x64, dev] needs: Pre-Merge-Unit-Test @@ -94,7 +94,7 @@ jobs: - name: Install dependencies run: python -m pip install -r requirements/dev.txt - name: Integration-testing - run: tox -re pre-merge-det + run: tox -e pre-merge-det Pre-Merge-Integration-Seg-Test: runs-on: [self-hosted, linux, x64, dev] needs: Pre-Merge-Unit-Test @@ -105,7 +105,7 @@ jobs: - name: Install dependencies run: python -m pip install -r requirements/dev.txt - name: Integration-testing - run: tox -re pre-merge-seg + run: tox -e pre-merge-seg Pre-Merge-Integration-Action-Test: runs-on: [self-hosted, linux, x64, dev] needs: Pre-Merge-Unit-Test @@ -116,7 +116,7 @@ jobs: - name: Install dependencies run: python -m pip install -r requirements/dev.txt - name: Integration-testing - run: tox -re pre-merge-action + run: tox -e pre-merge-action Pre-Merge-Integration-Anomaly-Test: runs-on: [self-hosted, linux, x64, dev] needs: Pre-Merge-Unit-Test @@ -127,4 +127,4 @@ jobs: - name: Install dependencies run: python -m pip install -r requirements/dev.txt - name: Integration-testing - run: tox -re pre-merge-anomaly + run: tox -e pre-merge-anomaly diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c22002aaa1..c36aeaac4f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,60 @@ All notable changes to this project will be documented in this file. +## \[v1.2.0\] + +### New features + +- + +### Enhancements + +- + +### Bug fixes + +- + +### Known issues + +- OpenVINO(==2022.3) IR inference is not working well on 2-stage models (e.g. Mask-RCNN) exported from torch==1.13.1 + (working well up to torch==1.12.1) () + +## \[v1.1.0\] + +### New features + +- Add FP16 IR export support () +- Add in-memory caching in dataloader () +- Add MoViNet template for action classification () +- Add Semi-SL multilabel classification algorithm () +- Integrate multi-gpu training for semi-supervised learning and self-supervised learning () +- Add train-type parameter to otx train () +- Add embedding of inference configuration to IR for classification () +- Enable VOC dataset in OTX () +- Add mmcls.VisionTransformer backbone support () + +### Enhancements + +- Parametrize saliency maps dumping in export () +- Bring mmdeploy to action recognition model export & Test optimization of action tasks () +- Update backbone lists () +- Add explanation for XAI & minor doc fixes () +- Refactor phase#1: MPA modules + +### Bug fixes + +- Handle unpickable update_progress_callback () +- Dataset Adapter: Avoid duplicated annotation and permit empty image () +- Arrange scale between bbox preds and bbox targets in ATSS () +- Fix label mismatch of evaluation and validation with large dataset in semantic segmentation () +- Fix packaging errors including cython module build / import issues () + +### Known issues + +- OpenVINO(==2022.3) IR inference is not working well on 2-stage models (e.g. 
Mask-RCNN) exported from torch==1.13.1 + (working well up to torch==1.12.1) () + ## \[v1.0.1\] ### Enhancements diff --git a/MANIFEST.in b/MANIFEST.in index a1932f526f8..7216109b97d 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,3 +2,6 @@ recursive-include requirements * recursive-include otx *.pyx recursive-include otx *.yaml recursive-include otx *.json +recursive-exclude otx *.c +graft tests +global-exclude *.py[cod] diff --git a/README.md b/README.md index a5f78ca9fdb..8c82e9dd2ca 100644 --- a/README.md +++ b/README.md @@ -89,19 +89,19 @@ You can find more details with examples in the [CLI command intro](https://openv ## Updates -### v1.0.0 (1Q23) - -- Package Installation via PyPI - - OpenVINO™ Training Extensions installation will be supported via PyPI -- CLI update - - Update `find` command to find configurations of tasks/algorithms - - Introduce `build` command to customize task or model configurations - - Automatic algorihm selection for the `train` command using the given input dataset -- Adaptation of [Datumaro](https://github.com/openvinotoolkit/datumaro) component as a dataset interface -- Integrate hyper-parameter optimizations -- Support action recognition task - -### v1.1+ (2Q23) +### v1.1.0 (1Q23) + +- Add FP16 IR export support () +- Add in-memory caching in dataloader () +- Add MoViNet template for action classification () +- Add Semi-SL multilabel classification algorithm () +- Integrate multi-gpu training for semi-supervised learning and self-supervised learning () +- Add train-type parameter to otx train () +- Add embedding of inference configuration to IR for classification () +- Enable VOC dataset in OTX () +- Add mmcls.VisionTransformer backbone support () + +### v1.2+ (2Q23) - In planning diff --git a/docs/source/_static/logos/github_icon.png b/docs/source/_static/logos/github_icon.png new file mode 100644 index 00000000000..30183508885 Binary files /dev/null and b/docs/source/_static/logos/github_icon.png differ diff --git a/docs/source/conf.py b/docs/source/conf.py index 0f2fb9f6937..d4834d06e9b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -52,7 +52,15 @@ "logo": { "image_light": 'logos/otx-logo.png', "image_dark": 'logos/otx-logo.png', - } + }, + "icon_links": [ + { + "name": "GitHub", + "url": "https://github.com/openvinotoolkit/training_extensions", + "icon": "_static/logos/github_icon.png", + "type": "local", + }, + ], } html_css_files = [ 'css/custom.css', diff --git a/docs/source/guide/explanation/additional_features/index.rst b/docs/source/guide/explanation/additional_features/index.rst index 5bfdaf77e16..9e76843e82e 100644 --- a/docs/source/guide/explanation/additional_features/index.rst +++ b/docs/source/guide/explanation/additional_features/index.rst @@ -9,3 +9,4 @@ Additional Features models_optimization hpo auto_configuration + xai diff --git a/docs/source/guide/explanation/additional_features/xai.rst b/docs/source/guide/explanation/additional_features/xai.rst new file mode 100644 index 00000000000..3c91c2c71e1 --- /dev/null +++ b/docs/source/guide/explanation/additional_features/xai.rst @@ -0,0 +1,95 @@ +Explainable AI (XAI) +==================== + +**Explainable AI (XAI)** is a field of research that aims to make machine learning models more transparent and interpretable to humans. +The goal is to help users understand how and why AI systems make decisions and provide insight into their inner workings. 
It allows us to detect, analyze, and prevent common mistakes, for example, when the model uses irrelevant features to make a prediction. +XAI can help to build trust in AI, make sure that the model is safe for deployment, and increase its adoption in various domains. + +Most XAI methods generate **saliency maps** as a result. A saliency map is a visual representation, suitable for human comprehension, that highlights the parts of the image that matter most to the model. +It looks like a heatmap, where warm-colored areas mark the regions the model focuses on. + + +.. figure:: ../../../../utils/images/xai_example.jpg + :width: 600 + :alt: this image shows the result of XAI algorithm + + These images are taken from the `D-RISE paper `_. + + +We can generate saliency maps for a model trained in OpenVINO™ Training Extensions using the ``otx explain`` command. Learn more about its usage in the :doc:`../../tutorials/base/explain` tutorial. + +********************************* +XAI algorithms for classification +********************************* + +.. image:: ../../../../utils/images/xai_cls.jpg + :width: 600 + :align: center + :alt: this image shows the comparison of XAI classification algorithms + + +For classification networks, the following algorithms are used to generate saliency maps: + +- **Activation Map** - the most basic and naive approach. It takes the outputs of the model's feature extractor (backbone) and averages them over the channel dimension. The result relies heavily on the backbone and ignores neck and head computations, but it is fast and usually reasonably informative. A minimal sketch of this and the Eigen-CAM approach is shown after this list. + +- `Eigen-Cam `_ uses Principal Component Analysis (PCA). It returns the first principal component of the feature extractor output, which most of the time corresponds to the dominant object. The result relies heavily on the backbone as well and ignores neck and head computations. + +- `Recipro-CAM `_ is a fast, gradient-free Reciprocal CAM method. It uses Class Activation Mapping (CAM) to weigh the activation map for each class, so it can generate a different saliency map per class. The method spatially masks the extracted feature maps to exploit the correlation between activation maps and network predictions for target classes.
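+Below is a minimal sketch of the two backbone-only methods above (Activation Map and Eigen-CAM). It is an illustration under assumed tensor shapes, not the exact OpenVINO™ Training Extensions implementation; ``features`` stands in for a real backbone output.
+
+.. code-block:: python
+
+   import torch
+
+   # Stand-in for a real feature-extractor output: (batch, channels, H, W).
+   features = torch.randn(1, 576, 7, 7)
+
+   # Activation Map: average the feature maps over the channel dimension.
+   activation_map = features.mean(dim=1).squeeze(0)  # (H, W)
+
+   # Eigen-CAM: project the (H*W, C) activations onto their first
+   # principal component. The sign of the component is arbitrary.
+   _, c, h, w = features.shape
+   flat = features.squeeze(0).reshape(c, h * w).T  # (H*W, C)
+   flat = flat - flat.mean(dim=0)                  # center before PCA
+   _, _, vh = torch.linalg.svd(flat, full_matrices=False)
+   eigen_cam = (flat @ vh[0]).reshape(h, w)        # (H, W)
+
+   # Min-max normalize to [0, 1] so the maps can be drawn as heatmaps.
+   def normalize(saliency: torch.Tensor) -> torch.Tensor:
+       return (saliency - saliency.min()) / (saliency.max() - saliency.min() + 1e-12)
+
+   activation_map, eigen_cam = normalize(activation_map), normalize(eigen_cam)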
+ +Below we show the comparison of the described algorithms. ``Access to the model internal state`` means that the method needs to modify the model's outputs and dump inner features. +``Per-class explanation support`` means generating different saliency maps for different classes. + ++-------------------------------------------+----------------+----------------+-------------------------------------------------------------------------+ +| Classification algorithm | Activation Map | Eigen-Cam | Recipro-CAM | ++===========================================+================+================+=========================================================================+ +| Need access to model internal state | Yes | Yes | Yes | ++-------------------------------------------+----------------+----------------+-------------------------------------------------------------------------+ +| Gradient-free | Yes | Yes | Yes | ++-------------------------------------------+----------------+----------------+-------------------------------------------------------------------------+ +| Single-shot | Yes | Yes | No (re-infer neck + head H*W times, where HxW – feature map size) | ++-------------------------------------------+----------------+----------------+-------------------------------------------------------------------------+ +| Per-class explanation support | No | No | Yes | ++-------------------------------------------+----------------+----------------+-------------------------------------------------------------------------+ +| Execution speed | Fast | Fast | Medium | ++-------------------------------------------+----------------+----------------+-------------------------------------------------------------------------+ + + +**************************** +XAI algorithms for detection +**************************** + +For detection networks, the following algorithms are used to generate saliency maps: + +- **Activation Map** - the same approach as for classification networks, using the outputs of the feature extractor. This is the algorithm used to generate saliency maps for two-stage detectors. + +- **DetClassProbabilityMap** - this approach takes the raw classification head output and uses class probability maps to calculate regions of interest for each class, so it creates a different saliency map for each class. This algorithm is implemented for single-stage detectors only. A minimal sketch of the idea is shown below. + +.. image:: ../../../../utils/images/xai_det.jpg + :width: 600 + :align: center + :alt: this image shows the detailed description of XAI detection algorithm + + +The main limitation of this method is that, due to the training loss design of most single-stage detectors, activation values drift towards the center of the object while propagating through the network. +This prevents us from getting a clear explanation in the input image space using intermediate activations.
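+Below is a minimal sketch of the DetClassProbabilityMap idea, assuming the raw classification-head output of a single-stage detector. The shapes, the anchor count, and the sigmoid activation are illustrative assumptions rather than the exact implementation.
+
+.. code-block:: python
+
+   import torch
+
+   num_anchors, num_classes = 1, 3
+   # Stand-in for a raw classification-head output: (batch, anchors * classes, H, W).
+   cls_logits = torch.randn(1, num_anchors * num_classes, 20, 20)
+
+   b, _, h, w = cls_logits.shape
+   scores = cls_logits.reshape(b, num_anchors, num_classes, h, w).sigmoid()
+
+   # One saliency map per class: keep the strongest anchor response at each
+   # spatial location, then min-max normalize each map for visualization.
+   per_class_maps = scores.max(dim=1).values.squeeze(0)  # (classes, H, W)
+   lo = per_class_maps.amin(dim=(1, 2), keepdim=True)
+   hi = per_class_maps.amax(dim=(1, 2), keepdim=True)
+   per_class_maps = (per_class_maps - lo) / (hi - lo + 1e-12)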
+ +Below we show the comparison of the described algorithms. ``Access to the model internal state`` means that the method needs to modify the model's outputs and dump inner features. +``Per-class explanation support`` means generating different saliency maps for different classes. ``Per-box explanation support`` means generating standalone saliency maps for each detected prediction. + + ++-------------------------------------------+----------------------------+--------------------------------------------+ +| Detection algorithm | Activation Map | DetClassProbabilityMap | ++===========================================+============================+============================================+ +| Need access to model internal state | Yes | Yes | ++-------------------------------------------+----------------------------+--------------------------------------------+ +| Gradient-free | Yes | Yes | ++-------------------------------------------+----------------------------+--------------------------------------------+ +| Single-shot | Yes | Yes | ++-------------------------------------------+----------------------------+--------------------------------------------+ +| Per-class explanation support | No | Yes | ++-------------------------------------------+----------------------------+--------------------------------------------+ +| Per-box explanation support | No | No | ++-------------------------------------------+----------------------------+--------------------------------------------+ +| Execution speed | Fast | Fast | ++-------------------------------------------+----------------------------+--------------------------------------------+ diff --git a/docs/source/guide/explanation/algorithms/action/action_classification.rst b/docs/source/guide/explanation/algorithms/action/action_classification.rst index 09fe5cb9350..6069d6932de 100644 --- a/docs/source/guide/explanation/algorithms/action/action_classification.rst +++ b/docs/source/guide/explanation/algorithms/action/action_classification.rst @@ -27,21 +27,23 @@ Refer to our tutorial for more information on how to train, validate, and optimi Models ****** -We support `X3D `_ for action classification. X3D is a deep learning model that was proposed in the paper "X3D: Expanding Architectures for Efficient Video Recognition" by Christoph Feichtenhofer. The model is an extension of the popular 2D convolutional neural network (CNN) architectures to the 3D domain, allowing it to efficiently process spatiotemporal information in videos. +Currently OpenVINO™ Training Extensions supports `X3D `_ and `MoViNet `_ for action classification.
-Currenly OpenVINO™ Training Extensions supports X3D-S model with below template: ++----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------+---------------------+-------------------------+ +| Template ID | Name | Complexity (GFLOPs) | Model size (MB) | ++========================================================================================================================================================================================+=========+=====================+=========================+ +| `Custom_Action_Classification_X3D `_ | X3D | 2.49 | 3.79 | ++----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------+---------------------+-------------------------+ +| `Custom_Action_Classificaiton_MoViNet `_ | MoViNet | 2.71 | 3.10 | ++----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------+---------------------+-------------------------+ -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------+---------------------+-------------------------+ -| Template ID | Name | Complexity (GFLOPs) | Model size (MB) | -+===============================================================================================================================================================================+=========+=====================+=========================+ -| `Custom_Action_Classification_X3D `_ | X3D | 2.49 | 3.79 | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------+---------------------+-------------------------+ - -In the table below the **top-1 accuracy** on some academic datasets are presented. Each model is trained with single Nvidia GeForce RTX3090. +In the table below, the **top-1 accuracy** on some academic datasets is presented. Each model is trained with a single NVIDIA GeForce RTX 3090. +-----------------------+------------+-----------------+ | Model name | HMDB51 | UCF101 | +=======================+============+=================+ | X3D | 67.19 | 87.89 | +-----------------------+------------+-----------------+ +| MoViNet | 62.74 | 81.32 | ++-----------------------+------------+-----------------+ diff --git a/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst b/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst index 07b571cefec..3923b077434 100644 --- a/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst +++ b/docs/source/guide/explanation/algorithms/classification/multi_class_classification.rst @@ -206,7 +206,7 @@ Unlike other tasks, ``--val-data-root`` is not needed.
$ otx train otx/algorithms/classification/configs/efficientnet_b0_cls_incr/template.yaml \ --train-data-root=tests/assets/imagenet_dataset_class_incremental \ params \ - --algo_backend.train_type=SELFSUPERVISED + --algo_backend.train_type=Selfsupervised After self-supervised training, pretrained weights can be use for supervised (incremental) learning like the below command: diff --git a/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst b/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst index 7cbe49852b3..e2528c20541 100644 --- a/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst +++ b/docs/source/guide/explanation/algorithms/object_detection/object_detection.rst @@ -95,20 +95,25 @@ To see which public backbones are available for the task, the following command $ otx find --backbone {torchvision, pytorchcv, mmcls, omz.mmcls} -.. In the table below the test mAP on some academic datasets using our :ref:`supervised pipeline ` is presented. -.. The results were obtained on our templates without any changes. -.. For hyperparameters, please, refer to the related template. -.. We trained each model with a single Nvidia GeForce RTX3090. +In the table below, the test mAP on some academic datasets using our :ref:`supervised pipeline ` is presented. -.. +-----------+------------+-----------+-----------+ -.. | Model name| COCO | PASCAL VOC| MinneApple| -.. +===========+============+===========+===========+ -.. | YOLOX | N/A | N/A | 24.5 | -.. +-----------+------------+-----------+-----------+ -.. | SSD | N/A | N/A | 31.2 | -.. +-----------+------------+-----------+-----------+ -.. | ATSS | N/A | N/A | 42.5 | -.. +-----------+------------+-----------+-----------+ +For the `COCO `__ dataset, the accuracy of the pretrained weights is shown. This means the weights are undertrained for COCO and do not achieve the best possible result. +That is because the purpose of pretrained models is to learn basic features from a dataset as large and diverse as COCO and to use these weights to get good results on other custom datasets right from the start. + +The results on `Pascal VOC `_, `BCCD `_, `MinneApple `_ and `WGISD `_ were obtained on our templates without any changes. +BCCD is an easy dataset with focused large objects, while MinneApple and WGISD have small objects that are hard to distinguish from the background. +For hyperparameters, please refer to the related template. +We trained each model with a single NVIDIA GeForce RTX 3090. + ++-----------+------------+-----------+-----------+-----------+-----------+ +| Model name| COCO | PASCAL VOC| BCCD | MinneApple| WGISD | ++===========+============+===========+===========+===========+===========+ +| YOLOX | 32.0 | 66.6 | 60.3 | 24.5 | 44.1 | ++-----------+------------+-----------+-----------+-----------+-----------+ +| SSD | 13.5 | 50.0 | 54.2 | 31.2 | 45.9 | ++-----------+------------+-----------+-----------+-----------+-----------+ +| ATSS | 32.5 | 68.7 | 61.5 | 42.5 | 57.5 | ++-----------+------------+-----------+-----------+-----------+-----------+ @@ -133,7 +138,7 @@ Overall, OpenVINO™ Training Extensions utilizes powerful techniques for improv Please, refer to the :doc:`tutorial <../../../tutorials/advanced/semi_sl>` how to train semi supervised learning. -In the table below the mAP on toy data sample from `COCO `_ dataset using our pipeline is presented. +In the table below, the mAP on a toy data sample from the `COCO `__ dataset using our pipeline is presented.
We sample 400 images that contain one of [person, car, bus] for labeled train images. And 4000 images for unlabeled images. For validation 100 images are selected from val2017. diff --git a/docs/source/guide/explanation/algorithms/segmentation/semantic_segmentation.rst b/docs/source/guide/explanation/algorithms/segmentation/semantic_segmentation.rst index ede1c246464..5ae38d350fb 100644 --- a/docs/source/guide/explanation/algorithms/segmentation/semantic_segmentation.rst +++ b/docs/source/guide/explanation/algorithms/segmentation/semantic_segmentation.rst @@ -165,7 +165,7 @@ To enable self-supervised training, the command below can be executed: $ otx train otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml \ --train-data-roots=tests/assets/common_semantic_segmentation_dataset/train/images \ params \ - --algo_backend.train_type=SELFSUPERVISED + --algo_backend.train_type=Selfsupervised After self-supervised training, pretrained weights can be use for supervised (incremental) learning like the below command: diff --git a/docs/source/guide/get_started/quick_start_guide/cli_commands.rst b/docs/source/guide/get_started/quick_start_guide/cli_commands.rst index 2d628d9c4a9..f7878dce927 100644 --- a/docs/source/guide/get_started/quick_start_guide/cli_commands.rst +++ b/docs/source/guide/get_started/quick_start_guide/cli_commands.rst @@ -92,7 +92,7 @@ Building workspace folder Comma-separated paths to unlabeled file list --task TASK The currently supported options: ('CLASSIFICATION', 'DETECTION', 'INSTANCE_SEGMENTATION', 'SEGMENTATION', 'ACTION_CLASSIFICATION', 'ACTION_DETECTION', 'ANOMALY_CLASSIFICATION', 'ANOMALY_DETECTION', 'ANOMALY_SEGMENTATION'). --train-type TRAIN_TYPE - The currently supported options: dict_keys(['INCREMENTAL', 'SEMISUPERVISED', 'SELFSUPERVISED']). + The currently supported options: dict_keys(['Incremental', 'Semisupervised', 'Selfsupervised']). --work-dir WORK_DIR Location where the workspace. --model MODEL Enter the name of the model you want to use. (Ex. EfficientNet-B0). --backbone BACKBONE Available Backbone Type can be found using 'otx find --backbone {framework}'. @@ -181,7 +181,7 @@ However, if you created a workspace with ``otx build``, the training process can --unlabeled-file-list UNLABELED_FILE_LIST Comma-separated paths to unlabeled file list --train-type TRAIN_TYPE - The currently supported options: dict_keys(['INCREMENTAL', 'SEMISUPERVISED', 'SELFSUPERVISED']). + The currently supported options: dict_keys(['Incremental', 'Semisupervised', 'Selfsupervised']). --load-weights LOAD_WEIGHTS Load model weights from previously saved checkpoint. --resume-from RESUME_FROM @@ -399,7 +399,7 @@ The command below will evaluate the trained model on the provided dataset: Explanation *********** -``otx explain`` runs the explanation algorithm of a model on the specific dataset. It helps explain the model's decision-making process in a way that is easily understood by humans. +``otx explain`` runs the explainable AI (XAI) algorithm of a model on the specific dataset. It helps explain the model's decision-making process in a way that is easily understood by humans. 
With the ``--help`` command, you can list additional information, such as its parameters common to all model templates: diff --git a/docs/source/guide/index.rst b/docs/source/guide/index.rst index f798f932241..2715ced60c0 100644 --- a/docs/source/guide/index.rst +++ b/docs/source/guide/index.rst @@ -30,7 +30,7 @@ Guide reference/api/index reference/algorithm/index - reference/core/data + reference/core/index reference/hpo/hpo reference/mpa/index diff --git a/docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst b/docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst index d25ccf195a3..540b104e094 100644 --- a/docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst +++ b/docs/source/guide/reference/algorithm/classification/adapters/mmcls.rst @@ -7,4 +7,56 @@ mmclassification .. automodule:: otx.algorithms.classification.adapters.mmcls :members: - :undoc-members: \ No newline at end of file + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.exporter + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.trainer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.incremental.trainer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.classification.adapters.mmcls.task.semisl.trainer + :members: + :undoc-members: diff --git a/docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst b/docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst index 6be3c0ea081..67b2d413d82 100644 --- a/docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst +++ b/docs/source/guide/reference/algorithm/detection/adapters/mmdet.rst @@ -7,4 +7,56 @@ mmdetection .. automodule:: otx.algorithms.detection.adapters.mmdet :members: - :undoc-members: \ No newline at end of file + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.exporter + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.trainer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental + :members: + :undoc-members: + +.. 
automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.incremental.trainer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.detection.adapters.mmdet.tasks.semisl.trainer + :members: + :undoc-members: diff --git a/docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst b/docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst index 9d72fcdc48a..c03e42319c6 100644 --- a/docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst +++ b/docs/source/guide/reference/algorithm/segmentation/adapters/mmseg.rst @@ -7,4 +7,56 @@ mmsegmentation .. automodule:: otx.algorithms.segmentation.adapters.mmseg :members: - :undoc-members: \ No newline at end of file + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.exporter + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.trainer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.trainer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.inferrer + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.stage + :members: + :undoc-members: + +.. automodule:: otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.trainer + :members: + :undoc-members: diff --git a/docs/source/guide/reference/core/index.rst b/docs/source/guide/reference/core/index.rst new file mode 100644 index 00000000000..b55754cce07 --- /dev/null +++ b/docs/source/guide/reference/core/index.rst @@ -0,0 +1,8 @@ +Core +==== + +.. toctree:: + :maxdepth: 1 + + data + ov/index diff --git a/docs/source/guide/reference/mpa/modules/ov/graph.rst b/docs/source/guide/reference/core/ov/graph.rst similarity index 51% rename from docs/source/guide/reference/mpa/modules/ov/graph.rst rename to docs/source/guide/reference/core/ov/graph.rst index 01bab6ca9b4..6f47f9e154b 100644 --- a/docs/source/guide/reference/mpa/modules/ov/graph.rst +++ b/docs/source/guide/reference/core/ov/graph.rst @@ -5,18 +5,18 @@ Graph :maxdepth: 3 :caption: Contents: -.. automodule:: otx.mpa.modules.ov.graph +.. 
automodule:: otx.core.ov.graph :members: :undoc-members: -.. automodule:: otx.mpa.modules.ov.graph.graph +.. automodule:: otx.core.ov.graph.graph :members: :undoc-members: -.. automodule:: otx.mpa.modules.ov.graph.utils +.. automodule:: otx.core.ov.graph.utils :members: :undoc-members: -.. automodule:: otx.mpa.modules.ov.graph.parsers +.. automodule:: otx.core.ov.graph.parsers :members: :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/ov/index.rst b/docs/source/guide/reference/core/ov/index.rst similarity index 56% rename from docs/source/guide/reference/mpa/modules/ov/index.rst rename to docs/source/guide/reference/core/ov/index.rst index ec585d95c7e..07ce1abd4cf 100644 --- a/docs/source/guide/reference/mpa/modules/ov/index.rst +++ b/docs/source/guide/reference/core/ov/index.rst @@ -8,18 +8,18 @@ OpenVINO models ops -.. automodule:: otx.mpa.modules.ov +.. automodule:: otx.core.ov :members: :undoc-members: -.. automodule:: otx.mpa.modules.ov.omz_wrapper +.. automodule:: otx.core.ov.omz_wrapper :members: :undoc-members: -.. automodule:: otx.mpa.modules.ov.registry +.. automodule:: otx.core.ov.registry :members: :undoc-members: -.. automodule:: otx.mpa.modules.ov.utils +.. automodule:: otx.core.ov.utils :members: :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/core/ov/models.rst b/docs/source/guide/reference/core/ov/models.rst new file mode 100644 index 00000000000..c3f535e82ab --- /dev/null +++ b/docs/source/guide/reference/core/ov/models.rst @@ -0,0 +1,22 @@ +Models +^^^^^^^^ + +.. toctree:: + :maxdepth: 3 + :caption: Contents: + +.. automodule:: otx.core.ov.models + :members: + :undoc-members: + +.. automodule:: otx.core.ov.models.mmov_model + :members: + :undoc-members: + +.. automodule:: otx.core.ov.models.ov_model + :members: + :undoc-members: + +.. automodule:: otx.core.ov.models.parser_mixin + :members: + :undoc-members: diff --git a/docs/source/guide/reference/core/ov/ops.rst b/docs/source/guide/reference/core/ov/ops.rst new file mode 100644 index 00000000000..7b249e02702 --- /dev/null +++ b/docs/source/guide/reference/core/ov/ops.rst @@ -0,0 +1,82 @@ +OPS +^^^ + +.. toctree:: + :maxdepth: 3 + :caption: Contents: + +.. automodule:: otx.core.ov.ops + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.activations + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.arithmetics + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.builder + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.convolutions + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.generation + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.image_processings + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.infrastructures + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.matmuls + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.movements + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.normalizations + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.object_detections + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.op + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.poolings + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.reductions + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.shape_manipulations + :members: + :undoc-members: + +.. 
automodule:: otx.core.ov.ops.sorting_maximization + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.type_conversions + :members: + :undoc-members: + +.. automodule:: otx.core.ov.ops.utils + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/classification.rst b/docs/source/guide/reference/mpa/classification.rst deleted file mode 100644 index ff8abf3bccc..00000000000 --- a/docs/source/guide/reference/mpa/classification.rst +++ /dev/null @@ -1,58 +0,0 @@ -Classification -^^^^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.cls - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.exporter - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.trainer - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.incremental - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.incremental.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.incremental.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.incremental.trainer - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.semisl - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.semisl.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.semisl.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.cls.semisl.trainer - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/deploy.rst b/docs/source/guide/reference/mpa/deploy.rst deleted file mode 100644 index 7a4d154095a..00000000000 --- a/docs/source/guide/reference/mpa/deploy.rst +++ /dev/null @@ -1,34 +0,0 @@ -Deploy -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.deploy - :members: - :undoc-members: - -.. automodule:: otx.mpa.deploy.apis - :members: - :undoc-members: - -.. automodule:: otx.mpa.deploy.utils - :members: - :undoc-members: - -.. automodule:: otx.mpa.deploy.utils.mmdeploy - :members: - :undoc-members: - -.. automodule:: otx.mpa.deploy.utils.onnx - :members: - :undoc-members: - -.. automodule:: otx.mpa.deploy.utils.operations_domain - :members: - :undoc-members: - -.. automodule:: otx.mpa.deploy.utils.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/detection.rst b/docs/source/guide/reference/mpa/detection.rst deleted file mode 100644 index 365b4fad1a7..00000000000 --- a/docs/source/guide/reference/mpa/detection.rst +++ /dev/null @@ -1,58 +0,0 @@ -Detection -^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.det - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.exporter - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.trainer - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.incremental - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.incremental.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.incremental.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.incremental.trainer - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.semisl - :members: - :undoc-members: - -.. 
automodule:: otx.mpa.det.semisl.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.semisl.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.det.semisl.trainer - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/index.rst b/docs/source/guide/reference/mpa/index.rst index d1e88cd2ae8..2b7ebc58cf3 100644 --- a/docs/source/guide/reference/mpa/index.rst +++ b/docs/source/guide/reference/mpa/index.rst @@ -5,8 +5,4 @@ Model Preparation Algorithm :maxdepth: 1 modules/index - classification - detection - segmentation - deploy utils diff --git a/docs/source/guide/reference/mpa/modules/datasets.rst b/docs/source/guide/reference/mpa/modules/datasets.rst deleted file mode 100644 index 3d52750d3f6..00000000000 --- a/docs/source/guide/reference/mpa/modules/datasets.rst +++ /dev/null @@ -1,23 +0,0 @@ -Datasets -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.datasets - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.datasets.composed_dataloader - :members: - :undoc-members: - - -.. automodule:: otx.mpa.modules.datasets.pipelines - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.datasets.samplers - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/hooks.rst b/docs/source/guide/reference/mpa/modules/hooks.rst deleted file mode 100644 index 522a1d3a61f..00000000000 --- a/docs/source/guide/reference/mpa/modules/hooks.rst +++ /dev/null @@ -1,86 +0,0 @@ -Hooks -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.hooks - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.adaptive_training_hooks - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.cancel_interface_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.checkpoint_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.composed_dataloaders_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.early_stopping_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.eval_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.fp16_sam_optimizer_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.ib_loss_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.logger_replace_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.model_ema_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.model_ema_v2_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.no_bias_decay_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.recording_forward_hooks - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.sam_optimizer_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.save_initial_weight_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.semisl_cls_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.task_adapt_hook - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.hooks.unbiased_teacher_hook - :members: - :undoc-members: - -.. 
automodule:: otx.mpa.modules.hooks.workflow_hooks - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/index.rst b/docs/source/guide/reference/mpa/modules/index.rst deleted file mode 100644 index 0f463fdd61d..00000000000 --- a/docs/source/guide/reference/mpa/modules/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -Modules -=================== - -.. toctree:: - :maxdepth: 1 - - models/index - datasets - hooks - optimizer - ov/index - utils diff --git a/docs/source/guide/reference/mpa/modules/models/backbones.rst b/docs/source/guide/reference/mpa/modules/models/backbones.rst deleted file mode 100644 index 249f934ebb5..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/backbones.rst +++ /dev/null @@ -1,14 +0,0 @@ -Backbones -^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.models.backbones - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.backbones.litehrnet - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/models/classifiers.rst b/docs/source/guide/reference/mpa/modules/models/classifiers.rst deleted file mode 100644 index 7f11ac3ca4a..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/classifiers.rst +++ /dev/null @@ -1,26 +0,0 @@ -Classifiers -^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.models.classifiers - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.classifiers.sam_classifier - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.classifiers.sam_classifier_mixin - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.classifiers.semisl_classifier - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.classifiers.supcon_classifier - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/models/heads.rst b/docs/source/guide/reference/mpa/modules/models/heads.rst deleted file mode 100644 index 5b5a5de1c83..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/heads.rst +++ /dev/null @@ -1,63 +0,0 @@ -Heads -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.models.heads - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.aggregator_mixin - :members: - :undoc-members: - - -.. automodule:: otx.mpa.modules.models.heads.custom_cls_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.custom_fcn_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.custom_hierarchical_linear_cls_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.custom_hierarchical_non_linear_cls_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.custom_multi_label_linear_cls_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.custom_multi_label_non_linear_cls_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.mix_loss_mixin - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.non_linear_cls_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.pixel_weights_mixin - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.segment_out_norm_mixin - :members: - :undoc-members: - -.. 
automodule:: otx.mpa.modules.models.heads.semisl_cls_head - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.heads.supcon_cls_head - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/models/index.rst b/docs/source/guide/reference/mpa/modules/models/index.rst deleted file mode 100644 index 45d93070e47..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -Models -^^^^^^^ - -.. toctree:: - :maxdepth: 1 - - backbones - classifiers - heads - losses - scalar_schedulers - segmentors - utils \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/models/losses.rst b/docs/source/guide/reference/mpa/modules/models/losses.rst deleted file mode 100644 index d084ee3a9ab..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/losses.rst +++ /dev/null @@ -1,51 +0,0 @@ -Losses -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.models.losses - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.asymmetric_angular_loss_with_ignore - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.asymmetric_loss_with_ignore - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.barlowtwins_loss - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.base_pixel_loss - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.base_weighted_loss - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.cross_entropy_loss_with_ignore - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.cross_entropy_loss - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.ib_loss - :members: - :undoc-members: - - -.. automodule:: otx.mpa.modules.models.losses.mpa_pixel_base - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.losses.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/models/scalar_schedulers.rst b/docs/source/guide/reference/mpa/modules/models/scalar_schedulers.rst deleted file mode 100644 index 869c52a169b..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/scalar_schedulers.rst +++ /dev/null @@ -1,10 +0,0 @@ -Scalar Schedulers -^^^^^^^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.models.scalar_schedulers - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/models/segmentors.rst b/docs/source/guide/reference/mpa/modules/models/segmentors.rst deleted file mode 100644 index 48184f2e04c..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/segmentors.rst +++ /dev/null @@ -1,30 +0,0 @@ -Segmentors -^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.models.segmentors - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.segmentors.class_incr_encoder_decoder - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.segmentors.mean_teacher_segmentor - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.segmentors.mix_loss_mixin - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.models.segmentors.otx_encoder_decoder - :members: - :undoc-members: - -.. 
automodule:: otx.mpa.modules.models.segmentors.pixel_weights_mixin - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/models/utils.rst b/docs/source/guide/reference/mpa/modules/models/utils.rst deleted file mode 100644 index dd67394313b..00000000000 --- a/docs/source/guide/reference/mpa/modules/models/utils.rst +++ /dev/null @@ -1,10 +0,0 @@ -Utils -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.models.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/optimizer.rst b/docs/source/guide/reference/mpa/modules/optimizer.rst deleted file mode 100644 index 767da78bc9f..00000000000 --- a/docs/source/guide/reference/mpa/modules/optimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -Optimizer -^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.optimizer - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.optimizer.lars - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/ov/models.rst b/docs/source/guide/reference/mpa/modules/ov/models.rst deleted file mode 100644 index 362d788172d..00000000000 --- a/docs/source/guide/reference/mpa/modules/ov/models.rst +++ /dev/null @@ -1,30 +0,0 @@ -Models -^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.ov.models - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.models.mmov_model - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.models.ov_model - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.models.parser_mixin - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.models.mmcls - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.models.mmseg - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/ov/ops.rst b/docs/source/guide/reference/mpa/modules/ov/ops.rst deleted file mode 100644 index 72b59b1770f..00000000000 --- a/docs/source/guide/reference/mpa/modules/ov/ops.rst +++ /dev/null @@ -1,82 +0,0 @@ -OPS -^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.ov.ops - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.activations - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.arithmetics - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.builder - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.convolutions - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.generation - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.image_processings - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.infrastructures - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.matmuls - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.movements - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.normalizations - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.object_detections - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.op - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.poolings - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.reductions - :members: - :undoc-members: - -.. 
automodule:: otx.mpa.modules.ov.ops.shape_manipulations - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.sorting_maximization - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.type_conversions - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.ov.ops.utils - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/modules/utils.rst b/docs/source/guide/reference/mpa/modules/utils.rst deleted file mode 100644 index a22e69d158c..00000000000 --- a/docs/source/guide/reference/mpa/modules/utils.rst +++ /dev/null @@ -1,22 +0,0 @@ -Utils -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.modules.utils - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.utils.distance_utils - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.utils.seg_utils - :members: - :undoc-members: - -.. automodule:: otx.mpa.modules.utils.task_adapt - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/segmentation.rst b/docs/source/guide/reference/mpa/segmentation.rst deleted file mode 100644 index a59c5463524..00000000000 --- a/docs/source/guide/reference/mpa/segmentation.rst +++ /dev/null @@ -1,58 +0,0 @@ -Segmentation -^^^^^^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.seg - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.exporter - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.trainer - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.incremental - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.incremental.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.incremental.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.incremental.trainer - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.semisl - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.semisl.inferrer - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.semisl.stage - :members: - :undoc-members: - -.. automodule:: otx.mpa.seg.semisl.trainer - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/reference/mpa/utils.rst b/docs/source/guide/reference/mpa/utils.rst deleted file mode 100644 index 9419a4a872c..00000000000 --- a/docs/source/guide/reference/mpa/utils.rst +++ /dev/null @@ -1,30 +0,0 @@ -Utils -^^^^^^^ - -.. toctree:: - :maxdepth: 3 - :caption: Contents: - -.. automodule:: otx.mpa.utils - :members: - :undoc-members: - -.. automodule:: otx.mpa.utils.config_utils - :members: - :undoc-members: - -.. automodule:: otx.mpa.utils.convert_keys - :members: - :undoc-members: - -.. automodule:: otx.mpa.utils.ext_loader - :members: - :undoc-members: - -.. automodule:: otx.mpa.utils.file - :members: - :undoc-members: - -.. automodule:: otx.mpa.utils.mo_wrapper - :members: - :undoc-members: \ No newline at end of file diff --git a/docs/source/guide/tutorials/advanced/self_sl.rst b/docs/source/guide/tutorials/advanced/self_sl.rst index 96de2beb42c..e474fe0c0bc 100644 --- a/docs/source/guide/tutorials/advanced/self_sl.rst +++ b/docs/source/guide/tutorials/advanced/self_sl.rst @@ -21,7 +21,7 @@ The process has been tested on the following configuration: Setup virtual environment ************************* -1. 
You can follow the installation process from a :doc:`quick start guide <../../../get_started/quick_start_guide/installation>` +1. You can follow the installation process from a :doc:`quick start guide <../../get_started/quick_start_guide/installation>` to create a universal virtual environment for OpenVINO™ Training Extensions. 2. Activate your virtual @@ -64,23 +64,23 @@ for **self-supervised learning** by running the following command: .. code-block:: - (otx) ...$ otx build --train-data-roots data/flower_photos --model MobileNet-V3-large-1x --train-type SELFSUPERVISED --work-dir otx-workspace-CLASSIFICATION-SELFSUPERVISED + (otx) ...$ otx build --train-data-roots data/flower_photos --model MobileNet-V3-large-1x --train-type Selfsupervised --work-dir otx-workspace-CLASSIFICATION-Selfsupervised - [*] Workspace Path: otx-workspace-CLASSIFICATION-SELFSUPERVISED + [*] Workspace Path: otx-workspace-CLASSIFICATION-Selfsupervised [*] Load Model Template ID: Custom_Image_Classification_MobileNet-V3-large-1x - [*] Load Model Name: MobileNet-V3-large-1x[*] - Updated: otx-workspace-CLASSIFICATION-SELFSUPERVISED/selfsl/model.py - [*] - Updated: otx-workspace-CLASSIFICATION-SELFSUPERVISED/selfsl/data_pipeline.py - [*] - Updated: otx-workspace-CLASSIFICATION-SELFSUPERVISED/deployment.py - [*] - Updated: otx-workspace-CLASSIFICATION-SELFSUPERVISED/hpo_config.yaml - [*] - Updated: otx-workspace-CLASSIFICATION-SELFSUPERVISED/model_hierarchical.py - [*] - Updated: otx-workspace-CLASSIFICATION-SELFSUPERVISED/model_multilabel.py - [*] Update data configuration file to: otx-workspace-CLASSIFICATION-SELFSUPERVISED/data.yaml + [*] Load Model Name: MobileNet-V3-large-1x[*] - Updated: otx-workspace-CLASSIFICATION-Selfsupervised/selfsl/model.py + [*] - Updated: otx-workspace-CLASSIFICATION-Selfsupervised/selfsl/data_pipeline.py + [*] - Updated: otx-workspace-CLASSIFICATION-Selfsupervised/deployment.py + [*] - Updated: otx-workspace-CLASSIFICATION-Selfsupervised/hpo_config.yaml + [*] - Updated: otx-workspace-CLASSIFICATION-Selfsupervised/model_hierarchical.py + [*] - Updated: otx-workspace-CLASSIFICATION-Selfsupervised/model_multilabel.py + [*] Update data configuration file to: otx-workspace-CLASSIFICATION-Selfsupervised/data.yaml .. note:: Three things must be considered to set the workspace for self-supervised learning: - 1. add ``--train-type SELFSUPERVISED`` in the command to get the training components for self-supervised learning, + 1. add ``--train-type Selfsupervised`` in the command to get the training components for self-supervised learning, 2. update the path set as ``train-data-roots``, 3. and add ``--work-dir`` to distinguish self-supervised learning workspace from supervised learning workspace. @@ -102,7 +102,7 @@ After the workspace creation, the workspace structure is as follows: │   ├── train │   └── val └── template.yaml - otx-workspace-CLASSIFICATION-SELFSUPERVISED + otx-workspace-CLASSIFICATION-Selfsupervised ├── configuration.yaml ├── data.yaml ├── deployment.py @@ -121,20 +121,20 @@ After the workspace creation, the workspace structure is as follows: For `VOC2012 dataset `_ used in :doc:`semantic segmentation tutorial <../base/how_to_train/semantic_segmentation>`, for example, the path ``data/VOCdevkit/VOC2012/JPEGImages`` must be set instead of ``data/VOCdevkit/VOC2012``. Please refer to :ref:`Explanation of Self-Supervised Learning for Semantic Segmentation `. - And don't forget to add ``--train-type SELFSUPERVISED``. + And don't forget to add ``--train-type Selfsupervised``. .. 
code-block:: (otx) ...$ otx build --train-data-roots data/VOCdevkit/VOC2012/JPEGImages \ --model Lite-HRNet-18-mod2 \ - --train-type SELFSUPERVISED + --train-type Selfsupervised 4. To start training we need to call ``otx train`` command in **self-supervised learning** workspace: .. code-block:: - (otx) ...$ cd otx-workspace-CLASSIFICATION-SELFSUPERVISED + (otx) ...$ cd otx-workspace-CLASSIFICATION-Selfsupervised (otx) ...$ otx train --data ../otx-workspace-CLASSIFICATION/data.yaml ... @@ -168,7 +168,7 @@ After pre-training progress, start fine-tuning by calling the below command with .. code-block:: (otx) ...$ cd ../otx-workspace-CLASSIFICATION - (otx) ...$ otx train --load-weights ../otx-workspace-CLASSIFICATION-SELFSUPERVISED/models/weights.pth + (otx) ...$ otx train --load-weights ../otx-workspace-CLASSIFICATION-Selfsupervised/models/weights.pth ... diff --git a/docs/source/guide/tutorials/advanced/semi_sl.rst b/docs/source/guide/tutorials/advanced/semi_sl.rst index cce334631e9..fa866675526 100644 --- a/docs/source/guide/tutorials/advanced/semi_sl.rst +++ b/docs/source/guide/tutorials/advanced/semi_sl.rst @@ -44,7 +44,7 @@ This tutorial explains how to train a model in semi-supervised learning mode and Setup virtual environment ************************* -1. You can follow the installation process from a :doc:`quick start guide <../../../get_started/quick_start_guide/installation>` +1. You can follow the installation process from a :doc:`quick start guide <../../get_started/quick_start_guide/installation>` to create a universal virtual environment for OpenVINO™ Training Extensions. 2. Activate your virtual @@ -73,7 +73,7 @@ Enable via ``otx build`` 1. To enable semi-supervsied learning via ``otx build``, we need to add arguments ``--unlabeled-data-roots`` and ``--train-type``. OpenVINO™ Training Extensions receives the root path where unlabeled images are by ``--unlabeled-data-roots``. -We should put the path where unlabeled data are contained. It also provides us ``--train-type`` to select the type of training scheme. All we have to do for that is specifying it as **SEMISUPERVISED**. +We should put the path where the unlabeled data are contained. It also provides ``--train-type`` to select the type of training scheme; all we have to do is specify it as **Semisupervised**. .. note:: @@ -85,7 +85,7 @@ We should put the path where unlabeled data are contained. It also provides us ` .. code-block:: - (otx) ...$ otx build --train-data-roots data/flower_photos --unlabeled-data-roots tests/assets/imagenet_dataset --model MobileNet-V3-large-1x --train-type SEMISUPERVISED + (otx) ...$ otx build --train-data-roots data/flower_photos --unlabeled-data-roots tests/assets/imagenet_dataset --model MobileNet-V3-large-1x --train-type Semisupervised [*] Workspace Path: otx-workspace-CLASSIFICATION @@ -107,14 +107,14 @@ command in our workspace: (otx) ...$ otx train -In the train log, you can check that the train type is set to **SEMISUPERVISED** and related configurations are properly loaded as following: +In the train log, you can check that the train type is set to **Semisupervised** and related configurations are properly loaded as follows: .. code-block:: ...
2023-02-22 06:21:54,492 | INFO : called _init_recipe() - 2023-02-22 06:21:54,492 | INFO : train type = SEMISUPERVISED - 2023-02-22 06:21:54,492 | INFO : train type = SEMISUPERVISED - loading training_extensions/otx/recipes/stages/classification/semisl.yaml + 2023-02-22 06:21:54,492 | INFO : train type = Semisupervised + 2023-02-22 06:21:54,492 | INFO : train type = Semisupervised - loading training_extensions/otx/recipes/stages/classification/semisl.yaml 2023-02-22 06:21:54,500 | INFO : Replacing runner from EpochRunnerWithCancel to EpochRunnerWithCancel. 2023-02-22 06:21:54,503 | INFO : initialized recipe = training_extensions/otx/recipes/stages/classification/semisl.yaml ... @@ -128,23 +128,23 @@ Enable via ``otx train`` *************************** 1. To enable semi-supervised learning directly via ``otx train``, we need to add arguments ``--unlabeled-data-roots`` and ``--algo_backend.train_type`` -which is one of template-specific parameters (details are provided in `quick start guide <../../get_started/quick_start_guide/cli_commands.html#training>`__.) +which is one of the template-specific parameters (details are provided in the `quick start guide <../../get_started/quick_start_guide/cli_commands.html#training>`__). .. code-block:: (otx) ...$ otx train otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/template.yaml \ --train-data-roots data/flower_photos \ --unlabeled-data-roots tests/assets/imagenet_dataset \ - params --algo_backend.train_type SEMISUPERVISED + params --algo_backend.train_type Semisupervised -In the train log, you can check that the train type is set to **SEMISUPERVISED** and related configurations are properly loaded as following: +In the train log, you can check that the train type is set to **Semisupervised** and related configurations are properly loaded as follows: .. code-block:: ... 2023-02-22 06:21:54,492 | INFO : called _init_recipe() - 2023-02-22 06:21:54,492 | INFO : train type = SEMISUPERVISED - 2023-02-22 06:21:54,492 | INFO : train type = SEMISUPERVISED - loading training_extensions/otx/recipes/stages/classification/semisl.yaml + 2023-02-22 06:21:54,492 | INFO : train type = Semisupervised + 2023-02-22 06:21:54,492 | INFO : train type = Semisupervised - loading training_extensions/otx/recipes/stages/classification/semisl.yaml 2023-02-22 06:21:54,500 | INFO : Replacing runner from EpochRunnerWithCancel to EpochRunnerWithCancel. 2023-02-22 06:21:54,503 | INFO : initialized recipe = training_extensions/otx/recipes/stages/classification/semisl.yaml ... diff --git a/docs/source/guide/tutorials/base/demo.rst b/docs/source/guide/tutorials/base/demo.rst index 2ca856df0ae..735cc664515 100644 --- a/docs/source/guide/tutorials/base/demo.rst +++ b/docs/source/guide/tutorials/base/demo.rst @@ -8,7 +8,7 @@ It allows you to apply the model on the custom data or the online footage from a This tutorial uses an object detection model for example, however for other tasks the functionality remains the same - you just need to replace the input dataset with your own. -For visualization you use images from WGISD dataset from the :doc: `object detection tutorial `. +For visualization you use images from the WGISD dataset from the :doc:`object detection tutorial `. 1. Activate the virtual environment created in the previous step. @@ -69,8 +69,8 @@ You can check a list of camera devices by running the command line below on Linux ..
code-block:: - sudo apt-get install v4l-utils - v4l2-ctl --list-devices + (demo) ...$ sudo apt-get install v4l-utils + (demo) ...$ v4l2-ctl --list-devices The output will look like this: diff --git a/docs/source/guide/tutorials/base/explain.rst b/docs/source/guide/tutorials/base/explain.rst index dba54b63d14..a9367f19887 100644 --- a/docs/source/guide/tutorials/base/explain.rst +++ b/docs/source/guide/tutorials/base/explain.rst @@ -26,9 +26,28 @@ at the path specified by ``--save-explanation-to``. .. code-block:: - otx explain --explain-data-roots otx-workspace-DETECTION/splitted_dataset/val/ --save-explanation-to outputs/explanation --load-weights outputs/weights.pth + otx explain --explain-data-roots otx-workspace-DETECTION/splitted_dataset/val/ \ + --save-explanation-to outputs/explanation \ + --load-weights outputs/weights.pth -3. As a result we will get a folder with a pair of generated +3. To specify the saliency map creation algorithm for classification, +we can define the ``--explain-algorithm`` parameter (see the example after the note below). + +- ``activationmap`` - for the activation map classification algorithm +- ``eigencam`` - for the Eigen-CAM classification algorithm +- ``classwisesaliencymap`` - for the Recipro-CAM classification algorithm; this is the default method + +For the detection task, we can choose between the following methods: + +- ``activationmap`` - for the activation map detection algorithm +- ``classwisesaliencymap`` - for the DetClassProbabilityMap algorithm (works for single-stage detectors only) + +.. note:: + + Learn more about Explainable AI and its algorithms in the :doc:`XAI explanation section <../../explanation/additional_features/xai>`.
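+
+For example, to request the default Recipro-CAM method explicitly, we can reuse the paths from the command above (a minimal sketch; omitting ``--explain-algorithm`` gives the same result, since ``classwisesaliencymap`` is the default):
+
+.. code-block::
+
+   otx explain --explain-data-roots otx-workspace-DETECTION/splitted_dataset/val/ \
+               --save-explanation-to outputs/explanation \
+               --load-weights outputs/weights.pth \
+               --explain-algorithm classwisesaliencymap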
+ + +4. As a result we will get a folder with a pair of generated images for each image in ``--explain-data-roots``: - saliency map - where red color means more attention of the model diff --git a/docs/source/guide/tutorials/base/how_to_train/action_classification.rst b/docs/source/guide/tutorials/base/how_to_train/action_classification.rst index f41f6051fb7..c81d5946b56 100644 --- a/docs/source/guide/tutorials/base/how_to_train/action_classification.rst +++ b/docs/source/guide/tutorials/base/how_to_train/action_classification.rst @@ -74,7 +74,6 @@ According to the `documentation `_ format using the following command: @@ -128,17 +127,18 @@ To see the list of supported templates, run the following command: .. note:: - OpenVINO™ Training Extensions is supporting only X3D model template now, other architecture will be supported in near future. + OpenVINO™ Training Extensions supports the X3D and MoViNet templates now; other architectures will be supported in the future. .. code-block:: (otx) ...$ otx find --task action_classification - +-----------------------+----------------------------------+------+----------------------------------------------------------------+ - | TASK | ID | NAME | BASE PATH | - +-----------------------+----------------------------------+------+----------------------------------------------------------------+ - | ACTION_CLASSIFICATION | Custom_Action_Classification_X3D | X3D | otx/algorithms/action/configs/classification/x3d/template.yaml | - +-----------------------+----------------------------------+------+----------------------------------------------------------------+ + +-----------------------+--------------------------------------+---------+-----------------------------------------------------------------------+ + | TASK | ID | NAME | BASE PATH | + +-----------------------+--------------------------------------+---------+-----------------------------------------------------------------------+ + | ACTION_CLASSIFICATION | Custom_Action_Classification_X3D | X3D | ../otx/algorithms/action/configs/classification/x3d/template.yaml | + | ACTION_CLASSIFICATION | Custom_Action_Classification_MoViNet | MoViNet | ../otx/algorithms/action/configs/classification/movinet/template.yaml | + +-----------------------+--------------------------------------+---------+-----------------------------------------------------------------------+ All commands will be run on the X3D model. It's a light model, that achieves competitive accuracy while keeping the inference fast. @@ -254,7 +254,7 @@ Optimization ************* 1. You can further optimize the model with ``otx optimize``. -Currently, only POT is supported for action classsification. NNCF will be supported in near future. +Currently, quantization via POT is supported for the X3D template. MoViNet will be supported in the near future. Refer to :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section for more details on model optimization. 2. Example command for optimizing @@ -275,4 +275,4 @@ Keep in mind that POT will take some time (generally less than NNCF optimization) efficient model representation ready-to-use action classification model. The following tutorials provide further steps on how to :doc:`deploy <../deploy>` and use your model in the :doc:`demonstration mode <../demo>` and visualize results. -The examples are provided with an object detection model, but it is easy to apply them for action classification by substituting the object detection model with classification one. \ No newline at end of file +The examples are provided with an object detection model, but it is easy to apply them for action classification by substituting the object detection model with a classification one. diff --git a/docs/source/guide/tutorials/base/how_to_train/action_detection.rst b/docs/source/guide/tutorials/base/how_to_train/action_detection.rst index 3340e012d11..f19349921b4 100644 --- a/docs/source/guide/tutorials/base/how_to_train/action_detection.rst +++ b/docs/source/guide/tutorials/base/how_to_train/action_detection.rst @@ -153,3 +153,74 @@ We will get a similar to this validation output after some validation time (about .. note:: Currently we don't support export and optimize task in action detection. We will support these features very near future. + + +********* +Export +********* + +1. ``otx export`` exports a trained PyTorch `.pth` model to the OpenVINO™ Intermediate Representation (IR) format. +It allows running the model on Intel hardware much more efficiently, especially on the CPU. Also, the resulting IR model is required to run POT optimization. The IR model consists of two files: ``openvino.xml`` for the model architecture and ``openvino.bin`` for the weights.
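+
+Under the hood, ``otx export`` first converts the trained PyTorch model to ONNX and then calls the OpenVINO™ Model Optimizer (``mo``) on the intermediate ONNX file, as the export log below shows. If you ever need to redo just that conversion step manually, the same call can be issued directly — a rough sketch reusing the arguments from the log (the temporary paths will differ on your machine):
+
+.. code-block::
+
+   mo --input_model="/tmp/OTX-task-ffw8llin/openvino.onnx" \
+      --output_dir="/tmp/OTX-task-ffw8llin/" \
+      --output="bboxes,labels" --input="input" \
+      --input_shape="[1, 3, 32, 256, 256]" \
+      --mean_values="[123.675, 116.28, 103.53]" \
+      --scale_values="[58.395, 57.12, 57.375]" \
+      --source_layout=bctwh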
+ +2. Run the command line below to export the trained model +and save the exported model to the ``openvino_models`` folder. + +.. code-block:: + + (otx) ...$ otx export + + 2023-03-24 15:03:35,993 - mmdeploy - INFO - Export PyTorch model to ONNX: /tmp/OTX-task-ffw8llin/openvino.onnx. + 2023-03-24 15:03:44,450 - mmdeploy - INFO - Args for Model Optimizer: mo --input_model="/tmp/OTX-task-ffw8llin/openvino.onnx" --output_dir="/tmp/OTX-task-ffw8llin/" --output="bboxes,labels" --input="input" --input_shape="[1, 3, 32, 256, 256]" --mean_values="[123.675, 116.28, 103.53]" --scale_values="[58.395, 57.12, 57.375]" --source_layout=bctwh + 2023-03-24 15:03:46,707 - mmdeploy - INFO - [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html + [ SUCCESS ] Generated IR version 11 model. + [ SUCCESS ] XML file: /tmp/OTX-task-ffw8llin/openvino.xml + [ SUCCESS ] BIN file: /tmp/OTX-task-ffw8llin/openvino.bin + + 2023-03-24 15:03:46,707 - mmdeploy - INFO - Successfully exported OpenVINO model: /tmp/OTX-task-ffw8llin/openvino.xml + 2023-03-24 15:03:46,756 - mmaction - INFO - Exporting completed + + +3. Check the accuracy of the IR model and the consistency between the exported model and the PyTorch model, +using ``otx eval`` and passing the IR model path to the ``--load-weights`` parameter. + +.. code-block:: + + (otx) ...$ otx eval --test-data-roots ../data/JHMDB_5%/test \ + --load-weights model-exported/openvino.xml \ + --save-performance model-exported/performance.json + + ... + + Performance(score: 0.0, dashboard: (3 metric groups)) + +.. note:: + + Unfortunately, OpenVINO™ has trouble exporting from an ONNX file that comes from torch 1.13. + You can get a proper OpenVINO™ IR model by downgrading torch to 1.12.1 before exporting. + + +************* +Optimization +************* + +1. You can further optimize the model with ``otx optimize``. +Currently, only POT is supported for action detection. NNCF will be supported in the near future. +Refer to the :doc:`optimization explanation <../../../explanation/additional_features/models_optimization>` section for more details on model optimization. + +2. Example command for optimizing +the OpenVINO™ model (.xml) with OpenVINO™ POT. + +.. code-block:: + + (otx) ...$ otx optimize --load-weights openvino_models/openvino.xml \ + --save-model-to pot_model + + ... + + Performance(score: 0.0, dashboard: (3 metric groups)) + +Keep in mind that POT will take some time (generally less than NNCF optimization) without logging to optimize the model. + +3. Now you have a fully trained, optimized, and exported +action detection model: an efficient, ready-to-use model representation.
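+
+As an extra sanity check, the exported IR can also be loaded directly with the OpenVINO™ Python API and run on a dummy clip. This is a minimal sketch rather than part of the OpenVINO™ Training Extensions CLI; it assumes the ``openvino_models/openvino.xml`` file from the export step above and the ``[1, 3, 32, 256, 256]`` input shape reported by the Model Optimizer log.
+
+.. code-block:: python
+
+   import numpy as np
+   from openvino.runtime import Core
+
+   core = Core()
+   # openvino.bin is picked up automatically from the same folder
+   model = core.read_model("openvino_models/openvino.xml")
+   compiled = core.compile_model(model, "CPU")
+
+   # dummy clip matching the exported input shape
+   clip = np.zeros((1, 3, 32, 256, 256), dtype=np.float32)
+   results = compiled([clip])
+   for output in compiled.outputs:
+       print(output.get_any_name(), results[output].shape)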
diff --git a/docs/source/guide/tutorials/base/how_to_train/classification.rst b/docs/source/guide/tutorials/base/how_to_train/classification.rst index ff66d4b7f39..d645d9ec0ee 100644 --- a/docs/source/guide/tutorials/base/how_to_train/classification.rst +++ b/docs/source/guide/tutorials/base/how_to_train/classification.rst @@ -56,6 +56,7 @@ with the following command: cd .. | + .. image:: ../../../../../utils/images/flowers_example.jpg :width: 600 @@ -120,7 +121,7 @@ Let's prepare an OpenVINO™ Training Extensions classification workspace running (otx) ...$ cd ./otx-workspace-CLASSIFICATION -It will create **otx-workspace-CLASSIFICATION** with all necessery configs for MobileNet-V3-large-1x, prepared ``data.yaml`` to simplify CLI commands launch and splitted dataset named ``splitted_dataset``. +It will create **otx-workspace-CLASSIFICATION** with all the necessary configs for MobileNet-V3-large-1x, a prepared ``data.yaml`` to simplify launching CLI commands, and a split dataset named ``splitted_dataset``. 3. To start training you need to call ``otx train`` command in our workspace: diff --git a/docs/source/guide/tutorials/base/how_to_train/detection.rst b/docs/source/guide/tutorials/base/how_to_train/detection.rst index 1e6a82c693e..f55d1419af8 100644 --- a/docs/source/guide/tutorials/base/how_to_train/detection.rst +++ b/docs/source/guide/tutorials/base/how_to_train/detection.rst @@ -60,7 +60,7 @@ Dataset preparation .. code-block:: - cd data + mkdir data ; cd data git clone https://github.com/thsant/wgisd.git cd wgisd git checkout 6910edc5ae3aae8c20062941b1641821f0c30127 @@ -107,7 +107,7 @@ We can do that by running these commands: .. code-block:: # format images folder - mkdir data images + mv data images # format annotations folder mv coco_annotations annotations @@ -116,6 +116,8 @@ We can do that by running these commands: mv annotations/train_bbox_instances.json annotations/instances_train.json mv annotations/test_bbox_instances.json annotations/instances_val.json + cd ../.. ********* Training ********* @@ -183,9 +185,9 @@ Let's prepare the object detection workspace running the following command: -.. note:: +.. warning:: - If you want to update your current workspace by running ``otx build`` with other parameters, it's better to delete the original workplace before that to prevent mistakes. + If you want to rebuild your current workspace by running ``otx build`` with other parameters, it's better to delete the original workspace before that to prevent mistakes. Check ``otx-workspace-DETECTION/data.yaml`` to ensure, which data subsets will be used for training and validation, and update it if necessary. diff --git a/docs/source/guide/tutorials/index.rst b/docs/source/guide/tutorials/index.rst index 7e679cbe38e..582efdd9193 100644 --- a/docs/source/guide/tutorials/index.rst +++ b/docs/source/guide/tutorials/index.rst @@ -6,6 +6,7 @@ This section reveals how to use ``CLI``, both base and advanced features. It provides the end-to-end solution from installation to model deployment and demo visualization on specific example for each of the supported tasks. ..
toctree:: + :titlesonly: :maxdepth: 3 base/index diff --git a/docs/utils/images/xai_cls.jpg b/docs/utils/images/xai_cls.jpg new file mode 100644 index 00000000000..602d77b2eb2 Binary files /dev/null and b/docs/utils/images/xai_cls.jpg differ diff --git a/docs/utils/images/xai_det.jpg b/docs/utils/images/xai_det.jpg new file mode 100644 index 00000000000..fabecf82f57 Binary files /dev/null and b/docs/utils/images/xai_det.jpg differ diff --git a/docs/utils/images/xai_example.jpg b/docs/utils/images/xai_example.jpg new file mode 100644 index 00000000000..5e6a981ca0c Binary files /dev/null and b/docs/utils/images/xai_example.jpg differ diff --git a/otx/__init__.py b/otx/__init__.py index 5b1ea9b4f72..f5a65030475 100644 --- a/otx/__init__.py +++ b/otx/__init__.py @@ -3,7 +3,7 @@ # Copyright (C) 2021-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -__version__ = "1.1.0rc0" +__version__ = "1.2.0rc0" # NOTE: Sync w/ otx/api/usecases/exportable_code/demo/requirements.txt on release MMCLS_AVAILABLE = True diff --git a/otx/algorithms/__init__.py b/otx/algorithms/__init__.py index 3d087f538e4..daf814e52b2 100644 --- a/otx/algorithms/__init__.py +++ b/otx/algorithms/__init__.py @@ -2,3 +2,5 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 + +TRANSFORMER_BACKBONES = ["VisionTransformer", "T2T_ViT", "Conformer"] diff --git a/otx/algorithms/action/configs/classification/configuration.yaml b/otx/algorithms/action/configs/classification/configuration.yaml index 5221eaaa03b..fde36852b7d 100644 --- a/otx/algorithms/action/configs/classification/configuration.yaml +++ b/otx/algorithms/action/configs/classification/configuration.yaml @@ -245,20 +245,20 @@ algo_backend: header: Algo backend parameters train_type: affects_outcome_of: NONE - default_value: INCREMENTAL + default_value: Incremental description: Quantization preset that defines quantization scheme editable: false enum_name: TrainType header: Train type options: - INCREMENTAL: "INCREMENTAL" + Incremental: "Incremental" type: SELECTABLE ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - value: INCREMENTAL + value: Incremental visible_in_ui: True warning: null mem_cache_size: diff --git a/otx/algorithms/action/configs/classification/movinet/template.yaml b/otx/algorithms/action/configs/classification/movinet/template.yaml index 6fee18320db..c57ee80574c 100644 --- a/otx/algorithms/action/configs/classification/movinet/template.yaml +++ b/otx/algorithms/action/configs/classification/movinet/template.yaml @@ -1,5 +1,5 @@ # Description. -model_template_id: Custom_Action_Classificaiton_MoViNet +model_template_id: Custom_Action_Classification_MoViNet name: MoViNet task_type: ACTION_CLASSIFICATION task_family: VISION @@ -45,7 +45,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/action/configs/classification/x3d/template.yaml b/otx/algorithms/action/configs/classification/x3d/template.yaml index c214e0a282d..f4b63ebfb1c 100644 --- a/otx/algorithms/action/configs/classification/x3d/template.yaml +++ b/otx/algorithms/action/configs/classification/x3d/template.yaml @@ -1,5 +1,5 @@ # Description. 
-model_template_id: Custom_Action_Classificaiton_X3D +model_template_id: Custom_Action_Classification_X3D name: X3D task_type: ACTION_CLASSIFICATION task_family: VISION @@ -45,7 +45,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/action/configs/detection/configuration.yaml b/otx/algorithms/action/configs/detection/configuration.yaml index 5221eaaa03b..fde36852b7d 100644 --- a/otx/algorithms/action/configs/detection/configuration.yaml +++ b/otx/algorithms/action/configs/detection/configuration.yaml @@ -245,20 +245,20 @@ algo_backend: header: Algo backend parameters train_type: affects_outcome_of: NONE - default_value: INCREMENTAL + default_value: Incremental description: Quantization preset that defines quantization scheme editable: false enum_name: TrainType header: Train type options: - INCREMENTAL: "INCREMENTAL" + Incremental: "Incremental" type: SELECTABLE ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - value: INCREMENTAL + value: Incremental visible_in_ui: True warning: null mem_cache_size: diff --git a/otx/algorithms/action/configs/detection/x3d_fast_rcnn/template.yaml b/otx/algorithms/action/configs/detection/x3d_fast_rcnn/template.yaml index 63ed3f682bd..b1d204d6cb0 100644 --- a/otx/algorithms/action/configs/detection/x3d_fast_rcnn/template.yaml +++ b/otx/algorithms/action/configs/detection/x3d_fast_rcnn/template.yaml @@ -45,7 +45,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/adapters/mmcls/__init__.py b/otx/algorithms/classification/adapters/mmcls/__init__.py index fbec047abe2..3fa9776764e 100644 --- a/otx/algorithms/classification/adapters/mmcls/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/__init__.py @@ -15,8 +15,9 @@ # and limitations under the License. -from .data import OTXClsDataset, SelfSLDataset +from .datasets import OTXClsDataset, SelfSLDataset from .models import BYOL, ConstrastiveHead, SelfSLMLP +from .optimizer import LARS # fmt: off # isort: off @@ -33,4 +34,5 @@ "BYOL", "SelfSLMLP", "ConstrastiveHead", + "LARS", ] diff --git a/otx/algorithms/classification/adapters/mmcls/data/__init__.py b/otx/algorithms/classification/adapters/mmcls/datasets/__init__.py similarity index 69% rename from otx/algorithms/classification/adapters/mmcls/data/__init__.py rename to otx/algorithms/classification/adapters/mmcls/datasets/__init__.py index 206425d34b1..0242a22643b 100644 --- a/otx/algorithms/classification/adapters/mmcls/data/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/datasets/__init__.py @@ -1,6 +1,6 @@ """OTX Algorithms - Classification Dataset.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,30 +14,30 @@ # See the License for the specific language governing permissions # and limitations under the License. 
-from .datasets import ( +from .otx_datasets import ( OTXClsDataset, OTXHierarchicalClsDataset, OTXMultilabelClsDataset, SelfSLDataset, ) -from .pipelines import ( - GaussianBlur, - LoadImageFromOTXDataset, - OTXColorJitter, - PILImageToNDArray, - PostAug, - RandomAppliedTrans, +from .pipelines.transforms import ( + AugMixAugment, + OTXRandAugment, + PILToTensor, + RandomRotate, + TensorNormalize, + TwoCropTransform, ) __all__ = [ + "AugMixAugment", + "PILToTensor", + "TensorNormalize", + "RandomRotate", + "OTXRandAugment", + "TwoCropTransform", "OTXClsDataset", "OTXMultilabelClsDataset", "OTXHierarchicalClsDataset", "SelfSLDataset", - "PostAug", - "PILImageToNDArray", - "LoadImageFromOTXDataset", - "RandomAppliedTrans", - "GaussianBlur", - "OTXColorJitter", ] diff --git a/otx/algorithms/classification/adapters/mmcls/data/datasets.py b/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py similarity index 99% rename from otx/algorithms/classification/adapters/mmcls/data/datasets.py rename to otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py index 5f81734b9fb..4d08c415865 100644 --- a/otx/algorithms/classification/adapters/mmcls/data/datasets.py +++ b/otx/algorithms/classification/adapters/mmcls/datasets/otx_datasets.py @@ -18,13 +18,13 @@ from torch.utils.data import Dataset from otx.algorithms.common.utils import get_cls_img_indices, get_old_new_img_indices +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.label import LabelEntity from otx.api.utils.argument_checks import ( DatasetParamTypeCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger logger = get_logger() diff --git a/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/__init__.py b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/__init__.py new file mode 100644 index 00000000000..40f49b6d32d --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/__init__.py @@ -0,0 +1,36 @@ +"""OTX Algorithms - Classification pipelines.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from .otx_pipelines import ( + GaussianBlur, + LoadImageFromOTXDataset, + OTXColorJitter, + PILImageToNDArray, + PostAug, + RandomAppliedTrans, +) +from .transforms import ( + AugMixAugment, + OTXRandAugment, + PILToTensor, + RandomRotate, + TensorNormalize, + TwoCropTransform, +) + +__all__ = [ + "PostAug", + "PILImageToNDArray", + "LoadImageFromOTXDataset", + "RandomAppliedTrans", + "GaussianBlur", + "OTXColorJitter", + "AugMixAugment", + "PILToTensor", + "RandomRotate", + "TensorNormalize", + "OTXRandAugment", + "TwoCropTransform", +] diff --git a/otx/algorithms/classification/adapters/mmcls/data/pipelines.py b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/otx_pipelines.py similarity index 100% rename from otx/algorithms/classification/adapters/mmcls/data/pipelines.py rename to otx/algorithms/classification/adapters/mmcls/datasets/pipelines/otx_pipelines.py diff --git a/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/__init__.py b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/__init__.py new file mode 100644 index 00000000000..5644dffbf01 --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/__init__.py @@ -0,0 +1,19 @@ +"""Module to init transforms for OTX classification.""" +# Copyright (C) 2023 Intel 
Corporation +# SPDX-License-Identifier: Apache-2.0 +# +# flake8: noqa + +from .augmix import AugMixAugment +from .otx_transforms import PILToTensor, RandomRotate, TensorNormalize +from .random_augment import OTXRandAugment +from .twocrop_transform import TwoCropTransform + +__all__ = [ + "AugMixAugment", + "PILToTensor", + "TensorNormalize", + "RandomRotate", + "OTXRandAugment", + "TwoCropTransform", +] diff --git a/otx/mpa/modules/datasets/pipelines/transforms/augmix.py b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/augmix.py similarity index 74% rename from otx/mpa/modules/datasets/pipelines/transforms/augmix.py rename to otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/augmix.py index 704301ad3bd..1b9dd3e74e9 100644 --- a/otx/mpa/modules/datasets/pipelines/transforms/augmix.py +++ b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/augmix.py @@ -1,3 +1,4 @@ +"""Module for defining AugMix class used for classification task.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,9 +9,12 @@ import numpy as np from mmcls.datasets.builder import PIPELINES +from mmcv.utils import ConfigDict from PIL import Image -from otx.mpa.modules.datasets.pipelines.transforms.augments import CythonAugments +from otx.algorithms.common.adapters.mmcv.pipelines.transforms.augments import ( + CythonAugments, +) _AUGMIX_TRANSFORMS_GREY = [ "SharpnessIncreasing", # not in paper @@ -37,13 +41,15 @@ class OpsFabric: + """OpsFabric class.""" + def __init__(self, name, magnitude, hparams, prob=1.0): self.max_level = 10 self.prob = prob self.hparams = hparams # kwargs for augment functions self.aug_kwargs = dict(fillcolor=hparams["img_mean"], resample=(Image.BILINEAR, Image.BICUBIC)) - self.LEVEL_TO_ARG = { + self.level_to_arg = { "AutoContrast": None, "Equalize": None, "Rotate": self._rotate_level_to_arg, @@ -58,7 +64,7 @@ def __init__(self, name, magnitude, hparams, prob=1.0): "TranslateXRel": self._translate_rel_level_to_arg, "TranslateYRel": self._translate_rel_level_to_arg, } - self.NAME_TO_OP = { + self.name_to_op = { "AutoContrast": CythonAugments.autocontrast, "Equalize": CythonAugments.equalize, "Rotate": CythonAugments.rotate, @@ -73,15 +79,17 @@ def __init__(self, name, magnitude, hparams, prob=1.0): "TranslateXRel": CythonAugments.translate_x_rel, "TranslateYRel": CythonAugments.translate_y_rel, } - self.aug_fn = self.NAME_TO_OP[name] - self.level_fn = self.LEVEL_TO_ARG[name] - self.magnitude = magnitude - self.magnitude_std = self.hparams.get("magnitude_std", float("inf")) + self.aug_factory = ConfigDict( + aug_fn=self.name_to_op[name], + level_fn=self.level_to_arg[name], + magnitude=magnitude, + magnitude_std=self.hparams.get("magnitude_std", float("inf")), + ) @staticmethod - def randomly_negate(v): - """With 50% prob, negate the value""" - return -v if random.random() > 0.5 else v + def randomly_negate(value): + """With 50% prob, negate the value.""" + return -value if random.random() > 0.5 else value def _rotate_level_to_arg(self, level, _hparams): # range [-30, 30] @@ -129,95 +137,96 @@ def _solarize_increasing_level_to_arg(self, level, _hparams): return (256 - self._solarize_level_to_arg(level, _hparams)[0],) def __call__(self, img): + """Call method of OpsFabric class.""" if self.prob < 1.0 and random.random() > self.prob: return img - magnitude = self.magnitude - if self.magnitude_std: - if self.magnitude_std == float("inf"): + magnitude = self.aug_factory.magnitude + magnitude_std = 
self.aug_factory.magnitude_std + level_fn = self.aug_factory.level_fn + if magnitude_std: + if magnitude_std == float("inf"): magnitude = random.uniform(0, magnitude) - elif self.magnitude_std > 0: - magnitude = random.gauss(magnitude, self.magnitude_std) + elif magnitude_std > 0: + magnitude = random.gauss(magnitude, magnitude_std) magnitude = min(self.max_level, max(0, magnitude)) # clip to valid range - level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() - return self.aug_fn(img, *level_args, **self.aug_kwargs) + level_args = level_fn(magnitude, self.hparams) if level_fn is not None else tuple() + return self.aug_factory.aug_fn(img, *level_args, **self.aug_kwargs) @PIPELINES.register_module() -class AugMixAugment(object): - """AugMix Transform +class AugMixAugment: + """AugMix Transform. + Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - - https://arxiv.org/abs/1912.02781 + https://arxiv.org/abs/1912.02781. """ - def __init__(self, config_str, image_mean=None, grey=False, **kwargs): + def __init__(self, config_str, image_mean=None, grey=False): self.ops, self.alpha, self.width, self.depth = self._augmix_ops(config_str, image_mean, grey=grey) - def _apply_basic(self, img, mixing_weights, m): + def _apply_basic(self, img, mixing_weights, m): # pylint: disable=invalid-name # This is a literal adaptation of the paper/official implementation without normalizations and # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the # typical augmentation transforms, could use a GPU / Kornia implementation. mixed = (1 - m) * np.array(img, dtype=np.float32) - for mw in mixing_weights: + for mix_weight in mixing_weights: depth = self.depth if self.depth > 0 else np.random.randint(1, 4) ops = np.random.choice(self.ops, depth, replace=True) img_aug = deepcopy(img) - for op in ops: + for op in ops: # pylint: disable=invalid-name img_aug = op(img_aug) - CythonAugments.blend(img_aug, mixed, mw * m) + CythonAugments.blend(img_aug, mixed, mix_weight * m) np.clip(mixed, 0, 255.0, out=mixed) return Image.fromarray(mixed.astype(np.uint8)) def _augmix_ops(self, config_str, image_mean=None, translate_const=250, grey=False): if image_mean is None: image_mean = [0.485, 0.456, 0.406] # imagenet mean - magnitude = 3 - width = 3 - depth = -1 - alpha = 1.0 - p = 1.0 + aug_params = ConfigDict(magnitude=3, width=3, depth=-1, alpha=1.0, p=1.0) hparams = dict( translate_const=translate_const, - img_mean=tuple([int(c * 256) for c in image_mean]), + img_mean=tuple(int(c * 256) for c in image_mean), magnitude_std=float("inf"), ) config = config_str.split("-") assert config[0] == "augmix" config = config[1:] - for c in config: - cs = re.split(r"(\d.*)", c) - if len(cs) < 2: + for cfg in config: + cfgs = re.split(r"(\d.*)", cfg) + if len(cfgs) < 2: continue - key, val = cs[:2] + key, val = cfgs[:2] if key == "mstd": hparams.setdefault("magnitude_std", float(val)) elif key == "m": - magnitude = int(val) + aug_params.magnitude = int(val) elif key == "w": - width = int(val) + aug_params.width = int(val) elif key == "d": - depth = int(val) + aug_params.depth = int(val) elif key == "a": - alpha = float(val) + aug_params.alpha = float(val) elif key == "p": - p = float(val) + aug_params.p = float(val) else: assert False, "Unknown AugMix config section" aug_politics = _AUGMIX_TRANSFORMS_GREY if grey else 
_AUGMIX_TRANSFORMS return ( - [OpsFabric(name, magnitude, hparams, p) for name in aug_politics], - alpha, - width, - depth, + [OpsFabric(name, aug_params.magnitude, hparams, aug_params.p) for name in aug_politics], + aug_params.alpha, + aug_params.width, + aug_params.depth, ) def __call__(self, results): + """Call function applies augmix on image.""" for key in results.get("img_fields", ["img"]): img = results[key] if not Image.isImageType(img): img = Image.fromarray(img) mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width)) - m = np.float32(np.random.beta(self.alpha, self.alpha)) + m = np.float32(np.random.beta(self.alpha, self.alpha)) # pylint: disable=invalid-name mixed = self._apply_basic(img, mixing_weights, m) results["augmix"] = True results[key] = mixed diff --git a/otx/mpa/modules/datasets/pipelines/transforms/ote_transforms.py b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/otx_transforms.py similarity index 80% rename from otx/mpa/modules/datasets/pipelines/transforms/ote_transforms.py rename to otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/otx_transforms.py index c5fdb8ccec7..8e790c8721e 100644 --- a/otx/mpa/modules/datasets/pipelines/transforms/ote_transforms.py +++ b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/otx_transforms.py @@ -1,3 +1,4 @@ +"""Module for defining transforms used for OTX classification.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -10,8 +11,11 @@ @PIPELINES.register_module() -class PILToTensor(object): +class PILToTensor: + """Convert PIL image to Tensor.""" + def __call__(self, results): + """Call function of PILToTensor class.""" for key in results.get("img_fields", ["img"]): img = results[key] if not Image.isImageType(img): @@ -23,13 +27,16 @@ def __call__(self, results): @PIPELINES.register_module() -class TensorNormalize(object): +class TensorNormalize: + """Normalize tensor object.""" + def __init__(self, mean, std, inplace=False): self.mean = mean self.std = std self.inplace = inplace def __call__(self, results): + """Call function of TensorNormalize class.""" for key in results.get("img_fields", ["img"]): img = results[key] img = F.normalize(img, self.mean, self.std, self.inplace) @@ -40,18 +47,17 @@ def __call__(self, results): # TODO [Jihwan]: Can be removed by mmcls.dataset.pipelines.auto_augment L398, Roate class @PIPELINES.register_module() -class RandomRotate(object): - """Random rotate - From torchreid.data.transforms - """ +class RandomRotate: + """Random rotate, from torchreid.data.transforms.""" - def __init__(self, p=0.5, angle=(-5, 5), values=None, **kwargs): + def __init__(self, p=0.5, angle=(-5, 5), values=None): self.p = p self.angle = angle self.discrete = values is not None and len([v for v in values if v != 0]) > 0 self.values = values def __call__(self, results, *args, **kwargs): + """Call function of RandomRotate class.""" if random.uniform(0, 1) > self.p: return results for key in results.get("img_fields", ["img"]): diff --git a/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/random_augment.py b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/random_augment.py new file mode 100644 index 00000000000..d0e4804ba67 --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/random_augment.py @@ -0,0 +1,194 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# +# 
pylint: disable=unused-argument +"""Code in this file is adapted from. + +https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py +https://github.com/google-research/fixmatch/blob/master/third_party/auto_augment/augmentations.py +https://github.com/google-research/fixmatch/blob/master/libml/ctaugment.py +""" + +import random + +import numpy as np +import PIL +from mmcls.datasets.builder import PIPELINES + +PARAMETER_MAX = 10 + + +def auto_contrast(img, **kwargs): + """Applies auto contrast to an image.""" + return PIL.ImageOps.autocontrast(img), None + + +def brightness(img, value, max_value, bias=0): + """Applies brightness adjustment to an image.""" + value = _float_parameter(value, max_value) + bias + return PIL.ImageEnhance.Brightness(img).enhance(value), value + + +def color(img, value, max_value, bias=0): + """Applies color adjustment to an image.""" + value = _float_parameter(value, max_value) + bias + return PIL.ImageEnhance.Color(img).enhance(value), value + + +def contrast(img, value, max_value, bias=0): + """Applies contrast adjustment to an image.""" + value = _float_parameter(value, max_value) + bias + return PIL.ImageEnhance.Contrast(img).enhance(value), value + + +def cutout(img, value, max_value, bias=0): + """Applies cutout augmentation to an image.""" + if value == 0: + return img + value = _float_parameter(value, max_value) + bias + value = int(value * min(img.size)) + return cutout_abs(img, value), value + + +def cutout_abs(img, value, **kwargs): + """Applies cutout with absolute pixel size to an image.""" + w, h = img.size + x0 = np.random.uniform(0, w) + y0 = np.random.uniform(0, h) + x0 = int(max(0, x0 - value / 2.0)) + y0 = int(max(0, y0 - value / 2.0)) + x1 = int(min(w, x0 + value)) + y1 = int(min(h, y0 + value)) + xy = (x0, y0, x1, y1) + # gray + rec_color = (127, 127, 127) + img = img.copy() + PIL.ImageDraw.Draw(img).rectangle(xy, rec_color) + return img, xy, rec_color + + +def equalize(img, **kwargs): + """Applies equalization to an image.""" + return PIL.ImageOps.equalize(img), None + + +def identity(img, **kwargs): + """Returns the original image without any transformation.""" + return img, None + + +def posterize(img, value, max_value, bias=0): + """Applies posterization to an image.""" + value = _int_parameter(value, max_value) + bias + return PIL.ImageOps.posterize(img, value), value + + +def rotate(img, value, max_value, bias=0): + """Applies rotation to an image.""" + value = _int_parameter(value, max_value) + bias + if random.random() < 0.5: + value = -value + return img.rotate(value), value + + +def sharpness(img, value, max_value, bias=0): + """Applies Sharpness to an image.""" + value = _float_parameter(value, max_value) + bias + return PIL.ImageEnhance.Sharpness(img).enhance(value), value + + +def shear_x(img, value, max_value, bias=0): + """Applies ShearX to an image.""" + value = _float_parameter(value, max_value) + bias + if random.random() < 0.5: + value = -value + return img.transform(img.size, PIL.Image.AFFINE, (1, value, 0, 0, 1, 0)), value + + +def shear_y(img, value, max_value, bias=0): + """Applies ShearY to an image.""" + value = _float_parameter(value, max_value) + bias + if random.random() < 0.5: + value = -value + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, value, 1, 0)), value + + +def solarize(img, value, max_value, bias=0): + """Applies Solarize to an image.""" + value = _int_parameter(value, max_value) + bias + return PIL.ImageOps.solarize(img, 256 - value), value + + +def 
translate_x(img, value, max_value, bias=0): + """Applies TranslateX to an image.""" + value = _float_parameter(value, max_value) + bias + if random.random() < 0.5: + value = -value + value = int(value * img.size[0]) + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, value, 0, 1, 0)), value + + +def translate_y(img, value, max_value, bias=0): + """Applies TranslateX to an image.""" + value = _float_parameter(value, max_value) + bias + if random.random() < 0.5: + value = -value + value = int(value * img.size[1]) + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, value)), value + + +def _float_parameter(value, max_value): + return float(value) * max_value / PARAMETER_MAX + + +def _int_parameter(value, max_value): + return int(value * max_value / PARAMETER_MAX) + + +rand_augment_pool = [ + (auto_contrast, None, None), + (brightness, 0.9, 0.05), + (color, 0.9, 0.05), + (contrast, 0.9, 0.05), + (equalize, None, None), + (identity, None, None), + (posterize, 4, 4), + (rotate, 30, 0), + (sharpness, 0.9, 0.05), + (shear_x, 0.3, 0), + (shear_y, 0.3, 0), + (solarize, 256, 0), + (translate_x, 0.3, 0), + (translate_y, 0.3, 0), +] + + +# TODO: [Jihwan]: Can be removed by mmcls.datasets.pipeline.auto_augment Line 95 RandAugment class +@PIPELINES.register_module() +class OTXRandAugment: + """RandAugment class for OTX classification.""" + + def __init__(self, num_aug, magnitude, cutout_value=16): + assert num_aug >= 1 + assert 1 <= magnitude <= 10 + self.num_aug = num_aug + self.magnitude = magnitude + self.cutout_value = cutout_value + self.augment_pool = rand_augment_pool + + def __call__(self, results): + """Call function of OTXRandAugment class.""" + for key in results.get("img_fields", ["img"]): + img = results[key] + if not PIL.Image.isImageType(img): + img = PIL.Image.fromarray(results[key]) + augs = random.choices(self.augment_pool, k=self.num_aug) + for aug, max_value, bias in augs: + value = np.random.randint(1, self.magnitude) + if random.random() < 0.5: + img, value = aug(img, value=value, max_value=max_value, bias=bias) + results[f"rand_mc_{aug.__name__}"] = value + img, xy, rec_color = cutout_abs(img, self.cutout_value) + results["CutoutAbs"] = (xy, self.cutout_value, rec_color) + results[key] = np.array(img) + return results diff --git a/otx/mpa/modules/datasets/pipelines/transforms/twocrop_transform.py b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/twocrop_transform.py similarity index 81% rename from otx/mpa/modules/datasets/pipelines/transforms/twocrop_transform.py rename to otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/twocrop_transform.py index 8a9c7818ebc..4d8c5308c95 100644 --- a/otx/mpa/modules/datasets/pipelines/transforms/twocrop_transform.py +++ b/otx/algorithms/classification/adapters/mmcls/datasets/pipelines/transforms/twocrop_transform.py @@ -1,3 +1,4 @@ +"""Define TwoCropTransform used for self-sl in mmclassification.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 @@ -11,13 +12,14 @@ @PIPELINES.register_module() class TwoCropTransform: - """Generate two different cropped views of an image""" + """Generate two different cropped views of an image.""" def __init__(self, pipeline): self.pipeline1 = Compose([build_from_cfg(p, PIPELINES) for p in pipeline]) self.pipeline2 = Compose([build_from_cfg(p, PIPELINES) for p in pipeline]) def __call__(self, data): + """Call method for TwoCropTransform class.""" data1 = self.pipeline1(deepcopy(data)) data2 = 
self.pipeline2(deepcopy(data)) diff --git a/otx/algorithms/classification/adapters/mmcls/models/__init__.py b/otx/algorithms/classification/adapters/mmcls/models/__init__.py index 3a8398f2fed..ee3c040a319 100644 --- a/otx/algorithms/classification/adapters/mmcls/models/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/models/__init__.py @@ -14,8 +14,53 @@ # See the License for the specific language governing permissions # and limitations under the License. -from .classifiers import BYOL -from .heads import ConstrastiveHead +from .classifiers import BYOL, SAMImageClassifier, SemiSLClassifier, SupConClassifier +from .heads import ( + ClsHead, + ConstrastiveHead, + ConvClsHead, + CustomHierarchicalLinearClsHead, + CustomHierarchicalNonLinearClsHead, + CustomLinearClsHead, + CustomMultiLabelLinearClsHead, + CustomMultiLabelNonLinearClsHead, + CustomNonLinearClsHead, + MMOVClsHead, + SemiLinearMultilabelClsHead, + SemiNonLinearMultilabelClsHead, + SupConClsHead, +) +from .losses import ( + AsymmetricAngularLossWithIgnore, + AsymmetricLossWithIgnore, + BarlowTwinsLoss, + CrossEntropyLossWithIgnore, + IBLoss, +) from .necks import SelfSLMLP -__all__ = ["BYOL", "SelfSLMLP", "ConstrastiveHead"] +__all__ = [ + "BYOL", + "SAMImageClassifier", + "SemiSLClassifier", + "SupConClassifier", + "CustomLinearClsHead", + "CustomNonLinearClsHead", + "CustomMultiLabelNonLinearClsHead", + "CustomMultiLabelLinearClsHead", + "CustomHierarchicalLinearClsHead", + "CustomHierarchicalNonLinearClsHead", + "AsymmetricAngularLossWithIgnore", + "SemiLinearMultilabelClsHead", + "SemiNonLinearMultilabelClsHead", + "MMOVClsHead", + "ConvClsHead", + "ClsHead", + "AsymmetricLossWithIgnore", + "BarlowTwinsLoss", + "IBLoss", + "CrossEntropyLossWithIgnore", + "SelfSLMLP", + "ConstrastiveHead", + "SupConClsHead", +] diff --git a/otx/algorithms/classification/adapters/mmcls/models/backbones/__init__.py b/otx/algorithms/classification/adapters/mmcls/models/backbones/__init__.py new file mode 100644 index 00000000000..07decd4f247 --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/models/backbones/__init__.py @@ -0,0 +1,19 @@ +"""OTX Algorithms - Classification Backbones.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from .mmov_backbone import MMOVBackbone + +__all__ = ["MMOVBackbone"] diff --git a/otx/algorithms/classification/adapters/mmcls/models/backbones/mmov_backbone.py b/otx/algorithms/classification/adapters/mmcls/models/backbones/mmov_backbone.py new file mode 100644 index 00000000000..1480701c509 --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/models/backbones/mmov_backbone.py @@ -0,0 +1,43 @@ +"""Module for the MMOVBackbone class.""" + +from typing import Dict, List, Union + +from mmcls.models.builder import BACKBONES + +from otx.core.ov.graph.parsers.cls import cls_base_parser +from otx.core.ov.models.mmov_model import MMOVModel + + +@BACKBONES.register_module() +class MMOVBackbone(MMOVModel): + """MMOVBackbone class. 
+ + Args: + *args: positional arguments. + **kwargs: keyword arguments. + """ + + @staticmethod + def parser(graph, **kwargs) -> Dict[str, Union[List[str], Dict[str, List[str]]]]: + """Parses the input and output of the model. + + Args: + graph: input graph. + **kwargs: keyword arguments. + + Returns: + Dictionary containing input and output of the model. + """ + output = cls_base_parser(graph, "backbone") + if output is None: + raise ValueError("Parser can not determine input and output of model. Please provide them explicitly") + return output + + def init_weights(self, pretrained=None): # pylint: disable=unused-argument + """Initializes the weights of the model. + + Args: + pretrained: pretrained weights. Default: None. + """ + # TODO + return diff --git a/otx/algorithms/classification/adapters/mmcls/models/classifiers/__init__.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/__init__.py index e7a44b05002..ff42c4f6085 100644 --- a/otx/algorithms/classification/adapters/mmcls/models/classifiers/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/__init__.py @@ -1,6 +1,6 @@ """OTX Algorithms - Classification Classifiers.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,5 +15,9 @@ # and limitations under the License. from .byol import BYOL +from .sam_classifier import SAMImageClassifier +from .semisl_classifier import SemiSLClassifier +from .semisl_multilabel_classifier import SemiSLMultilabelClassifier +from .supcon_classifier import SupConClassifier -__all__ = ["BYOL"] +__all__ = ["BYOL", "SAMImageClassifier", "SemiSLClassifier", "SemiSLMultilabelClassifier", "SupConClassifier"] diff --git a/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py index f88cb1dd248..6342805b37c 100644 --- a/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/byol.py @@ -18,7 +18,7 @@ from mmcls.models.builder import CLASSIFIERS, build_backbone, build_head, build_neck from torch import nn -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() diff --git a/otx/mpa/modules/models/classifiers/sam_classifier.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/sam_classifier.py similarity index 57% rename from otx/mpa/modules/models/classifiers/sam_classifier.py rename to otx/algorithms/classification/adapters/mmcls/models/classifiers/sam_classifier.py index 8edbffda75e..6ea79289ba9 100644 --- a/otx/mpa/modules/models/classifiers/sam_classifier.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/sam_classifier.py @@ -1,17 +1,15 @@ +"""Module for defining SAMClassifier for classification task.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # - -from collections import OrderedDict from functools import partial from mmcls.models.builder import CLASSIFIERS -from mmcls.models.classifiers.base import BaseClassifier from mmcls.models.classifiers.image import ImageClassifier -from otx.mpa.deploy.utils import is_mmdeploy_enabled -from otx.mpa.modules.utils.task_adapt import map_class_names -from otx.mpa.utils.logger import get_logger +from 
otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import map_class_names from .sam_classifier_mixin import SAMClassifierMixin @@ -20,7 +18,7 @@ @CLASSIFIERS.register_module() class SAMImageClassifier(SAMClassifierMixin, ImageClassifier): - """SAM-enabled ImageClassifier""" + """SAM-enabled ImageClassifier.""" def __init__(self, task_adapt=None, **kwargs): if "multilabel" in kwargs: @@ -75,119 +73,125 @@ def forward_train(self, img, gt_label, **kwargs): return losses @staticmethod - def state_dict_hook(module, state_dict, prefix, *args, **kwargs): - """Redirect model as output state_dict for OTX model compatibility""" + def state_dict_hook(module, state_dict, prefix, *args, **kwargs): # noqa: C901 + # pylint: disable=unused-argument, too-many-branches + """Redirect model as output state_dict for OTX model compatibility.""" backbone_type = type(module.backbone).__name__ if backbone_type not in ["OTXMobileNetV3", "OTXEfficientNet", "OTXEfficientNetV2"]: - return - - if backbone_type == "OTXMobileNetV3": - for k in list(state_dict.keys()): - v = state_dict.pop(k) - if not prefix or k.startswith(prefix): - k = k.replace(prefix, "", 1) - if k.startswith("backbone"): - k = k.replace("backbone.", "", 1) - elif k.startswith("head"): - k = k.replace("head.", "", 1) - if "3" in k: # MPA uses "classifier.3", OTX uses "classifier.4". Convert for OTX compatibility. - k = k.replace("3", "4") + return None + + if backbone_type == "OTXMobileNetV3": # pylint: disable=too-many-nested-blocks + for key in list(state_dict.keys()): + val = state_dict.pop(key) + if not prefix or key.startswith(prefix): + key = key.replace(prefix, "", 1) + if key.startswith("backbone"): + key = key.replace("backbone.", "", 1) + elif key.startswith("head"): + key = key.replace("head.", "", 1) + if ( + "3" in key + ): # MPA uses "classifier.3", OTX uses "classifier.4". Convert for OTX compatibility. 
+ key = key.replace("3", "4") if module.multilabel and not module.is_export: - v = v.t() - k = prefix + k - state_dict[k] = v + val = val.t() + key = prefix + key + state_dict[key] = val elif backbone_type == "OTXEfficientNet": - for k in list(state_dict.keys()): - v = state_dict.pop(k) - if not prefix or k.startswith(prefix): - k = k.replace(prefix, "", 1) - if k.startswith("backbone"): - k = k.replace("backbone.", "", 1) - elif k.startswith("head"): - k = k.replace("head", "output", 1) + for key in list(state_dict.keys()): + val = state_dict.pop(key) + if not prefix or key.startswith(prefix): + key = key.replace(prefix, "", 1) + if key.startswith("backbone"): + key = key.replace("backbone.", "", 1) + elif key.startswith("head"): + key = key.replace("head", "output", 1) if not module.hierarchical and not module.is_export: - k = k.replace("fc", "asl") - v = v.t() - k = prefix + k - state_dict[k] = v + key = key.replace("fc", "asl") + val = val.t() + key = prefix + key + state_dict[key] = val elif backbone_type == "OTXEfficientNetV2": - for k in list(state_dict.keys()): - v = state_dict.pop(k) - if not prefix or k.startswith(prefix): - k = k.replace(prefix, "", 1) - if k.startswith("backbone"): - k = k.replace("backbone.", "", 1) - elif k == "head.fc.weight": - k = k.replace("head.fc", "model.classifier") + for key in list(state_dict.keys()): + val = state_dict.pop(key) + if not prefix or key.startswith(prefix): + key = key.replace(prefix, "", 1) + if key.startswith("backbone"): + key = key.replace("backbone.", "", 1) + elif key == "head.fc.weight": + key = key.replace("head.fc", "model.classifier") if not module.hierarchical and not module.is_export: - v = v.t() - k = prefix + k - state_dict[k] = v + val = val.t() + key = prefix + key + state_dict[key] = val return state_dict @staticmethod - def load_state_dict_pre_hook(module, state_dict, prefix, *args, **kwargs): - """Redirect input state_dict to model for OTX model compatibility""" + def load_state_dict_pre_hook(module, state_dict, prefix, *args, **kwargs): # noqa: C901 + # pylint: disable=unused-argument, too-many-branches + """Redirect input state_dict to model for OTX model compatibility.""" backbone_type = type(module.backbone).__name__ if backbone_type not in ["OTXMobileNetV3", "OTXEfficientNet", "OTXEfficientNetV2"]: return - if backbone_type == "OTXMobileNetV3": - for k in list(state_dict.keys()): - v = state_dict.pop(k) - if not prefix or k.startswith(prefix): - k = k.replace(prefix, "", 1) - if k.startswith("classifier."): - if "4" in k: - k = "head." + k.replace("4", "3") + if backbone_type == "OTXMobileNetV3": # pylint: disable=too-many-nested-blocks + for key in list(state_dict.keys()): + val = state_dict.pop(key) + if not prefix or key.startswith(prefix): + key = key.replace(prefix, "", 1) + if key.startswith("classifier."): + if "4" in key: + key = "head." + key.replace("4", "3") if module.multilabel: - v = v.t() + val = val.t() else: - k = "head." + k - elif k.startswith("act"): - k = "head." + k - elif not k.startswith("backbone."): - k = "backbone." + k - k = prefix + k - state_dict[k] = v + key = "head." + key + elif key.startswith("act"): + key = "head." + key + elif not key.startswith("backbone."): + key = "backbone." + key + key = prefix + key + state_dict[key] = val elif backbone_type == "OTXEfficientNet": - for k in list(state_dict.keys()): - v = state_dict.pop(k) - if not prefix or k.startswith(prefix): - k = k.replace(prefix, "", 1) - if k.startswith("features.") and "activ" not in k: - k = "backbone." 
+ k - elif k.startswith("output."): - k = k.replace("output", "head") + for key in list(state_dict.keys()): + val = state_dict.pop(key) + if not prefix or key.startswith(prefix): + key = key.replace(prefix, "", 1) + if key.startswith("features.") and "activ" not in key: + key = "backbone." + key + elif key.startswith("output."): + key = key.replace("output", "head") if not module.hierarchical: - k = k.replace("asl", "fc") - v = v.t() - k = prefix + k - state_dict[k] = v + key = key.replace("asl", "fc") + val = val.t() + key = prefix + key + state_dict[key] = val elif backbone_type == "OTXEfficientNetV2": - for k in list(state_dict.keys()): - v = state_dict.pop(k) - if not prefix or k.startswith(prefix): - k = k.replace(prefix, "", 1) - if k.startswith("model.classifier"): - k = k.replace("model.classifier", "head.fc") + for key in list(state_dict.keys()): + val = state_dict.pop(key) + if not prefix or key.startswith(prefix): + key = key.replace(prefix, "", 1) + if key.startswith("model.classifier"): + key = key.replace("model.classifier", "head.fc") if not module.hierarchical: - v = v.t() - elif k.startswith("model"): - k = "backbone." + k - k = prefix + k - state_dict[k] = v + val = val.t() + elif key.startswith("model"): + key = "backbone." + key + key = prefix + key + state_dict[key] = val else: logger.info("conversion is not required.") @staticmethod - def load_state_dict_mixing_hook(model, model_classes, chkpt_classes, chkpt_dict, prefix, *args, **kwargs): - """Modify input state_dict according to class name matching before weight loading""" + def load_state_dict_mixing_hook( + model, model_classes, chkpt_classes, chkpt_dict, prefix, *args, **kwargs + ): # pylint: disable=unused-argument, too-many-branches, too-many-locals + """Modify input state_dict according to class name matching before weight loading.""" backbone_type = type(model.backbone).__name__ if backbone_type not in ["OTXMobileNetV3", "OTXEfficientNet", "OTXEfficientNetV2"]: return @@ -244,15 +248,16 @@ def load_state_dict_mixing_hook(model, model_classes, chkpt_classes, chkpt_dict, # Mix weights chkpt_param = chkpt_dict[chkpt_name] - for m, c in enumerate(model2chkpt): + for module, c in enumerate(model2chkpt): if c >= 0: - model_param[m].copy_(chkpt_param[c]) + model_param[module].copy_(chkpt_param[c]) # Replace checkpoint weight by mixed weights chkpt_dict[chkpt_name] = model_param def extract_feat(self, img): - """Directly extract features from the backbone + neck + """Directly extract features from the backbone + neck. + Overriding for OpenVINO export with features """ x = self.backbone(img) @@ -270,15 +275,16 @@ def extract_feat(self, img): if is_mmdeploy_enabled(): from mmdeploy.core import FUNCTION_REWRITER - from otx.mpa.modules.hooks.recording_forward_hooks import ( + from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( # pylint: disable=ungrouped-imports FeatureVectorHook, ReciproCAMHook, ) @FUNCTION_REWRITER.register_rewriter( - "otx.mpa.modules.models.classifiers.sam_classifier.SAMImageClassifier.extract_feat" + "otx.algorithms.classification.adapters.mmcls.models.classifiers.SAMImageClassifier.extract_feat" ) - def sam_image_classifier__extract_feat(ctx, self, img): + def sam_image_classifier__extract_feat(ctx, self, img): # pylint: disable=unused-argument + """Feature extraction function for SAMClassifier with mmdeploy.""" feat = self.backbone(img) # For Global Backbones (det/seg/etc..), # In case of tuple or list, only the feat of the last layer is used. 
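The `state_dict_hook`/`load_state_dict_pre_hook` pair above is, at its core, a bidirectional key-renaming layer between MPA-style and OTX-style checkpoints. Below is a minimal, self-contained sketch of the MobileNetV3 direction only; the checkpoint keys and the `mpa_to_otx` helper are illustrative stand-ins, not code from this PR:

```python
from collections import OrderedDict

# Hypothetical MPA-style checkpoint keys for an OTXMobileNetV3 classifier.
mpa_state = OrderedDict(
    [
        ("backbone.conv1.weight", "w0"),
        ("head.classifier.3.weight", "w1"),  # MPA convention: "classifier.3"
    ]
)


def mpa_to_otx(state_dict, prefix=""):
    """Rename keys the way state_dict_hook does for OTXMobileNetV3:
    strip the sub-module prefix and map "classifier.3" to OTX's "classifier.4"."""
    out = OrderedDict()
    for key, val in state_dict.items():
        if not prefix or key.startswith(prefix):
            key = key.replace(prefix, "", 1)
            if key.startswith("backbone"):
                key = key.replace("backbone.", "", 1)
            elif key.startswith("head"):
                key = key.replace("head.", "", 1)
            if "3" in key:
                key = key.replace("3", "4")
        out[prefix + key] = val
    return out


print(list(mpa_to_otx(mpa_state)))
# ['conv1.weight', 'classifier.4.weight']
```

The pre-hook in the diff applies the inverse mapping on load, which is why the two hooks are always registered as a pair.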
@@ -290,9 +296,10 @@ def sam_image_classifier__extract_feat(ctx, self, img): return feat, backbone_feat @FUNCTION_REWRITER.register_rewriter( - "otx.mpa.modules.models.classifiers.sam_classifier.SAMImageClassifier.simple_test" + "otx.algorithms.classification.adapters.mmcls.models.classifiers.SAMImageClassifier.simple_test" ) - def sam_image_classifier__simple_test(ctx, self, img, img_metas): + def sam_image_classifier__simple_test(ctx, self, img, img_metas): # pylint: disable=unused-argument + """Simple test function used for inference for SAMClassifier with mmdeploy.""" feat, backbone_feat = self.extract_feat(img) logit = self.head.simple_test(feat) diff --git a/otx/mpa/modules/models/classifiers/sam_classifier_mixin.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/sam_classifier_mixin.py similarity index 54% rename from otx/mpa/modules/models/classifiers/sam_classifier_mixin.py rename to otx/algorithms/classification/adapters/mmcls/models/classifiers/sam_classifier_mixin.py index cd7354a6c51..5ee82cbc058 100644 --- a/otx/mpa/modules/models/classifiers/sam_classifier_mixin.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/sam_classifier_mixin.py @@ -1,14 +1,13 @@ +"""Module defining Mix-in class of SAMClassifier.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -class SAMClassifierMixin(object): - """SAM-enabled BaseClassifier mix-in""" +class SAMClassifierMixin: + """SAM-enabled BaseClassifier mix-in.""" def train_step(self, data, optimizer=None, **kwargs): - # Saving current batch data to compute SAM gradient - # Rest of SAM logics are implented in SAMOptimizerHook + """Saving current batch data to compute SAM gradient.""" self.current_batch = data - return super().train_step(data, optimizer, **kwargs) diff --git a/otx/mpa/modules/models/classifiers/semisl_classifier.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_classifier.py similarity index 91% rename from otx/mpa/modules/models/classifiers/semisl_classifier.py rename to otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_classifier.py index 88ff008c64a..7da4b94ee21 100644 --- a/otx/mpa/modules/models/classifiers/semisl_classifier.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_classifier.py @@ -1,3 +1,4 @@ +"""Module for defining a semi-supervised classifier using mmcls.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -5,7 +6,7 @@ import torch from mmcls.models.builder import CLASSIFIERS -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from .sam_classifier import SAMImageClassifier @@ -14,12 +15,13 @@ @CLASSIFIERS.register_module() class SemiSLClassifier(SAMImageClassifier): - """Semi-SL Classifier + """Semi-SL Classifier. + This classifier supports unlabeled data by overriding forward_train """ def forward_train(self, imgs, **kwargs): - """Data is transmitted as a classifier training function + """Data is transmitted as a classifier training function. 
Args: imgs (list[Tensor]): List of tensors of shape (1, C, H, W) diff --git a/otx/mpa/modules/models/classifiers/semisl_multilabel_classifier.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_multilabel_classifier.py similarity index 76% rename from otx/mpa/modules/models/classifiers/semisl_multilabel_classifier.py rename to otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_multilabel_classifier.py index 8f9de42f07d..d9a08b5f7c2 100644 --- a/otx/mpa/modules/models/classifiers/semisl_multilabel_classifier.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/semisl_multilabel_classifier.py @@ -1,10 +1,11 @@ +"""Module for defining a semi-supervised multi-label classifier using mmcls.""" # Copyright (C) 2023 Intel Corporation # # SPDX-License-Identifier: MIT from mmcls.models.builder import CLASSIFIERS -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from .sam_classifier import SAMImageClassifier @@ -13,15 +14,13 @@ @CLASSIFIERS.register_module() class SemiSLMultilabelClassifier(SAMImageClassifier): - """Semi-SL Multilabel Classifier - This classifier supports unlabeled data by overriding forward_train - """ + """Semi-SL Multilabel Classifier which supports unlabeled data by overriding forward_train.""" - def forward_train(self, imgs, gt_label, **kwargs): - """Data is transmitted as a classifier training function + def forward_train(self, img, gt_label, **kwargs): + """Data is transmitted as a classifier training function. Args: - imgs (list[Tensor]): List of tensors of shape (1, C, H, W) + img (list[Tensor]): List of tensors of shape (1, C, H, W) Typically these should be mean centered and std scaled. gt_label (Tensor): Ground truth labels for the input labeled images kwargs (keyword arguments): Specific to concrete implementation @@ -34,7 +33,7 @@ def forward_train(self, imgs, gt_label, **kwargs): target = gt_label.squeeze() unlabeled_data = kwargs["extra_0"] x = {} - x["labeled_weak"] = self.extract_feat(imgs) + x["labeled_weak"] = self.extract_feat(img) x["labeled_strong"] = self.extract_feat(kwargs["img_strong"]) img_uw = unlabeled_data["img"] diff --git a/otx/mpa/modules/models/classifiers/supcon_classifier.py b/otx/algorithms/classification/adapters/mmcls/models/classifiers/supcon_classifier.py similarity index 82% rename from otx/mpa/modules/models/classifiers/supcon_classifier.py rename to otx/algorithms/classification/adapters/mmcls/models/classifiers/supcon_classifier.py index 6403de55f5b..ed3456fcb4c 100644 --- a/otx/mpa/modules/models/classifiers/supcon_classifier.py +++ b/otx/algorithms/classification/adapters/mmcls/models/classifiers/supcon_classifier.py @@ -1,3 +1,4 @@ +"""This module contains the SupConClassifier implementation for MMClassification.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -5,18 +6,19 @@ import torch from mmcls.models.builder import CLASSIFIERS from mmcls.models.classifiers.image import ImageClassifier -from torch.nn.functional import sigmoid, softmax @CLASSIFIERS.register_module() class SupConClassifier(ImageClassifier): + """SupConClassifier with support for classification tasks.""" + def __init__(self, backbone, neck=None, head=None, pretrained=None, **kwargs): self.multilabel = kwargs.pop("multilabel", False) self.hierarchical = kwargs.pop("hierarchical", False) super().__init__(backbone, neck=neck, head=head, pretrained=pretrained, **kwargs) def forward_train(self, img, gt_label, **kwargs): 
- # concatenate the different image views along the batch size + """Concatenate the different image views along the batch size.""" if len(img.shape) == 5: img = torch.cat([img[:, d, :, :, :] for d in range(img.shape[1])], dim=0) x = self.extract_feat(img) diff --git a/otx/algorithms/classification/adapters/mmcls/models/heads/__init__.py b/otx/algorithms/classification/adapters/mmcls/models/heads/__init__.py index 375f9e849a4..69a1ca77179 100644 --- a/otx/algorithms/classification/adapters/mmcls/models/heads/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/__init__.py @@ -14,6 +14,38 @@ # See the License for the specific language governing permissions # and limitations under the License. +from .cls_head import ClsHead from .contrastive_head import ConstrastiveHead +from .conv_head import ConvClsHead +from .custom_cls_head import CustomLinearClsHead, CustomNonLinearClsHead +from .custom_hierarchical_linear_cls_head import CustomHierarchicalLinearClsHead +from .custom_hierarchical_non_linear_cls_head import CustomHierarchicalNonLinearClsHead +from .custom_multi_label_linear_cls_head import CustomMultiLabelLinearClsHead +from .custom_multi_label_non_linear_cls_head import CustomMultiLabelNonLinearClsHead +from .mmov_cls_head import MMOVClsHead +from .non_linear_cls_head import NonLinearClsHead +from .semisl_cls_head import SemiLinearClsHead, SemiNonLinearClsHead +from .semisl_multilabel_cls_head import ( + SemiLinearMultilabelClsHead, + SemiNonLinearMultilabelClsHead, +) +from .supcon_cls_head import SupConClsHead -__all__ = ["ConstrastiveHead"] +__all__ = [ + "ConstrastiveHead", + "CustomLinearClsHead", + "CustomNonLinearClsHead", + "CustomHierarchicalLinearClsHead", + "CustomHierarchicalNonLinearClsHead", + "CustomMultiLabelLinearClsHead", + "CustomMultiLabelNonLinearClsHead", + "SemiLinearMultilabelClsHead", + "SemiNonLinearMultilabelClsHead", + "NonLinearClsHead", + "SemiLinearClsHead", + "SemiNonLinearClsHead", + "SupConClsHead", + "MMOVClsHead", + "ConvClsHead", + "ClsHead", +] diff --git a/otx/mpa/modules/ov/models/mmcls/heads/cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/cls_head.py similarity index 67% rename from otx/mpa/modules/ov/models/mmcls/heads/cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/cls_head.py index 2cdf09f9139..1cf23a187fb 100644 --- a/otx/mpa/modules/ov/models/mmcls/heads/cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/cls_head.py @@ -1,3 +1,4 @@ +"""Module defining Classification Head for MMOV inference.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,17 +9,25 @@ @HEADS.register_module(force=True) class ClsHead(OriginClsHead): + """Classification Head for MMOV inference.""" + def __init__(self, *args, **kwargs): do_squeeze = kwargs.pop("do_squeeze", False) - super(ClsHead, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._do_squeeze = do_squeeze + def forward(self, x): + """Forward function of ClsHead class.""" + return self.simple_test(x) + def forward_train(self, cls_score, gt_label): + """Forward_train function of ClsHead class.""" if self._do_squeeze: cls_score = cls_score.unsqueeze(0).squeeze() return super().forward_train(cls_score, gt_label) def simple_test(self, cls_score): + """Test without augmentation.""" if self._do_squeeze: cls_score = cls_score.unsqueeze(0).squeeze() return super().simple_test(cls_score) diff --git a/otx/mpa/modules/ov/models/mmcls/heads/conv_head.py
b/otx/algorithms/classification/adapters/mmcls/models/heads/conv_head.py similarity index 75% rename from otx/mpa/modules/ov/models/mmcls/heads/conv_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/conv_head.py index ee9fd89a59c..ca302d947f7 100644 --- a/otx/mpa/modules/ov/models/mmcls/heads/conv_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/conv_head.py @@ -1,11 +1,12 @@ +"""Module for defining ConvClsHead used for MMOV inference.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -import torch.nn as nn import torch.nn.functional as F from mmcls.models.builder import HEADS from mmcls.models.heads import ClsHead +from torch import nn @HEADS.register_module() @@ -20,8 +21,9 @@ class ConvClsHead(ClsHead): Defaults to use dict(type='Normal', layer='Linear', std=0.01). """ - def __init__(self, num_classes, in_channels, init_cfg=dict(type="Kaiming", layer=["Conv2d"]), *args, **kwargs): - super(ConvClsHead, self).__init__(init_cfg=init_cfg, *args, **kwargs) + def __init__(self, num_classes, in_channels, init_cfg=None, **kwargs): + init_cfg = init_cfg if init_cfg else dict(type="Kaiming", layer=["Conv2d"]) + super().__init__(init_cfg=init_cfg, **kwargs) self.in_channels = in_channels self.num_classes = num_classes @@ -32,11 +34,12 @@ def __init__(self, num_classes, in_channels, init_cfg=dict(type="Kaiming", layer self.conv = nn.Conv2d(self.in_channels, self.num_classes, (1, 1)) def pre_logits(self, x): + """Preprocess logits.""" if isinstance(x, tuple): x = x[-1] return x - def simple_test(self, x, softmax=True, post_process=True): + def simple_test(self, cls_score, softmax=True, post_process=True): """Inference without augmentation. Args: @@ -56,7 +59,7 @@ def simple_test(self, x, softmax=True, post_process=True): - If post processing, the output is a multi-dimentional list of float and the dimensions are ``(num_samples, num_classes)``. 
""" - x = self.pre_logits(x) + x = self.pre_logits(cls_score) cls_score = self.conv(x).squeeze() if softmax: @@ -66,11 +69,15 @@ def simple_test(self, x, softmax=True, post_process=True): if post_process: return self.post_process(pred) - else: - return pred + return pred + + def forward(self, x): + """Forward fuction of ConvClsHead class.""" + return self.simple_test(x) - def forward_train(self, x, gt_label, **kwargs): - x = self.pre_logits(x) + def forward_train(self, cls_score, gt_label, **kwargs): + """Forward_train fuction of ConvClsHead class.""" + x = self.pre_logits(cls_score) cls_score = self.conv(x).squeeze() losses = self.loss(cls_score, gt_label, **kwargs) return losses diff --git a/otx/mpa/modules/models/heads/custom_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py similarity index 74% rename from otx/mpa/modules/models/heads/custom_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py index 8947c44efdf..fcf2008e795 100644 --- a/otx/mpa/modules/models/heads/custom_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_cls_head.py @@ -1,3 +1,4 @@ +"""Module defining for OTX Custom Non-linear classification head.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -15,10 +16,11 @@ class CustomNonLinearClsHead(NonLinearClsHead): """Custom Nonlinear classifier head.""" def __init__(self, *args, **kwargs): - super(CustomNonLinearClsHead, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.loss_type = kwargs.get("loss", dict(type="CrossEntropyLoss"))["type"] def loss(self, cls_score, gt_label, feature=None): + """Calculate loss for given cls_score/gt_label.""" num_samples = len(cls_score) losses = dict() # compute loss @@ -34,9 +36,14 @@ def loss(self, cls_score, gt_label, feature=None): losses["loss"] = loss return losses - def forward_train(self, x, gt_label): - cls_score = self.classifier(x) - losses = self.loss(cls_score, gt_label, feature=x) + def forward(self, x): + """Forward fuction of CustomNonLinearHead class.""" + return self.simple_test(x) + + def forward_train(self, cls_score, gt_label): + """Forward_train fuction of CustomNonLinearHead class.""" + logit = self.classifier(cls_score) + losses = self.loss(logit, gt_label, feature=cls_score) return losses @@ -52,13 +59,13 @@ class CustomLinearClsHead(LinearClsHead): Defaults to use dict(type='Normal', layer='Linear', std=0.01). 
""" - def __init__( - self, num_classes, in_channels, init_cfg=dict(type="Normal", layer="Linear", std=0.01), *args, **kwargs - ): + def __init__(self, num_classes, in_channels, init_cfg=None, **kwargs): + init_cfg = init_cfg if init_cfg else dict(type="Normal", layer="Linear", std=0.01) + super().__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs) self.loss_type = kwargs.get("loss", dict(type="CrossEntropyLoss"))["type"] - super(CustomLinearClsHead, self).__init__(num_classes, in_channels, init_cfg=init_cfg, *args, **kwargs) def loss(self, cls_score, gt_label, feature=None): + """Calculate loss for given cls_score/gt_label.""" num_samples = len(cls_score) losses = dict() # compute loss @@ -85,7 +92,12 @@ def simple_test(self, img): return self.post_process(pred) + def forward(self, x): + """Forward fuction of CustomLinearHead class.""" + return self.simple_test(x) + def forward_train(self, x, gt_label): + """Forward_train fuction of CustomLinearHead class.""" cls_score = self.fc(x) losses = self.loss(cls_score, gt_label, feature=x) return losses diff --git a/otx/mpa/modules/models/heads/custom_hierarchical_linear_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_linear_cls_head.py similarity index 85% rename from otx/mpa/modules/models/heads/custom_hierarchical_linear_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_linear_cls_head.py index a9190de0c87..d03f89afcdf 100644 --- a/otx/mpa/modules/models/heads/custom_hierarchical_linear_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_linear_cls_head.py @@ -1,17 +1,19 @@ +"""Module for defining Linear classification head for h-label classification.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn from mmcls.models.builder import HEADS, build_loss from mmcls.models.heads import MultiLabelClsHead from mmcv.cnn import normal_init +from torch import nn @HEADS.register_module() class CustomHierarchicalLinearClsHead(MultiLabelClsHead): """Custom Linear classification head for hierarchical classification task. + Args: num_classes (int): Number of categories. in_channels (int): Number of channels in the input feature map. 
@@ -23,13 +25,17 @@ def __init__( self, num_classes, in_channels, - loss=dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0), - multilabel_loss=dict(type="AsymmetricLoss", reduction="mean", loss_weight=1.0), + loss=None, + multilabel_loss=None, **kwargs, ): + loss = loss if loss else dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0) + multilabel_loss = ( + multilabel_loss if multilabel_loss else dict(type="AsymmetricLoss", reduction="mean", loss_weight=1.0) + ) self.hierarchical_info = kwargs.pop("hierarchical_info", None) assert self.hierarchical_info - super(CustomHierarchicalLinearClsHead, self).__init__(loss=loss) + super().__init__(loss=loss) if self.hierarchical_info["num_multiclass_heads"] + self.hierarchical_info["num_multilabel_classes"] == 0: raise ValueError("Invalid classification heads configuration") self.compute_multilabel_loss = False @@ -47,9 +53,11 @@ def _init_layers(self): self.fc = nn.Linear(self.in_channels, self.num_classes) def init_weights(self): + """Initialize weights of head.""" normal_init(self.fc, mean=0, std=0.01, bias=0) def loss(self, cls_score, gt_label, multilabel=False, valid_label_mask=None): + """Calculate loss for given cls_score/gt_label.""" num_samples = len(cls_score) # compute loss if multilabel: @@ -65,10 +73,15 @@ return loss - def forward_train(self, x, gt_label, **kwargs): + def forward(self, x): + """Forward function of CustomHierarchicalLinearClsHead.""" + return self.simple_test(x) + + def forward_train(self, cls_score, gt_label, **kwargs): + """Forward_train function of CustomHierarchicalLinearClsHead class.""" img_metas = kwargs.get("img_metas", None) - gt_label = gt_label.type_as(x) - cls_score = self.fc(x) + gt_label = gt_label.type_as(cls_score) + cls_score = self.fc(cls_score) losses = dict(loss=0.0) for i in range(self.hierarchical_info["num_multiclass_heads"]): @@ -104,7 +117,7 @@ valid_label_mask = self.get_valid_label_mask(img_metas=img_metas)[ :, self.hierarchical_info["num_single_label_classes"] : ] - valid_label_mask = valid_label_mask.to(x.device) + valid_label_mask = valid_label_mask.to(cls_score.device) valid_label_mask = valid_label_mask[valid_batch_mask] else: valid_label_mask = None @@ -150,8 +163,9 @@ return pred def get_valid_label_mask(self, img_metas): + """Get valid label with ignored_label mask.""" valid_label_mask = [] - for i, meta in enumerate(img_metas): + for meta in img_metas: mask = torch.Tensor([1 for _ in range(self.num_classes)]) if "ignored_labels" in meta and meta["ignored_labels"]: mask[meta["ignored_labels"]] = 0 diff --git a/otx/mpa/modules/models/heads/custom_hierarchical_non_linear_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_non_linear_cls_head.py similarity index 81% rename from otx/mpa/modules/models/heads/custom_hierarchical_non_linear_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_non_linear_cls_head.py index ab737907299..88b7d88b67a 100644 --- a/otx/mpa/modules/models/heads/custom_hierarchical_non_linear_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_hierarchical_non_linear_cls_head.py @@ -1,17 +1,19 @@ +"""Non-linear classification head for hierarchical classification task.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier:
Apache-2.0 # import torch -import torch.nn as nn from mmcls.models.builder import HEADS, build_loss from mmcls.models.heads import MultiLabelClsHead from mmcv.cnn import build_activation_layer, constant_init, normal_init +from torch import nn @HEADS.register_module() -class CustomHierarchicalNonLinearClsHead(MultiLabelClsHead): +class CustomHierarchicalNonLinearClsHead(MultiLabelClsHead): # pylint: disable=too-many-instance-attributes """Custom NonLinear classification head for hierarchical classification task. + Args: num_classes (int): Number of categories excluding the background category. @@ -27,15 +29,20 @@ def __init__( self, num_classes, in_channels, hid_channels=1280, - act_cfg=dict(type="ReLU"), - loss=dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0), - multilabel_loss=dict(type="AsymmetricLoss", reduction="mean", loss_weight=1.0), + act_cfg=None, + loss=None, + multilabel_loss=None, dropout=False, **kwargs, - ): + ): # pylint: disable=too-many-arguments + act_cfg = act_cfg if act_cfg else dict(type="ReLU") + loss = loss if loss else dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0) + multilabel_loss = ( + multilabel_loss if multilabel_loss else dict(type="AsymmetricLoss", reduction="mean", loss_weight=1.0) + ) self.hierarchical_info = kwargs.pop("hierarchical_info", None) assert self.hierarchical_info - super(CustomHierarchicalNonLinearClsHead, self).__init__(loss=loss) + super().__init__(loss=loss) if self.hierarchical_info["num_multiclass_heads"] + self.hierarchical_info["num_multilabel_classes"] == 0: raise ValueError("Invalid classification heads configuration") self.compute_multilabel_loss = False @@ -70,13 +77,15 @@ ) def init_weights(self): - for m in self.classifier: - if isinstance(m, nn.Linear): - normal_init(m, mean=0, std=0.01, bias=0) - elif isinstance(m, nn.BatchNorm1d): - constant_init(m, 1) + """Initialize weights of classification head.""" + for module in self.classifier: + if isinstance(module, nn.Linear): + normal_init(module, mean=0, std=0.01, bias=0) + elif isinstance(module, nn.BatchNorm1d): + constant_init(module, 1) def loss(self, cls_score, gt_label, multilabel=False, valid_label_mask=None): + """Calculate loss for given cls_score and gt_label.""" num_samples = len(cls_score) # compute loss if multilabel: @@ -92,10 +101,15 @@ return loss - def forward_train(self, x, gt_label, **kwargs): + def forward(self, x): + """Forward function of CustomHierarchicalNonLinearClsHead class.""" + return self.simple_test(x) + + def forward_train(self, cls_score, gt_label, **kwargs): + """Forward_train function of CustomHierarchicalNonLinearClsHead class.""" img_metas = kwargs.get("img_metas", None) - gt_label = gt_label.type_as(x) - cls_score = self.classifier(x) + gt_label = gt_label.type_as(cls_score) + cls_score = self.classifier(cls_score) losses = dict(loss=0.0) for i in range(self.hierarchical_info["num_multiclass_heads"]): @@ -131,7 +145,7 @@ valid_label_mask = self.get_valid_label_mask(img_metas=img_metas)[ :, self.hierarchical_info["num_single_label_classes"] : ] - valid_label_mask = valid_label_mask.to(x.device) + valid_label_mask = valid_label_mask.to(cls_score.device) valid_label_mask = valid_label_mask[valid_batch_mask] else: valid_label_mask = None @@ -177,8 +191,9 @@ return pred def get_valid_label_mask(self, img_metas): +
"""Get valid label mask with ignored_label.""" valid_label_mask = [] - for i, meta in enumerate(img_metas): + for meta in img_metas: mask = torch.Tensor([1 for _ in range(self.num_classes)]) if "ignored_labels" in meta and meta["ignored_labels"]: mask[meta["ignored_labels"]] = 0 diff --git a/otx/mpa/modules/models/heads/custom_multi_label_linear_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_multi_label_linear_cls_head.py similarity index 80% rename from otx/mpa/modules/models/heads/custom_multi_label_linear_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/custom_multi_label_linear_cls_head.py index 75ec8770d6a..0fc9e9214af 100644 --- a/otx/mpa/modules/models/heads/custom_multi_label_linear_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_multi_label_linear_cls_head.py @@ -1,18 +1,20 @@ +"""Module for defining multi-label linear classification head.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn import torch.nn.functional as F from mmcls.models.builder import HEADS from mmcls.models.heads import MultiLabelClsHead from mmcv.cnn import normal_init +from torch import nn @HEADS.register_module() class CustomMultiLabelLinearClsHead(MultiLabelClsHead): """Custom Linear classification head for multilabel task. + Args: num_classes (int): Number of categories. in_channels (int): Number of channels in the input feature map. @@ -27,9 +29,10 @@ def __init__( in_channels, normalized=False, scale=1.0, - loss=dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0), + loss=None, ): - super(CustomMultiLabelLinearClsHead, self).__init__(loss=loss) + loss = loss if loss else dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0) + super().__init__(loss=loss) if num_classes <= 0: raise ValueError(f"num_classes={num_classes} must be a positive integer") @@ -46,10 +49,12 @@ def _init_layers(self): self.fc = nn.Linear(self.in_channels, self.num_classes) def init_weights(self): + """Initialize weights of head.""" if isinstance(self.fc, nn.Linear): normal_init(self.fc, mean=0, std=0.01, bias=0) def loss(self, cls_score, gt_label, valid_label_mask=None): + """Calculate loss for given cls_score/gt_label.""" gt_label = gt_label.type_as(cls_score) num_samples = len(cls_score) losses = dict() @@ -61,10 +66,15 @@ def loss(self, cls_score, gt_label, valid_label_mask=None): losses["loss"] = loss / self.scale return losses - def forward_train(self, x, gt_label, **kwargs): + def forward(self, x): + """Forward fuction of CustomMultiLabelLinearClsHead class.""" + return self.simple_test(x) + + def forward_train(self, cls_score, gt_label, **kwargs): + """Forward_train fuction of CustomMultiLabelLinearClsHead.""" img_metas = kwargs.get("img_metas", False) - gt_label = gt_label.type_as(x) - cls_score = self.fc(x) * self.scale + gt_label = gt_label.type_as(cls_score) + cls_score = self.fc(cls_score) * self.scale valid_batch_mask = gt_label >= 0 gt_label = gt_label[ @@ -75,7 +85,7 @@ def forward_train(self, x, gt_label, **kwargs): ].view(cls_score.shape[0], -1) if img_metas: valid_label_mask = self.get_valid_label_mask(img_metas=img_metas) - valid_label_mask = valid_label_mask.to(x.device) + valid_label_mask = valid_label_mask.to(cls_score.device) valid_label_mask = valid_label_mask[ valid_batch_mask, ].view(valid_label_mask.shape[0], -1) @@ -96,8 +106,9 @@ def simple_test(self, img): return pred def 
get_valid_label_mask(self, img_metas): + """Get valid label mask using ignored_label.""" valid_label_mask = [] - for i, meta in enumerate(img_metas): + for meta in img_metas: mask = torch.Tensor([1 for _ in range(self.num_classes)]) if "ignored_labels" in meta and meta["ignored_labels"]: mask[meta["ignored_labels"]] = 0 @@ -107,13 +118,15 @@ def get_valid_label_mask(self, img_metas): class AnglularLinear(nn.Module): - """Computes cos of angles between input vectors and weights vectors + """Computes cos of angles between input vectors and weights vectors. + Args: in_features (int): Number of input features. out_features (int): Number of output cosine logits. """ def __init__(self, in_features, out_features): + """Init function of AngularLinear class.""" super().__init__() self.in_features = in_features self.out_features = out_features @@ -121,5 +134,6 @@ self.weight.data.normal_().renorm_(2, 0, 1e-5).mul_(1e5) def forward(self, x): + """Forward function of AngularLinear class.""" cos_theta = F.normalize(x.view(x.shape[0], -1), dim=1).mm(F.normalize(self.weight.t(), p=2, dim=0)) return cos_theta.clamp(-1, 1) diff --git a/otx/mpa/modules/models/heads/custom_multi_label_non_linear_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_multi_label_non_linear_cls_head.py similarity index 76% rename from otx/mpa/modules/models/heads/custom_multi_label_non_linear_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/custom_multi_label_non_linear_cls_head.py index d5b8db4c095..f851d6f0bda 100644 --- a/otx/mpa/modules/models/heads/custom_multi_label_non_linear_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/custom_multi_label_non_linear_cls_head.py @@ -1,12 +1,13 @@ +"""This module contains the CustomMultiLabelNonLinearClsHead implementation for MMClassification.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn from mmcls.models.builder import HEADS from mmcls.models.heads import MultiLabelClsHead from mmcv.cnn import build_activation_layer, constant_init, normal_init +from torch import nn from .custom_multi_label_linear_cls_head import AnglularLinear @@ -14,6 +15,7 @@ @HEADS.register_module() class CustomMultiLabelNonLinearClsHead(MultiLabelClsHead): """Non-linear classification head for multilabel task. + Args: num_classes (int): Number of categories. in_channels (int): Number of channels in the input feature map. @@ -24,19 +26,21 @@ class CustomMultiLabelNonLinearClsHead(MultiLabelClsHead): normalized (bool): Normalize input features and weights in the last linar layer.
""" + # pylint: disable=too-many-arguments def __init__( self, num_classes, in_channels, hid_channels=1280, - act_cfg=dict(type="ReLU"), + act_cfg=None, scale=1.0, - loss=dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0), + loss=None, dropout=False, normalized=False, ): - - super(CustomMultiLabelNonLinearClsHead, self).__init__(loss=loss) + act_cfg = act_cfg if act_cfg else dict(type="ReLU") + loss = loss if loss else dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0) + super().__init__(loss=loss) self.in_channels = in_channels self.num_classes = num_classes @@ -66,13 +70,15 @@ def _init_layers(self, act_cfg): self.classifier = nn.Sequential(*modules) def init_weights(self): - for m in self.classifier: - if isinstance(m, nn.Linear): - normal_init(m, mean=0, std=0.01, bias=0) - elif isinstance(m, nn.BatchNorm1d): - constant_init(m, 1) + """Iniitalize weights of model.""" + for module in self.classifier: + if isinstance(module, nn.Linear): + normal_init(module, mean=0, std=0.01, bias=0) + elif isinstance(module, nn.BatchNorm1d): + constant_init(module, 1) def loss(self, cls_score, gt_label, valid_label_mask=None): + """Calculate loss for given cls_score/gt_label.""" gt_label = gt_label.type_as(cls_score) num_samples = len(cls_score) losses = dict() @@ -89,10 +95,15 @@ def loss(self, cls_score, gt_label, valid_label_mask=None): losses["loss"] = loss / self.scale return losses - def forward_train(self, x, gt_label, **kwargs): + def forward(self, x): + """Forward fuction of CustomMultiLabelNonLinearClsHead.""" + return self.simple_test(x) + + def forward_train(self, cls_score, gt_label, **kwargs): + """Forward_train fuction of CustomMultiLabelNonLinearClsHead.""" img_metas = kwargs.get("img_metas", False) - gt_label = gt_label.type_as(x) - cls_score = self.classifier(x) * self.scale + gt_label = gt_label.type_as(cls_score) + cls_score = self.classifier(cls_score) * self.scale valid_batch_mask = gt_label >= 0 gt_label = gt_label[ @@ -103,7 +114,7 @@ def forward_train(self, x, gt_label, **kwargs): ].view(cls_score.shape[0], -1) if img_metas: valid_label_mask = self.get_valid_label_mask(img_metas=img_metas) - valid_label_mask = valid_label_mask.to(x.device) + valid_label_mask = valid_label_mask.to(cls_score.device) valid_label_mask = valid_label_mask[ valid_batch_mask, ].view(valid_label_mask.shape[0], -1) @@ -124,8 +135,9 @@ def simple_test(self, img): return pred def get_valid_label_mask(self, img_metas): + """Get valid label with ignored_label mask.""" valid_label_mask = [] - for i, meta in enumerate(img_metas): + for meta in img_metas: mask = torch.Tensor([1 for _ in range(self.num_classes)]) if "ignored_labels" in meta and meta["ignored_labels"]: mask[meta["ignored_labels"]] = 0 diff --git a/otx/mpa/modules/ov/models/mmcls/heads/mmov_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/mmov_cls_head.py similarity index 57% rename from otx/mpa/modules/ov/models/mmcls/heads/mmov_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/mmov_cls_head.py index 804ee70babd..3f18ded84ab 100644 --- a/otx/mpa/modules/ov/models/mmcls/heads/mmov_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/mmov_cls_head.py @@ -1,3 +1,4 @@ +"""Module for OpenVINO Classification Head adopted with mmclassification.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -9,12 +10,28 @@ from mmcls.models.builder import HEADS from mmcls.models.heads import 
ClsHead -from ....graph.parsers.cls import cls_base_parser -from ...mmov_model import MMOVModel +from otx.core.ov.graph.parsers.cls import cls_base_parser +from otx.core.ov.models.mmov_model import MMOVModel @HEADS.register_module() class MMOVClsHead(ClsHead): + """Head module for MMClassification that uses MMOV for inference. + + Args: + model_path_or_model (Union[str, ov.Model]): Path to the OpenVINO model file or + the OpenVINO model object. + weight_path (Optional[str]): Path to the weight file. + inputs (Optional[Union[Dict[str, Union[str, List[str]]], List[str], str]]): + Input shape(s) of the OpenVINO model. + outputs (Optional[Union[Dict[str, Union[str, List[str]]], List[str], str]]): + Output name(s) of the OpenVINO model. + init_weight (bool): Whether to initialize the weight from a normal + distribution. + verify_shape (bool): Whether to verify the input shape of the OpenVINO model. + softmax_at_test (bool): Whether to apply softmax during testing. + """ + def __init__( self, model_path_or_model: Union[str, ov.Model], @@ -25,7 +42,7 @@ def __init__( verify_shape: bool = True, softmax_at_test: bool = True, **kwargs, - ): + ): # pylint: disable=too-many-arguments kwargs.pop("in_channels", None) kwargs.pop("num_classes", None) super().__init__(**kwargs) @@ -49,15 +66,21 @@ parser_kwargs=dict(component="head"), ) - def forward_train(self, x, gt_label, **kwargs): - cls_score = self.model(x) + def forward(self, x): + """Forward function of MMOVClsHead class.""" + return self.simple_test(x) + + def forward_train(self, cls_score, gt_label, **kwargs): + """Forward_train function of MMOVClsHead.""" + cls_score = self.model(cls_score) while cls_score.dim() > 2: cls_score = cls_score.squeeze(2) losses = self.loss(cls_score, gt_label, **kwargs) return losses - def simple_test(self, x): - cls_score = self.model(x) + def simple_test(self, cls_score): + """Test without augmentation.""" + cls_score = self.model(cls_score) while cls_score.dim() > 2: cls_score = cls_score.squeeze(2) if self._softmax_at_test: diff --git a/otx/mpa/modules/models/heads/non_linear_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/non_linear_cls_head.py similarity index 71% rename from otx/mpa/modules/models/heads/non_linear_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/non_linear_cls_head.py index 11aa198a160..fa37c5c9c72 100644 --- a/otx/mpa/modules/models/heads/non_linear_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/non_linear_cls_head.py @@ -1,13 +1,14 @@ +"""Module for defining non-linear classification head.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn import torch.nn.functional as F from mmcls.models.builder import HEADS from mmcls.models.heads.cls_head import ClsHead from mmcv.cnn import build_activation_layer, constant_init, normal_init +from torch import nn @HEADS.register_module() @@ -29,15 +30,16 @@ def __init__( self, num_classes, in_channels, hid_channels=1280, - act_cfg=dict(type="ReLU"), - loss=dict(type="CrossEntropyLoss", loss_weight=1.0), + act_cfg=None, + loss=None, topk=(1,), dropout=False, - *args, **kwargs, - ): + ): # pylint: disable=too-many-arguments topk = (1,) if num_classes < 5 else (1, 5) - super(NonLinearClsHead, self).__init__(loss=loss, topk=topk, *args, **kwargs) + act_cfg = act_cfg if act_cfg else dict(type="ReLU") + loss = loss if loss else dict(type="CrossEntropyLoss", loss_weight=1.0) + super().__init__(loss=loss, topk=topk, **kwargs)
self.in_channels = in_channels self.hid_channels = hid_channels self.num_classes = num_classes @@ -67,11 +69,12 @@ ) def init_weights(self): - for m in self.classifier: - if isinstance(m, nn.Linear): - normal_init(m, mean=0, std=0.01, bias=0) - elif isinstance(m, nn.BatchNorm1d): - constant_init(m, 1) + """Initialize weights of head.""" + for module in self.classifier: + if isinstance(module, nn.Linear): + normal_init(module, mean=0, std=0.01, bias=0) + elif isinstance(module, nn.BatchNorm1d): + constant_init(module, 1) def simple_test(self, img): """Test without augmentation.""" @@ -84,7 +87,12 @@ pred = list(pred.detach().cpu().numpy()) return pred - def forward_train(self, x, gt_label): - cls_score = self.classifier(x) - losses = self.loss(cls_score, gt_label) + def forward(self, x): + """Forward function of NonLinearClsHead class.""" + return self.simple_test(x) + + def forward_train(self, cls_score, gt_label): + """Forward_train function of NonLinearClsHead class.""" + logit = self.classifier(cls_score) + losses = self.loss(logit, gt_label) return losses diff --git a/otx/mpa/modules/models/heads/semisl_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/semisl_cls_head.py similarity index 82% rename from otx/mpa/modules/models/heads/semisl_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/semisl_cls_head.py index adbb8dd2efa..108dff95e69 100644 --- a/otx/mpa/modules/models/heads/semisl_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/semisl_cls_head.py @@ -1,3 +1,4 @@ +"""Module for defining semi-supervised learning for multi-class classification task.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,7 +7,9 @@ from mmcls.models.builder import HEADS from mmcls.models.heads.linear_head import LinearClsHead -from otx.mpa.modules.models.heads.non_linear_cls_head import NonLinearClsHead +from otx.algorithms.classification.adapters.mmcls.models.heads.non_linear_cls_head import ( + NonLinearClsHead, +) class SemiClsHead: @@ -30,7 +33,7 @@ def __init__(self, unlabeled_coef=1.0, use_dynamic_threshold=True, min_threshold self.classwise_acc = self.classwise_acc.cuda() def loss(self, logits, gt_label, pseudo_label=None, mask=None): - """loss function in which unlabeled data is considered + """Loss function in which unlabeled data is considered.
Args: logit (set): (labeled data logit, unlabeled data logit) @@ -46,22 +49,22 @@ losses = dict() # compute supervised loss - lx = self.compute_loss(logits_x, gt_label, avg_factor=num_samples) + labeled_loss = self.compute_loss(logits_x, gt_label, avg_factor=num_samples) - lu = 0 + unlabeled_loss = 0 if len(logits_u_s) > 0: # compute unsupervised loss - lu = self.compute_loss(logits_u_s, pseudo_label, avg_factor=len(logits_u_s)) * mask - losses["loss"] = lx + self.unlabeled_coef * lu - losses["unlabeled_loss"] = self.unlabeled_coef * lu + unlabeled_loss = self.compute_loss(logits_u_s, pseudo_label, avg_factor=len(logits_u_s)) * mask + losses["loss"] = labeled_loss + self.unlabeled_coef * unlabeled_loss + losses["unlabeled_loss"] = self.unlabeled_coef * unlabeled_loss # compute accuracy acc = self.compute_accuracy(logits_x, gt_label) losses["accuracy"] = {f"top-{k}": a for k, a in zip(self.topk, acc)} return losses - def forward_train(self, x, gt_label, final_layer=None): - """forward_train head using pseudo-label selected through threshold + def forward_train(self, x, gt_label, final_layer=None): # pylint: disable=too-many-locals + """Forward_train head using pseudo-label selected through threshold. Args: x (dict or Tensor): dict(labeled, unlabeled_weak, unlabeled_strong) or NxC input features. @@ -119,7 +122,7 @@ @HEADS.register_module() class SemiLinearClsHead(SemiClsHead, LinearClsHead): - """Linear classification head for Semi-SL + """Linear classification head for Semi-SL. This head is designed to support FixMatch algorithm. (https://arxiv.org/abs/2001.07685) - [OTX] supports dynamic threshold based on confidence for each class @@ -138,28 +141,34 @@ def __init__( self, num_classes, in_channels, - loss=dict(type="CrossEntropyLoss", loss_weight=1.0), - topk=(1,), + loss=None, + topk=None, unlabeled_coef=1.0, use_dynamic_threshold=True, min_threshold=0.5, - ): + ): # pylint: disable=too-many-arguments if in_channels <= 0: raise ValueError(f"in_channels={in_channels} must be a positive integer") if num_classes <= 0: raise ValueError("at least one class must be exist num_classes.") topk = (1,) if num_classes < 5 else (1, 5) + loss = loss if loss else dict(type="CrossEntropyLoss", loss_weight=1.0) LinearClsHead.__init__(self, num_classes, in_channels, loss=loss, topk=topk) SemiClsHead.__init__(self, unlabeled_coef, use_dynamic_threshold, min_threshold) + def forward(self, x): + """Forward function of SemiLinearClsHead class.""" + return self.simple_test(x) + def forward_train(self, x, gt_label): + """Forward_train function of SemiLinearClsHead class.""" return SemiClsHead.forward_train(self, x, gt_label, final_layer=self.fc) @HEADS.register_module() class SemiNonLinearClsHead(SemiClsHead, NonLinearClsHead): - """Non-linear classification head for Semi-SL + """Non-linear classification head for Semi-SL. This head is designed to support FixMatch algorithm.
(https://arxiv.org/abs/2001.07685) - [OTX] supports dynamic threshold based on confidence for each class @@ -181,20 +190,22 @@ num_classes, in_channels, hid_channels=1280, - act_cfg=dict(type="ReLU"), - loss=dict(type="CrossEntropyLoss", loss_weight=1.0), - topk=(1,), + act_cfg=None, + loss=None, + topk=None, dropout=False, unlabeled_coef=1.0, use_dynamic_threshold=True, min_threshold=0.5, - ): + ): # pylint: disable=too-many-arguments if in_channels <= 0: raise ValueError(f"in_channels={in_channels} must be a positive integer") if num_classes <= 0: raise ValueError("at least one class must be exist num_classes.") topk = (1,) if num_classes < 5 else (1, 5) + act_cfg = act_cfg if act_cfg else dict(type="ReLU") + loss = loss if loss else dict(type="CrossEntropyLoss", loss_weight=1.0) NonLinearClsHead.__init__( self, num_classes, @@ -207,5 +218,10 @@ ) SemiClsHead.__init__(self, unlabeled_coef, use_dynamic_threshold, min_threshold) + def forward(self, x): + """Forward function of SemiNonLinearClsHead class.""" + return self.simple_test(x) + def forward_train(self, x, gt_label): + """Forward_train function of SemiNonLinearClsHead class.""" return SemiClsHead.forward_train(self, x, gt_label, final_layer=self.classifier) diff --git a/otx/mpa/modules/models/heads/semisl_multilabel_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/semisl_multilabel_cls_head.py similarity index 52% rename from otx/mpa/modules/models/heads/semisl_multilabel_cls_head.py rename to otx/algorithms/classification/adapters/mmcls/models/heads/semisl_multilabel_cls_head.py index d763714f9fe..9f694990744 100644 --- a/otx/mpa/modules/models/heads/semisl_multilabel_cls_head.py +++ b/otx/algorithms/classification/adapters/mmcls/models/heads/semisl_multilabel_cls_head.py @@ -1,18 +1,115 @@ +"""Module for defining semi-supervised classification head for multi-label classification task.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch from mmcls.models.builder import HEADS, build_loss +from torch import nn -from otx.mpa.modules.models.heads.custom_multi_label_linear_cls_head import ( +from otx.algorithms.classification.adapters.mmcls.models.heads.custom_multi_label_linear_cls_head import ( CustomMultiLabelLinearClsHead, ) -from otx.mpa.modules.models.heads.custom_multi_label_non_linear_cls_head import ( +from otx.algorithms.classification.adapters.mmcls.models.heads.custom_multi_label_non_linear_cls_head import ( CustomMultiLabelNonLinearClsHead, ) -from .utils import LossBalancer, generate_aux_mlp + +def generate_aux_mlp(aux_mlp_cfg: dict, in_channels: int): + """Generate auxiliary MLP.""" + out_channels = aux_mlp_cfg["out_channels"] + if out_channels <= 0: + raise ValueError(f"out_channels={out_channels} must be a positive integer") + if "hid_channels" in aux_mlp_cfg and aux_mlp_cfg["hid_channels"] > 0: + hid_channels = aux_mlp_cfg["hid_channels"] + mlp = nn.Sequential( + nn.Linear(in_features=in_channels, out_features=hid_channels), + nn.ReLU(inplace=True), + nn.Linear(in_features=hid_channels, out_features=out_channels), + ) + else: + mlp = nn.Linear(in_features=in_channels, out_features=out_channels) + + return mlp + + +class EMAMeter: + """Exponential Moving Average Meter class.""" + + def __init__(self, alpha=0.9): + """Initialize the Exponential Moving Average Meter. + + Args: + - alpha (float): Smoothing factor for the exponential moving average. Defaults to 0.9.
+ + Returns: + - None + """ + self.alpha = alpha + self.val = 0 + + def reset(self): + """Reset the Exponential Moving Average Meter. + + Args: + - None + + Returns: + - None + """ + self.val = 0 + + def update(self, val): + """Update the Exponential Moving Average Meter with new value. + + Args: + - val (float): New value to update the meter. + + Returns: + - None + """ + self.val = self.alpha * self.val + (1 - self.alpha) * val + + +class LossBalancer: + """Loss Balancer class.""" + + def __init__(self, num_losses, weights=None, ema_weight=0.7) -> None: + """Initialize the Loss Balancer. + + Args: + - num_losses (int): Number of losses to balance. + - weights (list): List of weights to be applied to each loss. If None, equal weights are applied. + - ema_weight (float): Smoothing factor for the exponential moving average meter. Defaults to 0.7. + + Returns: + - None + """ + self.epsilon = 1e-9 + self.avg_estimators = [EMAMeter(ema_weight) for _ in range(num_losses)] + + if weights is not None: + assert len(weights) == num_losses + self.final_weights = weights + else: + self.final_weights = [1.0] * num_losses + + def balance_losses(self, losses): + """Balance the given losses using the weights and exponential moving average. + + Args: + - losses (list): List of losses to be balanced. + + Returns: + - total_loss (float): Balanced loss value. + """ + total_loss = 0.0 + for i, loss in enumerate(losses): + self.avg_estimators[i].update(float(loss)) + total_loss += ( + self.final_weights[i] * loss / (self.avg_estimators[i].val + self.epsilon) * self.avg_estimators[0].val + ) + return total_loss class SemiMultilabelClsHead: @@ -27,8 +124,11 @@ def __init__( self, unlabeled_coef=0.1, use_dynamic_loss_weighting=True, - aux_loss=dict(type="BarlowTwinsLoss", off_diag_penality=1.0 / 128.0, loss_weight=1.0), + aux_loss=None, ): + aux_loss = ( + aux_loss if aux_loss else dict(type="BarlowTwinsLoss", off_diag_penality=1.0 / 128.0, loss_weight=1.0) + ) self.unlabeled_coef = unlabeled_coef self.use_dynamic_loss_weighting = use_dynamic_loss_weighting self.aux_loss = build_loss(aux_loss) @@ -39,7 +139,7 @@ def __init__( self.num_pseudo_label = 0 def loss(self, logits, gt_label, features): - """loss function in which unlabeled data is considered + """Loss function in which unlabeled data is considered. Args: logit (Tensor): Labeled data logits @@ -71,7 +171,7 @@ def loss(self, logits, gt_label, features): return losses def forward_train_with_last_layers(self, x, gt_label, final_cls_layer, final_emb_layer): - """Forwards multilabel semi-sl head and losses + """Forwards multilabel semi-sl head and losses. Args: x (dict): dict(labeled_weak. labeled_strong, unlabeled_weak, unlabeled_strong) or NxC input features. @@ -92,7 +192,7 @@ def forward_train_with_last_layers(self, x, gt_label, final_cls_layer, final_emb @HEADS.register_module() class SemiLinearMultilabelClsHead(SemiMultilabelClsHead, CustomMultiLabelLinearClsHead): - """Linear multilabel classification head for Semi-SL + """Linear multilabel classification head for Semi-SL. 
    Args:
        num_classes (int): The number of classes of dataset used for training
@@ -111,29 +211,44 @@ def __init__(
        self,
        num_classes,
        in_channels,
        scale=1.0,
        normalized=False,
-        aux_mlp=dict(hid_channels=0, out_channels=1024),
-        loss=dict(type="CrossEntropyLoss", loss_weight=1.0),
+        aux_mlp=None,
+        loss=None,
        unlabeled_coef=0.1,
-        aux_loss=dict(type="BarlowTwinsLoss", off_diag_penality=1.0 / 128.0, loss_weight=1.0),
+        aux_loss=None,
        use_dynamic_loss_weighting=True,
-    ):
+    ):  # pylint: disable=too-many-arguments
        if in_channels <= 0:
            raise ValueError(f"in_channels={in_channels} must be a positive integer")
        if num_classes <= 0:
            raise ValueError("at least one class must be exist num_classes.")
-
+        aux_mlp = aux_mlp if aux_mlp else dict(hid_channels=0, out_channels=1024)
+        loss = loss if loss else dict(type="CrossEntropyLoss", loss_weight=1.0)
+        aux_loss = (
+            aux_loss if aux_loss else dict(type="BarlowTwinsLoss", off_diag_penality=1.0 / 128.0, loss_weight=1.0)
+        )
        CustomMultiLabelLinearClsHead.__init__(self, num_classes, in_channels, normalized, scale, loss)
        SemiMultilabelClsHead.__init__(self, unlabeled_coef, use_dynamic_loss_weighting, aux_loss)

        self.aux_mlp = generate_aux_mlp(aux_mlp, in_channels)

-    def forward_train(self, x, gt_label):
-        return self.forward_train_with_last_layers(x, gt_label, final_cls_layer=self.fc, final_emb_layer=self.aux_mlp)
+    def loss(self, logits, gt_label, features):
+        """Calculate loss for given logits/gt_label."""
+        return SemiMultilabelClsHead.loss(self, logits, gt_label, features)
+
+    def forward(self, x):
+        """Forward function of SemiLinearMultilabelClsHead class."""
+        return self.simple_test(x)
+
+    def forward_train(self, cls_score, gt_label):
+        """Forward_train function of SemiLinearMultilabelClsHead class."""
+        return self.forward_train_with_last_layers(
+            cls_score, gt_label, final_cls_layer=self.fc, final_emb_layer=self.aux_mlp
+        )


@HEADS.register_module()
class SemiNonLinearMultilabelClsHead(SemiMultilabelClsHead, CustomMultiLabelNonLinearClsHead):
-    """Non-linear classification head for Semi-SL
+    """Non-linear classification head for Semi-SL.
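Both semi-SL multilabel heads build their auxiliary projector through `generate_aux_mlp` from the top of this file; a quick sketch of the two shapes it can produce (channel sizes are arbitrary):

```python
import torch

from otx.algorithms.classification.adapters.mmcls.models.heads.semisl_multilabel_cls_head import (
    generate_aux_mlp,
)

# generate_aux_mlp (defined earlier in this file) returns a single Linear
# layer when hid_channels <= 0, and a two-layer MLP otherwise.
linear_proj = generate_aux_mlp(dict(hid_channels=0, out_channels=1024), in_channels=960)
mlp_proj = generate_aux_mlp(dict(hid_channels=512, out_channels=1024), in_channels=960)

feats = torch.randn(4, 960)
assert linear_proj(feats).shape == (4, 1024)
assert mlp_proj(feats).shape == (4, 1024)
```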
    Args:
        num_classes (int): The number of classes of dataset used for training
@@ -156,19 +271,24 @@ def __init__(
        self,
        num_classes,
        in_channels,
        hid_channels=1280,
        scale=1.0,
        normalized=False,
-        aux_mlp=dict(hid_channels=0, out_channels=1024),
-        act_cfg=dict(type="ReLU"),
-        loss=dict(type="CrossEntropyLoss", loss_weight=1.0),
-        aux_loss=dict(type="BarlowTwinsLoss", off_diag_penality=1.0 / 128.0, loss_weight=1.0),
+        aux_mlp=None,
+        act_cfg=None,
+        loss=None,
+        aux_loss=None,
        dropout=False,
        unlabeled_coef=0.1,
        use_dynamic_loss_weighting=True,
-    ):
+    ):  # pylint: disable=too-many-arguments
        if in_channels <= 0:
            raise ValueError(f"in_channels={in_channels} must be a positive integer")
        if num_classes <= 0:
            raise ValueError("at least one class must be exist num_classes.")
-
+        aux_mlp = aux_mlp if aux_mlp else dict(hid_channels=0, out_channels=1024)
+        act_cfg = act_cfg if act_cfg else dict(type="ReLU")
+        loss = loss if loss else dict(type="CrossEntropyLoss", loss_weight=1.0)
+        aux_loss = (
+            aux_loss if aux_loss else dict(type="BarlowTwinsLoss", off_diag_penality=1.0 / 128.0, loss_weight=1.0)
+        )
        CustomMultiLabelNonLinearClsHead.__init__(
            self,
            num_classes,
@@ -184,7 +304,16 @@ def __init__(

        self.aux_mlp = generate_aux_mlp(aux_mlp, in_channels)

-    def forward_train(self, x, gt_label):
+    def loss(self, logits, gt_label, features):
+        """Calculate loss for given logits/gt_label."""
+        return SemiMultilabelClsHead.loss(self, logits, gt_label, features)
+
+    def forward(self, x):
+        """Forward function of SemiNonLinearMultilabelClsHead class."""
+        return self.simple_test(x)
+
+    def forward_train(self, cls_score, gt_label):
+        """Forward_train function of SemiNonLinearMultilabelClsHead class."""
        return self.forward_train_with_last_layers(
-            x, gt_label, final_cls_layer=self.classifier, final_emb_layer=self.aux_mlp
+            cls_score, gt_label, final_cls_layer=self.classifier, final_emb_layer=self.aux_mlp
        )
diff --git a/otx/mpa/modules/models/heads/supcon_cls_head.py b/otx/algorithms/classification/adapters/mmcls/models/heads/supcon_cls_head.py
similarity index 86%
rename from otx/mpa/modules/models/heads/supcon_cls_head.py
rename to otx/algorithms/classification/adapters/mmcls/models/heads/supcon_cls_head.py
index 528e200245f..2c9d0126a7f 100644
--- a/otx/mpa/modules/models/heads/supcon_cls_head.py
+++ b/otx/algorithms/classification/adapters/mmcls/models/heads/supcon_cls_head.py
@@ -1,3 +1,4 @@
+"""Module for defining classification head for supcon."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

@@ -10,8 +11,8 @@

@HEADS.register_module()
class SupConClsHead(BaseHead):
-    """
-    Supervised Contrastive Learning head for Classification using SelfSL
+    """Supervised Contrastive Learning head for Classification using SelfSL.
+
    Args:
        num_classes (int): The number of classes of dataset used for training
        in_channels (int): The channels of input data from the backbone
@@ -21,7 +22,9 @@ class SupConClsHead(BaseHead):
        topk (set): evaluation topk score, default is (1, )
    """

-    def __init__(self, num_classes: int, in_channels: int, aux_mlp, loss, aux_loss, topk=(1,), init_cfg=None, **kwargs):
+    def __init__(
+        self, num_classes: int, in_channels: int, aux_mlp, loss, aux_loss, topk=(1,), init_cfg=None
+    ):  # pylint: disable=too-many-arguments
        if in_channels <= 0:
            raise ValueError(f"in_channels={in_channels} must be a positive integer")
        if num_classes <= 0:
@@ -56,11 +59,16 @@ def __init__(self, num_classes: int, in_channels: int, aux_mlp, loss, aux_loss,
        else:
            self.aux_mlp = nn.Linear(in_features=in_channels, out_features=out_channels)

+    def forward(self, x):
+        """Forward function of SupConClsHead class."""
+        return self.simple_test(x)
+
    def forward_train(self, x, gt_label):
-        """
-        Forward train head using the Supervised Contrastive Loss
+        """Forward train head using the Supervised Contrastive Loss.
+
        Args:
            x (Tensor): features from the backbone.
+
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
@@ -80,11 +88,8 @@ def forward_train(self, x, gt_label):
        return losses

    def simple_test(self, img):
-        """
-        Test without data augmentation.
-        """
+        """Test without data augmentation."""
        cls_score = self.fc(img)
-
        if isinstance(cls_score, list):
            cls_score = sum(cls_score) / float(len(cls_score))
        pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
diff --git a/otx/algorithms/classification/adapters/mmcls/models/losses/__init__.py b/otx/algorithms/classification/adapters/mmcls/models/losses/__init__.py
new file mode 100644
index 00000000000..f43e9e11bfc
--- /dev/null
+++ b/otx/algorithms/classification/adapters/mmcls/models/losses/__init__.py
@@ -0,0 +1,29 @@
+"""OTX Algorithms - Classification Losses."""
+
+# Copyright (C) 2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions
+# and limitations under the License.
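The SupCon head above keeps two output paths during training: `self.fc` for the classification loss and `self.aux_mlp` for the contrastive term. A rough sketch of the implied data flow; the shapes and layer sizes here are assumptions, not values taken from the code:

```python
import torch
from torch import nn

# Rough sketch of the SupCon training flow: two augmented views of each image
# are stacked along the batch axis, fc produces logits for cross-entropy, and
# the auxiliary MLP produces embeddings for the contrastive loss.
batch, in_channels, num_classes, emb_dim = 4, 960, 10, 1024
fc = nn.Linear(in_channels, num_classes)   # stands in for self.fc
aux_mlp = nn.Linear(in_channels, emb_dim)  # stands in for self.aux_mlp

feats = torch.randn(2 * batch, in_channels)         # two views, stacked
gt_label = torch.randint(0, num_classes, (batch,))

logits = fc(feats)            # -> classification loss against gt_label
embeddings = aux_mlp(feats)   # -> contrastive loss between the two halves
```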
+ +from .asymmetric_angular_loss_with_ignore import AsymmetricAngularLossWithIgnore +from .asymmetric_loss_with_ignore import AsymmetricLossWithIgnore +from .barlowtwins_loss import BarlowTwinsLoss +from .cross_entropy_loss import CrossEntropyLossWithIgnore +from .ib_loss import IBLoss + +__all__ = [ + "AsymmetricAngularLossWithIgnore", + "AsymmetricLossWithIgnore", + "BarlowTwinsLoss", + "CrossEntropyLossWithIgnore", + "IBLoss", +] diff --git a/otx/mpa/modules/models/losses/asymmetric_angular_loss_with_ignore.py b/otx/algorithms/classification/adapters/mmcls/models/losses/asymmetric_angular_loss_with_ignore.py similarity index 88% rename from otx/mpa/modules/models/losses/asymmetric_angular_loss_with_ignore.py rename to otx/algorithms/classification/adapters/mmcls/models/losses/asymmetric_angular_loss_with_ignore.py index c67444c52ea..93d014c90fa 100644 --- a/otx/mpa/modules/models/losses/asymmetric_angular_loss_with_ignore.py +++ b/otx/algorithms/classification/adapters/mmcls/models/losses/asymmetric_angular_loss_with_ignore.py @@ -1,11 +1,12 @@ +"""Module for defining AsymmetricAngularLossWithIgnore.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn from mmcls.models.builder import LOSSES from mmcls.models.losses.utils import weight_reduce_loss +from torch import nn def asymmetric_angular_loss_with_ignore( @@ -19,8 +20,9 @@ def asymmetric_angular_loss_with_ignore( k=0.8, reduction="mean", avg_factor=None, -): - """asymmetric angular loss +): # pylint: disable=too-many-arguments, too-many-locals + """Asymmetric angular loss. + Args: pred (torch.Tensor): The prediction with shape (N, *). target (torch.Tensor): The ground truth label of the prediction with @@ -37,6 +39,7 @@ def asymmetric_angular_loss_with_ignore( is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. + Returns: torch.Tensor: Loss. """ @@ -54,11 +57,11 @@ def asymmetric_angular_loss_with_ignore( asymmetric_focus = gamma_pos > 0 or gamma_neg > 0 if asymmetric_focus: - pt0 = xs_neg * target - pt1 = xs_pos * anti_target - pt = pt0 + pt1 + pos_target0 = xs_neg * target + pos_target1 = xs_pos * anti_target + pos_target = pos_target0 + pos_target1 one_sided_gamma = gamma_pos * target + gamma_neg * anti_target - one_sided_w = torch.pow(pt, one_sided_gamma) + one_sided_w = torch.pow(pos_target, one_sided_gamma) loss = -k * target * torch.log(xs_pos.clamp(min=eps)) - (1 - k) * anti_target * torch.log(xs_neg.clamp(min=eps)) @@ -81,7 +84,8 @@ def asymmetric_angular_loss_with_ignore( @LOSSES.register_module() class AsymmetricAngularLossWithIgnore(nn.Module): - """Asymmetric angular loss + """Asymmetric angular loss. + Args: gamma_pos (float): positive focusing parameter. Defaults to 0.0. 
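All of the losses re-exported in the new `__init__.py` above register themselves in mmcls's `LOSSES` registry, so configs construct them by name via `build_loss`. A minimal sketch, assuming the OTX losses package has been imported so registration has run; the gamma values are arbitrary:

```python
import torch
from mmcls.models.builder import build_loss

import otx.algorithms.classification.adapters.mmcls.models.losses  # noqa: F401  triggers registration

# Minimal sketch: once the module above is imported, the registered name
# resolves through mmcls's LOSSES registry like any built-in loss.
criterion = build_loss(
    dict(type="AsymmetricLossWithIgnore", gamma_pos=0.0, gamma_neg=4.0, loss_weight=1.0)
)

pred = torch.randn(8, 5)                       # logits: 8 samples, 5 labels
target = torch.randint(0, 2, (8, 5)).float()   # multi-label ground truth
loss = criterion(pred, target)                 # element-wise, reduction="none" by default
```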
@@ -95,6 +99,7 @@ class AsymmetricAngularLossWithIgnore(nn.Module):
    """

    def __init__(self, gamma_pos=0.0, gamma_neg=1.0, k=0.8, clip=0.05, reduction="mean", loss_weight=1.0):
+        """Init function of AsymmetricAngularLossWithIgnore class."""
        super().__init__()
        self.gamma_pos = gamma_pos
        self.gamma_neg = gamma_neg
@@ -104,7 +109,7 @@ def __init__(self, gamma_pos=0.0, gamma_neg=1.0, k=0.8, clip=0.05, reduction="me
        self.loss_weight = loss_weight

    def forward(self, pred, target, valid_label_mask=None, weight=None, avg_factor=None, reduction_override=None):
-        """asymmetric angular loss"""
+        """Asymmetric angular loss."""
        assert reduction_override in (None, "none", "mean", "sum")
        reduction = reduction_override if reduction_override else self.reduction
        loss_cls = self.loss_weight * asymmetric_angular_loss_with_ignore(
diff --git a/otx/mpa/modules/models/losses/asymmetric_loss_with_ignore.py b/otx/algorithms/classification/adapters/mmcls/models/losses/asymmetric_loss_with_ignore.py
similarity index 83%
rename from otx/mpa/modules/models/losses/asymmetric_loss_with_ignore.py
rename to otx/algorithms/classification/adapters/mmcls/models/losses/asymmetric_loss_with_ignore.py
index d7022675271..638b6bf2d03 100644
--- a/otx/mpa/modules/models/losses/asymmetric_loss_with_ignore.py
+++ b/otx/algorithms/classification/adapters/mmcls/models/losses/asymmetric_loss_with_ignore.py
@@ -1,11 +1,12 @@
+"""Module for defining AsymmetricLossWithIgnore."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

import torch
-import torch.nn as nn
from mmcls.models.builder import LOSSES
from mmcls.models.losses.utils import weight_reduce_loss
+from torch import nn


def asymmetric_loss_with_ignore(
    pred,
    target,
    valid_label_mask=None,
    weight=None,
    gamma_pos=0.0,
    gamma_neg=4.0,
    clip=0.05,
    reduction="none",
    avg_factor=None,
-):
-    """asymmetric loss
-    Please refer to the `paper `_ for
-    details.
+):  # pylint: disable=too-many-arguments
+    """Asymmetric loss, please refer to the `paper `_ for details.
+
    Args:
        pred (torch.Tensor): The prediction with shape (N, *).
        target (torch.Tensor): The ground truth label of the prediction with
@@ -37,6 +37,7 @@
            is same shape as pred and label. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
+
    Returns:
        torch.Tensor: Loss.
    """
@@ -49,11 +50,11 @@
        avg_factor = None  # if we are not set this to None the exception will be throwed

    if clip and clip > 0:
-        pt = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target) + pred_sigmoid * target
+        pos_target = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target) + pred_sigmoid * target
    else:
-        pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
-    asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * (1 - target))
-    loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
+        pos_target = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
+    asymmetric_weight = (1 - pos_target).pow(gamma_pos * target + gamma_neg * (1 - target))
+    loss = -torch.log(pos_target.clamp(min=eps)) * asymmetric_weight

    if valid_label_mask is not None:
        loss = loss * valid_label_mask
@@ -69,7 +70,8 @@

@LOSSES.register_module()
class AsymmetricLossWithIgnore(nn.Module):
-    """asymmetric loss
+    """Asymmetric loss.
+
    Args:
        gamma_pos (float): positive focusing parameter.
            Defaults to 0.0.
@@ -82,7 +84,7 @@ class AsymmetricLossWithIgnore(nn.Module):
    """

    def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction="none", loss_weight=1.0):
-        super(AsymmetricLossWithIgnore, self).__init__()
+        super().__init__()
        self.gamma_pos = gamma_pos
        self.gamma_neg = gamma_neg
        self.clip = clip
@@ -90,7 +92,7 @@ def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction="none", lo
        self.loss_weight = loss_weight

    def forward(self, pred, target, valid_label_mask=None, weight=None, avg_factor=None, reduction_override=None):
-        """asymmetric loss"""
+        """Forward function of asymmetric loss."""
        assert reduction_override in (None, "none", "mean", "sum")
        reduction = reduction_override if reduction_override else self.reduction
        loss_cls = self.loss_weight * asymmetric_loss_with_ignore(
diff --git a/otx/mpa/modules/models/losses/barlowtwins_loss.py b/otx/algorithms/classification/adapters/mmcls/models/losses/barlowtwins_loss.py
similarity index 70%
rename from otx/mpa/modules/models/losses/barlowtwins_loss.py
rename to otx/algorithms/classification/adapters/mmcls/models/losses/barlowtwins_loss.py
index 744dc7e5eb7..dfbdeabd139 100644
--- a/otx/mpa/modules/models/losses/barlowtwins_loss.py
+++ b/otx/algorithms/classification/adapters/mmcls/models/losses/barlowtwins_loss.py
@@ -1,24 +1,22 @@
+"""Module for defining BarlowTwinsLoss for supcon in classification task."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import torch
-import torch.nn as nn
from mmcls.models.builder import LOSSES
-from torch import Tensor
+from torch import Tensor, nn


def off_diagonal(x: Tensor):
-    """
-    return a tensor containing all the elements outside the diagonal of x
-    """
+    """Return a tensor containing all the elements outside the diagonal of x."""
    assert x.shape[0] == x.shape[1]
    return x.flatten()[:-1].view(x.shape[0] - 1, x.shape[0] + 1)[:, 1:].flatten()


@LOSSES.register_module()
class BarlowTwinsLoss(nn.Module):
-    """
-    Barlow Twins Loss: https://arxiv.org/abs/2103.03230.
+    """Barlow Twins Loss: https://arxiv.org/abs/2103.03230.
+
    Self-Supervised Learning via Redundancy Reduction
    Code adapted from https://github.com/facebookresearch/barlowtwins.
    """
@@ -28,13 +26,13 @@ def __init__(self, off_diag_penality, loss_weight=1.0):
        self.penalty = off_diag_penality
        self.loss_weight = loss_weight

-    def forward(self, feats1: Tensor, feats2: Tensor, **kwargs):
-        """
-        Compute Barlow Twins Loss and, if labels are not none,
-        also the Cross-Entropy loss.
+    def forward(self, feats1: Tensor, feats2: Tensor):
+        """Compute Barlow Twins Loss and, if labels are not none, also the Cross-Entropy loss.
+
        Args:
-            feats1, feats2: vectors of shape [bsz, ...]. Corresponding to
-                two views of the same samples
+            feats1 (torch.Tensor): vectors of shape [bsz, ...]. Corresponding to one of two views of the same samples.
+            feats2 (torch.Tensor): vectors of shape [bsz, ...]. Corresponding to one of two views of the same samples.
+
        Returns:
            A floating point number describing the Barlow Twins loss
        """
diff --git a/otx/mpa/modules/models/losses/cross_entropy_loss.py b/otx/algorithms/classification/adapters/mmcls/models/losses/cross_entropy_loss.py
similarity index 83%
rename from otx/mpa/modules/models/losses/cross_entropy_loss.py
rename to otx/algorithms/classification/adapters/mmcls/models/losses/cross_entropy_loss.py
index 0b99480c80f..28d8d2a57fd 100644
--- a/otx/mpa/modules/models/losses/cross_entropy_loss.py
+++ b/otx/algorithms/classification/adapters/mmcls/models/losses/cross_entropy_loss.py
@@ -1,14 +1,16 @@
+"""Module for defining cross entropy loss for classification task."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

-import torch.nn as nn
import torch.nn.functional as F
from mmcls.models.builder import LOSSES
from mmcls.models.losses.utils import weight_reduce_loss
+from torch import nn


def cross_entropy(pred, label, weight=None, reduction="mean", avg_factor=None, class_weight=None, ignore_index=None):
+    """Calculate cross entropy for given pred, label pairs."""
    # element-wise losses
    if ignore_index is not None:
        loss = F.cross_entropy(pred, label, reduction="none", weight=class_weight, ignore_index=ignore_index)
@@ -25,8 +27,10 @@ def cross_entropy(pred, label, weight=None, reduction="mean", avg_factor=None, c

@LOSSES.register_module()
class CrossEntropyLossWithIgnore(nn.Module):
+    """Defining CrossEntropyLossWithIgnore which supports ignored_label masking."""
+
    def __init__(self, reduction="mean", loss_weight=1.0, ignore_index=None):
-        super(CrossEntropyLossWithIgnore, self).__init__()
+        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index
@@ -34,6 +38,7 @@ def __init__(self, reduction="mean", loss_weight=1.0, ignore_index=None):
        self.cls_criterion = cross_entropy

    def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs):
+        """Forward function of CrossEntropyLossWithIgnore class."""
        assert reduction_override in (None, "none", "mean", "sum")
        reduction = reduction_override if reduction_override else self.reduction
        loss_cls = self.loss_weight * self.cls_criterion(
diff --git a/otx/mpa/modules/models/losses/ib_loss.py b/otx/algorithms/classification/adapters/mmcls/models/losses/ib_loss.py
similarity index 59%
rename from otx/mpa/modules/models/losses/ib_loss.py
rename to otx/algorithms/classification/adapters/mmcls/models/losses/ib_loss.py
index 3c5de67438e..ce627738628 100644
--- a/otx/mpa/modules/models/losses/ib_loss.py
+++ b/otx/algorithms/classification/adapters/mmcls/models/losses/ib_loss.py
@@ -1,3 +1,4 @@
+"""Module for defining IB Loss which alleviates the effect of an imbalanced dataset."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@@ -11,16 +12,17 @@

@LOSSES.register_module()
class IBLoss(CrossEntropyLoss):
-    def __init__(self, num_classes, start=5, alpha=1000.0, **kwargs):
-        """IB Loss
-        https://arxiv.org/abs/2110.02444
+    """IB Loss, Influence-Balanced Loss for Imbalanced Visual Classification, https://arxiv.org/abs/2110.02444."""
+
+    def __init__(self, num_classes, start=5, alpha=1000.0):
+        """Init function of IBLoss.
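For reference, the Barlow Twins objective behind `BarlowTwinsLoss` above (its forward body is not shown in this hunk) can be sketched from the paper's formulation; this is a sketch of the standard recipe, not the exact OTX code:

```python
import torch


def off_diag_sketch(x):
    # Same trick as the off_diagonal helper above: drop the diagonal.
    n = x.shape[0]
    return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()


def barlow_twins_sketch(feats1, feats2, off_diag_penalty=1.0 / 128.0):
    """Sketch of the standard Barlow Twins objective (arXiv:2103.03230)."""
    n = feats1.shape[0]
    # Normalize each embedding dimension across the batch.
    z1 = (feats1 - feats1.mean(0)) / (feats1.std(0) + 1e-9)
    z2 = (feats2 - feats2.mean(0)) / (feats2.std(0) + 1e-9)
    c = (z1.T @ z2) / n                                  # cross-correlation
    on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()   # pull diagonal to 1
    off_diag = off_diag_sketch(c).pow_(2).sum()          # push the rest to 0
    return on_diag + off_diag_penalty * off_diag


loss = barlow_twins_sketch(torch.randn(16, 8), torch.randn(16, 8))
```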
        Args:
            num_classes (int): Number of classes in dataset
            start (int): Epoch to start finetuning with IB loss
            alpha (float): Hyper-parameter for an adjustment for IB loss re-weighting
        """
-        super(IBLoss, self).__init__(loss_weight=1.0)
+        super().__init__(loss_weight=1.0)
        if alpha < 0:
            raise ValueError("Alpha for IB loss should be bigger than 0")
        self.alpha = alpha
@@ -32,6 +34,7 @@ def __init__(self, num_classes, start=5, alpha=1000.0, **kwargs):

    @property
    def cur_epoch(self):
+        """Return current epoch."""
        return self._cur_epoch

    @cur_epoch.setter
@@ -39,6 +42,7 @@ def cur_epoch(self, epoch):
        self._cur_epoch = epoch

    def update_weight(self, cls_num_list):
+        """Update loss weight per class."""
        if len(cls_num_list) == 0:
            raise ValueError("Cannot compute the IB loss weight with empty cls_num_list.")
        per_cls_weights = 1.0 / np.array(cls_num_list)
@@ -46,14 +50,14 @@ def update_weight(self, cls_num_list):
        per_cls_weights = torch.FloatTensor(per_cls_weights)
        self.weight = per_cls_weights

-    def forward(self, input, target, feature):
+    def forward(self, x, target, feature):
+        """Forward function of IBLoss."""
        if self._cur_epoch < self._start_epoch:
-            return super().forward(input, target)
-        else:
-            grads = torch.sum(torch.abs(F.softmax(input, dim=1) - F.one_hot(target, self.num_classes)), 1)
-            feature = torch.sum(torch.abs(feature), 1).reshape(-1, 1)
-            ib = grads * feature.reshape(-1)
-            ib = self.alpha / (ib + self.epsilon)
-            ce_loss = F.cross_entropy(input, target, weight=self.weight.to(input.get_device()), reduction="none")
-            loss = ce_loss * ib
-            return loss.mean()
+            return super().forward(x, target)
+        grads = torch.sum(torch.abs(F.softmax(x, dim=1) - F.one_hot(target, self.num_classes)), 1)
+        feature = torch.sum(torch.abs(feature), 1).reshape(-1, 1)
+        scaler = grads * feature.reshape(-1)
+        scaler = self.alpha / (scaler + self.epsilon)
+        ce_loss = F.cross_entropy(x, target, weight=self.weight.to(x.get_device()), reduction="none")
+        loss = ce_loss * scaler
+        return loss.mean()
diff --git a/otx/algorithms/classification/adapters/mmcls/models/necks/__init__.py b/otx/algorithms/classification/adapters/mmcls/models/necks/__init__.py
index 13803f3ccff..d2cb363f53f 100644
--- a/otx/algorithms/classification/adapters/mmcls/models/necks/__init__.py
+++ b/otx/algorithms/classification/adapters/mmcls/models/necks/__init__.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions
# and limitations under the License.
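The IB loss above needs two pieces of external state: per-class counts and the current epoch. A usage sketch with invented statistics; note that the influence-balanced branch moves `self.weight` with `x.get_device()`, so that path expects CUDA tensors:

```python
import torch

from otx.algorithms.classification.adapters.mmcls.models.losses import IBLoss

# Usage sketch for IBLoss as rewritten above; the class counts are invented.
ib_loss = IBLoss(num_classes=3, start=5, alpha=1000.0)
ib_loss.update_weight([500, 50, 5])   # per-class sample counts -> weights

logits = torch.randn(8, 3)
target = torch.randint(0, 3, (8,))
features = torch.randn(8, 64)         # backbone features drive the IB term

ib_loss.cur_epoch = 2                 # before `start`: plain cross-entropy
warmup_loss = ib_loss(logits, target, features)

ib_loss.cur_epoch = 6                 # after `start`: IB re-weighting kicks in
# on GPU: balanced = ib_loss(logits.cuda(), target.cuda(), features.cuda())
```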
+from .mmov_neck import MMOVNeck from .selfsl_mlp import SelfSLMLP -__all__ = ["SelfSLMLP"] +__all__ = ["SelfSLMLP", "MMOVNeck"] diff --git a/otx/algorithms/classification/adapters/mmcls/models/necks/mmov_neck.py b/otx/algorithms/classification/adapters/mmcls/models/necks/mmov_neck.py new file mode 100644 index 00000000000..a4e96798b6a --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/models/necks/mmov_neck.py @@ -0,0 +1,27 @@ +"""Module for defining MMOVNeck for inference.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from typing import Dict, List, Union + +from mmcls.models.builder import NECKS + +from otx.core.ov.graph.parsers.cls import cls_base_parser +from otx.core.ov.models.mmov_model import MMOVModel + + +@NECKS.register_module() +class MMOVNeck(MMOVModel): + """Neck class for MMOV inference.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @staticmethod + def parser(graph, **kwargs) -> Dict[str, Union[List[str], Dict[str, List[str]]]]: + """Parser function returns base_parser for given graph.""" + output = cls_base_parser(graph, "neck") + if output is None: + raise ValueError("Parser can not determine input and output of model. Please provide them explicitly") + return output diff --git a/otx/algorithms/classification/adapters/mmcls/models/necks/selfsl_mlp.py b/otx/algorithms/classification/adapters/mmcls/models/necks/selfsl_mlp.py index 9c7b61b05bc..b787a911851 100644 --- a/otx/algorithms/classification/adapters/mmcls/models/necks/selfsl_mlp.py +++ b/otx/algorithms/classification/adapters/mmcls/models/necks/selfsl_mlp.py @@ -6,7 +6,7 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -# pylint: disable=missing-module-docstring, dangerous-default-value +# pylint: disable=missing-module-docstring from typing import Any, Dict, List, Tuple, Union import torch @@ -34,11 +34,11 @@ def __init__( in_channels: int, hid_channels: int, out_channels: int, - norm_cfg: Dict[str, Any] = dict(type="BN1d"), + norm_cfg: Dict[str, Any] = None, use_conv: bool = False, with_avg_pool: bool = True, ): - + norm_cfg = norm_cfg if norm_cfg else dict(type="BN1d") super().__init__() self.with_avg_pool = with_avg_pool diff --git a/otx/algorithms/classification/adapters/mmcls/optimizer/__init__.py b/otx/algorithms/classification/adapters/mmcls/optimizer/__init__.py new file mode 100644 index 00000000000..271368e0d6a --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/optimizer/__init__.py @@ -0,0 +1,19 @@ +"""OTX Algorithms - Classification Optimizers.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. 
+ +from .lars import LARS + +__all__ = ["LARS"] diff --git a/otx/mpa/modules/optimizer/lars.py b/otx/algorithms/classification/adapters/mmcls/optimizer/lars.py similarity index 74% rename from otx/mpa/modules/optimizer/lars.py rename to otx/algorithms/classification/adapters/mmcls/optimizer/lars.py index 89fc83e98ba..cc453e40a51 100644 --- a/otx/mpa/modules/optimizer/lars.py +++ b/otx/algorithms/classification/adapters/mmcls/optimizer/lars.py @@ -1,3 +1,4 @@ +"""Module for defining LARS optimizer for classification task.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -45,15 +46,15 @@ def __init__( nesterov=False, mode=None, exclude_bn_from_weight_decay=False, - ): + ): # pylint: disable=too-many-arguments, too-many-locals if lr is not required and lr < 0.0: - raise ValueError("Invalid learning rate: {}".format(lr)) + raise ValueError(f"Invalid learning rate: {lr}") if momentum < 0.0: - raise ValueError("Invalid momentum value: {}".format(momentum)) + raise ValueError(f"Invalid momentum value: {momentum}") if weight_decay < 0.0: - raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + raise ValueError(f"Invalid weight_decay value: {weight_decay}") if eta < 0.0: - raise ValueError("Invalid LARS coefficient value: {}".format(eta)) + raise ValueError(f"Invalid LARS coefficient value: {eta}") defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, eta=eta @@ -92,10 +93,11 @@ def __init__( self.mode = mode - super(LARS, self).__init__(new_param_groups, defaults) + super().__init__(new_param_groups, defaults) def __setstate__(self, state): - super(LARS, self).__setstate__(state) + """Set state for parameter groups.""" + super().__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) @@ -115,10 +117,8 @@ def step(self, closure=None): for group in self.param_groups: weight_decay = group["weight_decay"] momentum = group["momentum"] - dampening = group["dampening"] nesterov = group["nesterov"] eta = group["eta"] - lars_exclude = group.get("lars_exclude", False) for p in group["params"]: if p.grad is None: @@ -128,21 +128,15 @@ def step(self, closure=None): # Add weight decay before computing adaptive LR. # Seems to be pretty important in SIMclr style models. 
local_lr = 1.0 - if self.mode == "selfsl": - if weight_decay != 0: - d_p = d_p.add(p, alpha=weight_decay) - if not lars_exclude: - weight_norm = torch.norm(p).item() - grad_norm = torch.norm(d_p).item() - if weight_norm > 0 and grad_norm > 0: - local_lr = eta * weight_norm / grad_norm + if not group.get("lars_exclude", False): + weight_norm = torch.norm(p).item() + grad_norm = torch.norm(d_p).item() + if self.mode == "selfsl" and weight_norm > 0 and grad_norm > 0: + local_lr = eta * weight_norm / grad_norm else: - if not lars_exclude: - weight_norm = torch.norm(p).item() - grad_norm = torch.norm(d_p).item() - local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm) - if weight_decay != 0: - d_p = d_p.add(p, alpha=weight_decay) + local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm) + if weight_decay != 0: + d_p = d_p.add(p, alpha=weight_decay) d_p = d_p.mul(local_lr) @@ -152,12 +146,7 @@ def step(self, closure=None): buf = param_state["momentum_buffer"] = torch.clone(d_p).detach() else: buf = param_state["momentum_buffer"] - buf.mul_(momentum).add_(d_p, alpha=1 - dampening) - if nesterov: - d_p = d_p.add(buf, alpha=momentum) - else: - d_p = buf - + buf.mul_(momentum).add_(d_p, alpha=1 - group["dampening"]) + d_p = d_p.add(buf, alpha=momentum) if nesterov else buf p.add_(d_p, alpha=-group["lr"]) - return loss diff --git a/otx/algorithms/classification/adapters/mmcls/tasks/__init__.py b/otx/algorithms/classification/adapters/mmcls/tasks/__init__.py new file mode 100644 index 00000000000..cbb783e9b60 --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/tasks/__init__.py @@ -0,0 +1,16 @@ +"""Initialize OTX classification tasks.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# flake8: noqa +from . import ( + evaluator, + explainer, + exporter, + incremental, + inferrer, + semisl, + stage, + trainer, +) diff --git a/otx/mpa/cls/evaluator.py b/otx/algorithms/classification/adapters/mmcls/tasks/evaluator.py similarity index 78% rename from otx/mpa/cls/evaluator.py rename to otx/algorithms/classification/adapters/mmcls/tasks/evaluator.py index c2eb7b3bc99..72d074ea606 100644 --- a/otx/mpa/cls/evaluator.py +++ b/otx/algorithms/classification/adapters/mmcls/tasks/evaluator.py @@ -1,11 +1,10 @@ -# Copyright (C) 2022 Intel Corporation +"""Evaluation class for OTX Classification with MMCLS.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -import os.path as osp - -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger from .inferrer import ClsInferrer @@ -14,8 +13,10 @@ @STAGES.register_module() class ClsEvaluator(ClsInferrer): + """Evaluator for MMCLS backend.""" + def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run evaluation stage for classification + """Run evaluation stage for classification. 
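The consolidated step above computes a per-layer trust ratio before the global learning rate is applied; in the non-selfsl branch it is the classic LARS scaling, which can be checked numerically:

```python
import torch

# Numeric sketch of the local learning rate from step() above (non-selfsl
# branch): local_lr = eta * ||w|| / (||g|| + weight_decay * ||w||).
eta, weight_decay = 0.001, 1e-5
param = torch.randn(256, 128)
grad = torch.randn(256, 128) * 0.01

weight_norm = torch.norm(param).item()
grad_norm = torch.norm(grad).item()
local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm)

# The update then adds weight decay and scales by local_lr before momentum
# and the global lr are applied, as in the refactored loop above.
d_p = (grad + weight_decay * param) * local_lr
```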
- Run inference - Run evaluation via MMClassification -> MMCV diff --git a/otx/mpa/cls/explainer.py b/otx/algorithms/classification/adapters/mmcls/tasks/explainer.py similarity index 86% rename from otx/mpa/cls/explainer.py rename to otx/algorithms/classification/adapters/mmcls/tasks/explainer.py index 315ad1f4cb3..e3ab896fe39 100644 --- a/otx/mpa/cls/explainer.py +++ b/otx/algorithms/classification/adapters/mmcls/tasks/explainer.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Explainer for OTX Classification with MMCLS.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,18 +7,18 @@ from mmcls.datasets import build_dataloader as mmcls_build_dataloader from mmcls.datasets import build_dataset as mmcls_build_dataset +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + ActivationMapHook, + EigenCamHook, + ReciproCAMHook, +) +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES from otx.algorithms.common.adapters.mmcv.utils import ( build_data_parallel, build_dataloader, build_dataset, ) -from otx.mpa.modules.hooks.recording_forward_hooks import ( - ActivationMapHook, - EigenCamHook, - ReciproCAMHook, -) -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from .stage import ClsStage @@ -31,12 +32,17 @@ @STAGES.register_module() class ClsExplainer(ClsStage): + """Base explainer class for OTX Classification with MMCLS.""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dataset = None + self.explainer_hook = None + self.extract_prob = False def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run explain stage + """Run explain stage. 
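The explainer relies on the recording forward hooks imported above (ActivationMapHook, EigenCamHook, ReciproCAMHook). The underlying mechanism is a plain PyTorch forward hook; a self-contained sketch with illustrative names, not the OTX hook classes themselves:

```python
import torch
from torch import nn

# Sketch of the recording-forward-hook idea: register a hook on the backbone
# and stash one spatial activation map per image during inference.
records = []


def activation_map_hook(module, inputs, output):
    records.append(output.mean(dim=1).detach())  # channel-wise mean


backbone = nn.Conv2d(3, 8, 3, padding=1)  # stands in for the real backbone
handle = backbone.register_forward_hook(activation_map_hook)
backbone(torch.randn(2, 3, 32, 32))
handle.remove()
assert records[0].shape == (2, 32, 32)
```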
+
        - Configuration
        - Environment setup
        - Run explain via hooks in recording_forward_hooks
@@ -57,6 +63,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs):
        return dict(outputs=outputs)

    def explain(self, cfg, model_builder=None):
+        """Main explain function."""
        # TODO: distributed inference

        # Data loader
diff --git a/otx/mpa/cls/exporter.py b/otx/algorithms/classification/adapters/mmcls/tasks/exporter.py
similarity index 76%
rename from otx/mpa/cls/exporter.py
rename to otx/algorithms/classification/adapters/mmcls/tasks/exporter.py
index 23734d2e055..22af74418d6 100644
--- a/otx/mpa/cls/exporter.py
+++ b/otx/algorithms/classification/adapters/mmcls/tasks/exporter.py
@@ -1,14 +1,15 @@
-# Copyright (C) 2022 Intel Corporation
+"""Base exporter for OTX Classification with MMCLS."""
+# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

import numpy as np
from mmcv.runner import wrap_fp16_model

-from otx.mpa.deploy.utils import sync_batchnorm_2_batchnorm
-from otx.mpa.exporter_mixin import ExporterMixin
-from otx.mpa.registry import STAGES
-from otx.mpa.utils.logger import get_logger
+from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin
+from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES
+from otx.algorithms.common.adapters.mmdeploy.utils import sync_batchnorm_2_batchnorm
+from otx.algorithms.common.utils.logger import get_logger

from .stage import ClsStage

@@ -17,8 +18,10 @@

@STAGES.register_module()
class ClsExporter(ExporterMixin, ClsStage):
+    """Base exporter class."""
+
    def run(self, model_cfg, model_ckpt, data_cfg, **kwargs):  # noqa: C901
-        """Run exporter stage"""
+        """Run exporter stage."""
        precision = kwargs.get("precision", "FP32")
        model_builder = kwargs.get("model_builder", self.MODEL_BUILDER)

@@ -45,9 +48,10 @@ def model_builder_helper(*args, **kwargs):

    @staticmethod
    def naive_export(output_dir, model_builder, precision, cfg, model_name="model"):
+        """Export procedure with pytorch backend."""
        from mmcls.datasets.pipelines import Compose

-        from ..deploy.apis import NaiveExporter
+        from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter

        def get_fake_data(cfg, orig_img_shape=(128, 128, 3)):
            pipeline = cfg.data.test.pipeline
diff --git a/otx/mpa/cls/incremental/__init__.py b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/__init__.py
similarity index 77%
rename from otx/mpa/cls/incremental/__init__.py
rename to otx/algorithms/classification/adapters/mmcls/tasks/incremental/__init__.py
index e5b6a12e095..301f0b6b0d5 100644
--- a/otx/mpa/cls/incremental/__init__.py
+++ b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/__init__.py
@@ -1,3 +1,4 @@
+"""Initialize Incremental Learning for Classification."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
diff --git a/otx/algorithms/classification/adapters/mmcls/tasks/incremental/inferrer.py b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/inferrer.py
new file mode 100644
index 00000000000..5e58f81d646
--- /dev/null
+++ b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/inferrer.py
@@ -0,0 +1,21 @@
+"""Inference task for Incremental OTX classification with MMCLS."""
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from otx.algorithms.classification.adapters.mmcls.tasks.inferrer import ClsInferrer
+from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES
+from otx.algorithms.common.utils.logger import get_logger
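The incremental and semi-SL task classes in the hunks around here (the class definition continues just below) all follow one composition pattern: a stage mixin that patches configuration, combined with a base task class and registered into STAGES. A schematic sketch with stand-in names, not the real OTX registry or classes:

```python
# Schematic sketch of the stage-mixin pattern used by IncrClsInferrer and
# friends; the registry and class names here are illustrative stand-ins.
STAGES_SKETCH = {}


def register_stage(cls):
    STAGES_SKETCH[cls.__name__] = cls
    return cls


class BaseInferrer:
    def run(self):
        return "infer"


class IncrStageMixin:
    def configure_task(self, cfg):
        cfg["incremental"] = True  # stage-specific config patching


@register_stage
class IncrInferrerSketch(IncrStageMixin, BaseInferrer):
    """Mixin first in the MRO, so its configuration hooks take precedence."""


cfg = {}
task = IncrInferrerSketch()
task.configure_task(cfg)
assert cfg["incremental"] and task.run() == "infer"
```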
+
+from .stage import IncrClsStage
+
+logger = get_logger()
+
+
+# pylint: disable=super-init-not-called
+@STAGES.register_module()
+class IncrClsInferrer(IncrClsStage, ClsInferrer):
+    """Inference class for incremental classification."""
+
+    def __init__(self, **kwargs):
+        IncrClsStage.__init__(self, **kwargs)
diff --git a/otx/mpa/cls/incremental/stage.py b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/stage.py
similarity index 81%
rename from otx/mpa/cls/incremental/stage.py
rename to otx/algorithms/classification/adapters/mmcls/tasks/incremental/stage.py
index 3659237cda5..a0eabb0d40f 100644
--- a/otx/mpa/cls/incremental/stage.py
+++ b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/stage.py
@@ -1,12 +1,15 @@
+"""Stage for Incremental learning for OTX Classification with MMCLS."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

from mmcv import ConfigDict

-from otx.mpa.cls.stage import ClsStage
-from otx.mpa.utils.config_utils import update_or_add_custom_hook
-from otx.mpa.utils.logger import get_logger
+from otx.algorithms.classification.adapters.mmcls.tasks.stage import ClsStage
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import (
+    update_or_add_custom_hook,
+)
+from otx.algorithms.common.utils.logger import get_logger

logger = get_logger()

@@ -21,20 +24,21 @@


class IncrClsStage(ClsStage):
-    """Patch config to support incremental learning for object Cls"""
+    """Patch config to support incremental learning for object Cls."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

-    def configure_task(self, cfg, training, **kwargs):
-        """Patch config to support incremental learning"""
-        super().configure_task(cfg, training, **kwargs)
+    # pylint: disable=too-many-function-args
+    def configure_task(self, cfg, training):
+        """Patch config to support incremental learning."""
+        super().configure_task(cfg, training)
        if "task_adapt" in cfg:
-            self.configure_task_adapt(cfg, training, **kwargs)
+            self.configure_task_adapt(cfg, training)

    # noqa: C901
-    def configure_task_adapt(self, cfg, training, **kwargs):
-        """Configure for Task Adaptation Task"""
+    def configure_task_adapt(self, cfg, training):
+        """Configure for Task Adaptation Task."""
        train_data_cfg = self.get_data_cfg(cfg, "train")
        if training:
            if train_data_cfg.type not in CLASS_INC_DATASET:
@@ -56,6 +60,7 @@
        self.configure_task_modules(cfg)

    def configure_task_modules(self, cfg):
+        """Patch Task adapt module."""
        if not cfg.model.get("multilabel", False) and not cfg.model.get("hierarchical", False):
            efficient_mode = cfg["task_adapt"].get("efficient_mode", True)
            sampler_type = "balanced"
@@ -84,6 +89,7 @@
        update_or_add_custom_hook(cfg, task_adapt_hook)

    def configure_loss(self, cfg):
+        """Patch classification loss."""
        if len(set(self.org_model_classes) & set(self.model_classes)) == 0 or set(self.org_model_classes) == set(
            self.model_classes
        ):
diff --git a/otx/algorithms/classification/adapters/mmcls/tasks/incremental/trainer.py b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/trainer.py
new file mode 100644
index 00000000000..b603a828908
--- /dev/null
+++ b/otx/algorithms/classification/adapters/mmcls/tasks/incremental/trainer.py
@@ -0,0 +1,21 @@
+"""Incremental learning trainer for OTX Classification with MMCLS."""
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from
otx.algorithms.classification.adapters.mmcls.tasks.trainer import ClsTrainer +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger + +from .stage import IncrClsStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class IncrClsTrainer(IncrClsStage, ClsTrainer): + """Trainer for incremental classification.""" + + def __init__(self, **kwargs): + IncrClsStage.__init__(self, **kwargs) diff --git a/otx/mpa/cls/inferrer.py b/otx/algorithms/classification/adapters/mmcls/tasks/inferrer.py similarity index 82% rename from otx/mpa/cls/inferrer.py rename to otx/algorithms/classification/adapters/mmcls/tasks/inferrer.py index 17336bf7cd4..0e223fa1da3 100644 --- a/otx/mpa/cls/inferrer.py +++ b/otx/algorithms/classification/adapters/mmcls/tasks/inferrer.py @@ -1,27 +1,26 @@ -# Copyright (C) 2022 Intel Corporation +"""Inference task for OTX classification with MMCLS.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -import os.path as osp from contextlib import nullcontext -import numpy as np import torch from mmcls.datasets import build_dataloader as mmcls_build_dataloader from mmcls.datasets import build_dataset as mmcls_build_dataset from mmcv import Config, ConfigDict +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + FeatureVectorHook, + ReciproCAMHook, +) +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES from otx.algorithms.common.adapters.mmcv.utils import ( build_data_parallel, build_dataloader, build_dataset, ) -from otx.mpa.modules.hooks.recording_forward_hooks import ( - FeatureVectorHook, - ReciproCAMHook, -) -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from .stage import ClsStage @@ -30,12 +29,14 @@ @STAGES.register_module() class ClsInferrer(ClsStage): + """Class for inference classification.""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dataset = None def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run inference stage for classification + """Run inference stage for classification. - Configuration - Environment setup @@ -53,21 +54,27 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): model_builder = kwargs.get("model_builder", None) dump_features = kwargs.get("dump_features", False) dump_saliency_map = kwargs.get("dump_saliency_map", False) - eval = kwargs.get("eval", False) + # TODO: It looks like we need to modify that code in an appropriate way. 
+ if model_cfg.model.head.get("type", None) == "VisionTransformerClsHead": + dump_features = False + dump_saliency_map = False + do_eval = kwargs.get("eval", False) outputs = self.infer( cfg, model_builder=model_builder, - eval=eval, + do_eval=do_eval, dump_features=dump_features, dump_saliency_map=dump_saliency_map, ) return dict(outputs=outputs) - def infer(self, cfg, model_builder=None, eval=False, dump_features=False, dump_saliency_map=False): + # pylint: disable=too-many-locals + def infer(self, cfg, model_builder=None, do_eval=False, dump_features=False, dump_saliency_map=False): + """Main inference function.""" # TODO: distributed inference - if cfg.get("task_adapt", False) and not eval: + if cfg.get("task_adapt", False) and not do_eval: data_cfg = cfg.data.train.copy() data_cfg.pipeline = cfg.data.test.pipeline else: diff --git a/otx/mpa/cls/semisl/__init__.py b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/__init__.py similarity index 80% rename from otx/mpa/cls/semisl/__init__.py rename to otx/algorithms/classification/adapters/mmcls/tasks/semisl/__init__.py index 9d20e8aa35f..afe6f6df210 100644 --- a/otx/mpa/cls/semisl/__init__.py +++ b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/__init__.py @@ -1,3 +1,4 @@ +"""Initialize semisl task for OTX classification with MMCLS.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/algorithms/classification/adapters/mmcls/tasks/semisl/inferrer.py b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/inferrer.py new file mode 100644 index 00000000000..3fff9e99899 --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/inferrer.py @@ -0,0 +1,21 @@ +"""Inference task for Semi-SL OTX classification with MMCLS.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from otx.algorithms.classification.adapters.mmcls.tasks.inferrer import ClsInferrer +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger + +from .stage import SemiSLClsStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class SemiSLClsInferrer(SemiSLClsStage, ClsInferrer): + """Semi-SL Inferencer.""" + + def __init__(self, **kwargs): + SemiSLClsStage.__init__(self, **kwargs) diff --git a/otx/mpa/cls/semisl/stage.py b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/stage.py similarity index 63% rename from otx/mpa/cls/semisl/stage.py rename to otx/algorithms/classification/adapters/mmcls/tasks/semisl/stage.py index 0ac7a0de3ae..b78db7d13c9 100644 --- a/otx/mpa/cls/semisl/stage.py +++ b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/stage.py @@ -1,22 +1,23 @@ +"""Stage for SemiSL classification with MMCLS.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from otx.mpa.cls.stage import ClsStage -from otx.mpa.utils.logger import get_logger +from otx.algorithms.classification.adapters.mmcls.tasks.stage import ClsStage +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() class SemiSLClsStage(ClsStage): - """Patch config to support semi supervised learning for object Cls""" + """Patch config to support semi supervised learning for object Cls.""" def __init__(self, **kwargs): super().__init__(**kwargs) - def configure_data(self, cfg, data_cfg, training, **kwargs): + def configure_data(self, cfg, data_cfg, training): """Patch cfg.data.""" - 
super().configure_data(cfg, data_cfg, training, **kwargs) + super().configure_data(cfg, data_cfg, training) # Set unlabeled data hook if training: if cfg.data.get("unlabeled", False) and cfg.data.unlabeled.get("otx_dataset", False): diff --git a/otx/algorithms/classification/adapters/mmcls/tasks/semisl/trainer.py b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/trainer.py new file mode 100644 index 00000000000..843e99ce941 --- /dev/null +++ b/otx/algorithms/classification/adapters/mmcls/tasks/semisl/trainer.py @@ -0,0 +1,21 @@ +"""Semi-SL Trainer for OTX Classification with MMCLS.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from otx.algorithms.classification.adapters.mmcls.tasks.trainer import ClsTrainer +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger + +from .stage import SemiSLClsStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class SemiSLClsTrainer(SemiSLClsStage, ClsTrainer): + """Trainer class for Semi-SL.""" + + def __init__(self, **kwargs): + SemiSLClsStage.__init__(self, **kwargs) diff --git a/otx/mpa/cls/stage.py b/otx/algorithms/classification/adapters/mmcls/tasks/stage.py similarity index 75% rename from otx/mpa/cls/stage.py rename to otx/algorithms/classification/adapters/mmcls/tasks/stage.py index d24abbe12fd..11dd3c9aefc 100644 --- a/otx/mpa/cls/stage.py +++ b/otx/algorithms/classification/adapters/mmcls/tasks/stage.py @@ -1,40 +1,46 @@ -# Copyright (C) 2022 Intel Corporation +"""Base stage for OTX classification with MMCLS.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import copy -import numpy as np import torch -from mmcv import ConfigDict, build_from_cfg +from mmcv import build_from_cfg +from otx.algorithms import TRANSFORMER_BACKBONES from otx.algorithms.classification.adapters.mmcls.utils.builder import build_classifier -from otx.mpa.stage import Stage -from otx.mpa.utils.config_utils import recursively_update_cfg, update_or_add_custom_hook -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + recursively_update_cfg, +) +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() class ClsStage(Stage): + """Base classification stage class.""" + MODEL_BUILDER = build_classifier def configure(self, model_cfg, model_ckpt, data_cfg, training=True, **kwargs): # noqa: C901 - """Create MMCV-consumable config from given inputs""" + """Create MMCV-consumable config from given inputs.""" logger.info(f"configure: training={training}") # Recipe + model cfg = self.cfg - self.configure_model(cfg, model_cfg, training, **kwargs) + self.configure_model(cfg, model_cfg, **kwargs) self.configure_ckpt(cfg, model_ckpt, kwargs.get("pretrained", None)) - self.configure_data(cfg, data_cfg, training, **kwargs) - self.configure_task(cfg, training, **kwargs) + self.configure_data(cfg, data_cfg, training) + self.configure_task(cfg, training) return cfg - def configure_model(self, cfg, model_cfg, training, **kwargs): + def configure_model(self, cfg, model_cfg, **kwargs): + """Patch model for classification task.""" if model_cfg: if hasattr(cfg, "model"): - cfg.merge_from_dict(model_cfg._cfg_dict) + cfg.merge_from_dict(model_cfg) else: cfg.model = copy.deepcopy(model_cfg.model) @@ -46,8 +52,8 @@ def configure_model(self, cfg, 
model_cfg, training, **kwargs):
        ir_model_path = kwargs.get("ir_model_path")
        if ir_model_path:

-            def is_mmov_model(k, v):
-                if k == "type" and v.startswith("MMOV"):
+            def is_mmov_model(key, value):
+                if key == "type" and value.startswith("MMOV"):
                    return True
                return False

@@ -61,8 +67,10 @@ def is_mmov_model(k, v):
        self.configure_in_channel(cfg)
        self.configure_topk(cfg)

+    # pylint: disable=too-many-branches
    @staticmethod
    def configure_in_channel(cfg):
+        """Check whether in_channels needs patching and configure it if so."""
        configure_required = False
        if cfg.model.get("neck") is not None:
            if cfg.model.neck.get("in_channels") is not None and cfg.model.neck.in_channels <= 0:
@@ -89,6 +97,13 @@ def configure_in_channel(cfg):
            output = layer(torch.rand([1] + list(input_shape)))
            if isinstance(output, (tuple, list)):
                output = output[-1]
+
+            if layer.__class__.__name__ in TRANSFORMER_BACKBONES and isinstance(output, (tuple, list)):
+                # mmcls.VisionTransformer outputs Tuple[List[...]] and the last index of List is the final logit.
+                _, output = output
+                if cfg.model.head.type != "VisionTransformerClsHead":
+                    raise ValueError(f"{layer.__class__.__name__ } needs VisionTransformerClsHead as head")
+
            in_channels = output.shape[1]
            if cfg.model.get("neck") is not None:
                if cfg.model.neck.get("in_channels") is not None:
@@ -116,12 +131,14 @@

    @staticmethod
    def configure_topk(cfg):
+        """Patch topk in case of num_classes is less than 5."""
        if cfg.model.head.get("topk", False) and isinstance(cfg.model.head.topk, tuple):
            cfg.model.head.topk = (1,) if cfg.model.head.num_classes < 5 else (1, 5)
        if cfg.model.get("multilabel", False) or cfg.model.get("hierarchical", False):
            cfg.model.head.pop("topk", None)

-    def configure_ckpt(self, cfg, model_ckpt, pretrained):
+    def configure_ckpt(self, cfg, model_ckpt, pretrained=None):
+        """Patch checkpoint from model_ckpt."""
        # Checkpoint
        if model_ckpt:
            cfg.load_from = self.get_model_ckpt(model_ckpt)
@@ -133,13 +150,16 @@ def configure_ckpt(self, cfg, model_ckpt, pretrained):
        if cfg.get("load_from", None) and cfg.model.backbone.get("pretrained", None):
            cfg.model.backbone.pretrained = None

-    def configure_data(self, cfg, data_cfg, training, **kwargs):
+    def configure_data(self, cfg, data_cfg, training):
+        """Patch data settings."""
        # Data
        if data_cfg:
            cfg.merge_from_dict(data_cfg)
-        Stage.configure_data(cfg, training, **kwargs)
+        super().configure_data(cfg, training)

-    def configure_task(self, cfg, training, **kwargs):
+    def configure_task(self, cfg, training):
+        """Patch settings for task."""
+        logger.info(f"configure_task: training={training}")
        self.configure_classes(cfg)

    def configure_classes(self, cfg):
diff --git a/otx/mpa/cls/trainer.py b/otx/algorithms/classification/adapters/mmcls/tasks/trainer.py
similarity index 90%
rename from otx/mpa/cls/trainer.py
rename to otx/algorithms/classification/adapters/mmcls/tasks/trainer.py
index f4b09c9def6..ad01baa8c02 100644
--- a/otx/mpa/cls/trainer.py
+++ b/otx/algorithms/classification/adapters/mmcls/tasks/trainer.py
@@ -1,4 +1,5 @@
-# Copyright (C) 2022 Intel Corporation
+"""Basic trainer for OTX classification with MMCLS."""
+# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@@ -12,8 +13,8 @@
from mmcls.utils import collect_env
from torch import nn

-from otx.mpa.registry import STAGES
-from otx.mpa.utils.logger import get_logger
+from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES
+from otx.algorithms.common.utils.logger import get_logger

from .stage import ClsStage

@@ -22,8
@@ @STAGES.register_module() class ClsTrainer(ClsStage): + """Class for train.""" + + # pylint: disable=too-many-locals def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 - """Run training stage for classification + """Run training stage for classification. - Configuration - Environment setup @@ -45,7 +49,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 env_info_dict = collect_env() env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) dash_line = "-" * 60 + "\n" - logger.info("Environment info:\n" + dash_line + env_info + "\n" + dash_line) + logger.info(f"Environment info:\n{dash_line}{env_info}\n{dash_line}") # Data datasets = [build_dataset(cfg.data.train)] @@ -79,7 +83,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 self.configure_compat_cfg(cfg) # register custom eval hooks - validate = True if cfg.data.get("val", None) else False + validate = cfg.data.get("val", None) if validate: val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) val_loader_cfg = { diff --git a/otx/algorithms/classification/adapters/mmcls/utils/builder.py b/otx/algorithms/classification/adapters/mmcls/utils/builder.py index ba3a52e063f..36974ce3851 100644 --- a/otx/algorithms/classification/adapters/mmcls/utils/builder.py +++ b/otx/algorithms/classification/adapters/mmcls/utils/builder.py @@ -10,7 +10,7 @@ from mmcv.runner import load_checkpoint from mmcv.utils import Config, ConfigDict, get_logger -from otx.mpa.utils.logger import LEVEL +from otx.algorithms.common.utils.logger import LEVEL logger = get_logger("mmcls") diff --git a/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py b/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py index 533539bab0a..843e9ab6a87 100644 --- a/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py +++ b/otx/algorithms/classification/adapters/mmcls/utils/config_utils.py @@ -24,9 +24,9 @@ get_meta_keys, patch_color_conversion, ) +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.label import Domain from otx.api.utils.argument_checks import check_input_parameters_type -from otx.mpa.utils.logger import get_logger logger = get_logger() diff --git a/otx/algorithms/classification/configs/base/data/semisl/data_pipeline.py b/otx/algorithms/classification/configs/base/data/semisl/data_pipeline.py index e6ea7b73bbb..70ac93bbf8c 100644 --- a/otx/algorithms/classification/configs/base/data/semisl/data_pipeline.py +++ b/otx/algorithms/classification/configs/base/data/semisl/data_pipeline.py @@ -25,7 +25,7 @@ ] __strong_pipeline = [ - dict(type="MPARandAugment", n=8, m=10), + dict(type="OTXRandAugment", num_aug=8, magnitude=10), ] __train_pipeline = [ diff --git a/otx/algorithms/classification/configs/configuration.yaml b/otx/algorithms/classification/configs/configuration.yaml index 897c3f7e13f..f64bf911c01 100644 --- a/otx/algorithms/classification/configs/configuration.yaml +++ b/otx/algorithms/classification/configs/configuration.yaml @@ -10,7 +10,7 @@ learning_parameters: stable. A larger batch size has higher memory requirements. 
editable: true header: Batch size - max_value: 512 + max_value: 2048 min_value: 1 type: INTEGER ui_rules: @@ -336,22 +336,22 @@ algo_backend: header: Algo backend parameters train_type: affects_outcome_of: TRAINING - default_value: INCREMENTAL + default_value: Incremental description: Training scheme option that determines how to train the model editable: True enum_name: TrainType header: Train type options: - INCREMENTAL: "INCREMENTAL" - SEMISUPERVISED: "SEMISUPERVISED" - SELFSUPERVISED: "SELFSUPERVISED" + Incremental: "Incremental" + Semisupervised: "Semisupervised" + Selfsupervised: "Selfsupervised" type: SELECTABLE ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - value: INCREMENTAL + value: Incremental visible_in_ui: True warning: null mem_cache_size: diff --git a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml index 6b1e96ff7e0..9be2286c001 100644 --- a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml +++ b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/selfsl/hparam.yaml @@ -18,7 +18,7 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/semisl/hparam.yaml b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/semisl/hparam.yaml index fce1fb2f832..d282469bb8e 100644 --- a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/semisl/hparam.yaml +++ b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/semisl/hparam.yaml @@ -20,4 +20,4 @@ hyper_parameters: default_value: 90 algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/template.yaml b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/template.yaml index 6743404bb1d..9e195689a68 100644 --- a/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/template.yaml +++ b/otx/algorithms/classification/configs/efficientnet_b0_cls_incr/template.yaml @@ -46,7 +46,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml index 6b1e96ff7e0..9be2286c001 100644 --- a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml +++ b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/selfsl/hparam.yaml @@ -18,7 +18,7 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised # Training resources. 
max_nodes: 1 diff --git a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/semisl/hparam.yaml b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/semisl/hparam.yaml index 62026f71da9..87c19f5bd01 100644 --- a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/semisl/hparam.yaml +++ b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/semisl/hparam.yaml @@ -20,4 +20,4 @@ hyper_parameters: default_value: 90 algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/template.yaml b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/template.yaml index eaf31fa0afe..1dc17e1470b 100644 --- a/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/template.yaml +++ b/otx/algorithms/classification/configs/efficientnet_v2_s_cls_incr/template.yaml @@ -46,7 +46,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml b/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml index 6b1e96ff7e0..9be2286c001 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/selfsl/hparam.yaml @@ -18,7 +18,7 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/template_experiment.yaml b/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/template_experiment.yaml index 86ed2150a43..cdf0f76bbc1 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/template_experiment.yaml +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_075_cls_incr/template_experiment.yaml @@ -37,7 +37,7 @@ hyper_parameters: default_value: 20 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml index 6b1e96ff7e0..9be2286c001 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/selfsl/hparam.yaml @@ -18,7 +18,7 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised # Training resources. 
max_nodes: 1 diff --git a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/semisl/hparam.yaml b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/semisl/hparam.yaml index 928ffab95bb..2e116c4acdb 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/semisl/hparam.yaml +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/semisl/hparam.yaml @@ -20,4 +20,4 @@ hyper_parameters: default_value: 90 algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/supcon/model.py b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/supcon/model.py index 34976cf2668..80a8b25ea50 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/supcon/model.py +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/supcon/model.py @@ -9,6 +9,7 @@ type="SupConClassifier", backbone=dict(mode="large"), head=dict( + _delete_=True, type="SupConClsHead", in_channels=-1, aux_mlp=dict(hid_channels=0, out_channels=1024), diff --git a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/template.yaml b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/template.yaml index 0076c9f6ee3..7d06fd5fbf3 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/template.yaml +++ b/otx/algorithms/classification/configs/mobilenet_v3_large_1_cls_incr/template.yaml @@ -46,7 +46,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml b/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml index 6b1e96ff7e0..9be2286c001 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml +++ b/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/selfsl/hparam.yaml @@ -18,7 +18,7 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/template_experiment.yaml b/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/template_experiment.yaml index 7418bb3e3bd..4b0ce523e05 100644 --- a/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/template_experiment.yaml +++ b/otx/algorithms/classification/configs/mobilenet_v3_small_cls_incr/template_experiment.yaml @@ -37,7 +37,7 @@ hyper_parameters: default_value: 20 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/classification/tasks/__init__.py b/otx/algorithms/classification/tasks/__init__.py index 4d7b41852c3..83f6a517daa 100644 --- a/otx/algorithms/classification/tasks/__init__.py +++ b/otx/algorithms/classification/tasks/__init__.py @@ -15,8 +15,8 @@ # and limitations under the License. 
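The `_delete_=True` key added to the SupCon head above is mmcv's config-inheritance escape hatch: when a child config overrides a dict that also exists in its `_base_`, mmcv merges the two key by key unless `_delete_=True` is present, in which case the inherited dict is discarded and only the child's keys survive. A sketch of the two fragments involved (the base head shown here is an assumption for illustration, not copied from the repository):

```python
# base model.py fragment (assumed): the head inherited via _base_.
model = dict(
    head=dict(type="CustomNonLinearClsHead", in_channels=-1, hid_channels=1280),
)

# supcon/model.py fragment: without _delete_=True, hid_channels would leak from
# the base head into the merged SupConClsHead config; with it, the dict below
# replaces the inherited head wholesale.
model = dict(
    head=dict(
        _delete_=True,
        type="SupConClsHead",
        in_channels=-1,
        aux_mlp=dict(hid_channels=0, out_channels=1024),
    ),
)
```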
import otx.algorithms.classification.adapters.mmcls as MPAData +import otx.algorithms.classification.adapters.mmcls.tasks as MPAClassification import otx.algorithms.common.adapters.mmcv.models as OTXBackbones -import otx.mpa.cls as MPAClassification from .inference import ClassificationInferenceTask from .nncf import ClassificationNNCFTask diff --git a/otx/algorithms/classification/tasks/inference.py b/otx/algorithms/classification/tasks/inference.py index d49fc8c0e86..18cebd6701b 100644 --- a/otx/algorithms/classification/tasks/inference.py +++ b/otx/algorithms/classification/tasks/inference.py @@ -30,9 +30,11 @@ patch_default_config, patch_runner, ) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig from otx.algorithms.common.configs import TrainType from otx.algorithms.common.tasks import BaseTask from otx.algorithms.common.utils import embed_ir_model_data +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.inference_parameters import ( InferenceParameters, @@ -62,8 +64,6 @@ ) from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item from otx.api.utils.labels_utils import get_empty_label -from otx.mpa.utils.config_utils import MPAConfig -from otx.mpa.utils.logger import get_logger # pylint: disable=invalid-name @@ -71,9 +71,9 @@ TASK_CONFIG = ClassificationConfig RECIPE_TRAIN_TYPE = { - TrainType.SEMISUPERVISED: "semisl.yaml", - TrainType.INCREMENTAL: "incremental.yaml", - TrainType.SELFSUPERVISED: "selfsl.yaml", + TrainType.Semisupervised: "semisl.yaml", + TrainType.Incremental: "incremental.yaml", + TrainType.Selfsupervised: "selfsl.yaml", } @@ -113,7 +113,7 @@ def __init__(self, task_environment: TaskEnvironment, **kwargs): if not self._multilabel and not self._hierarchical: logger.info("Classification mode: multiclass") - if self._hyperparams.algo_backend.train_type == TrainType.SELFSUPERVISED: + if self._hyperparams.algo_backend.train_type == TrainType.Selfsupervised: self._selfsl = True @check_input_parameters_type({"dataset": DatasetParamTypeCheck}) @@ -423,7 +423,7 @@ def _init_recipe_hparam(self) -> dict: runner=runner, ) - if self._train_type.value == "SEMISUPERVISED": + if self._train_type.value == "Semisupervised": unlabeled_config = ConfigDict( data=ConfigDict( unlabeled_dataloader=ConfigDict( @@ -443,7 +443,7 @@ def _init_recipe(self): # pylint: disable=too-many-boolean-expressions if ( self._train_type in RECIPE_TRAIN_TYPE - and self._train_type == TrainType.INCREMENTAL + and self._train_type == TrainType.Incremental and not self._multilabel and not self._hierarchical and self._hyperparams.learning_parameters.enable_supcon @@ -453,7 +453,7 @@ def _init_recipe(self): self._recipe_cfg = self._init_model_cfg() - # FIXME[Soobee] : if train type is not in cfg, it raises an error in default INCREMENTAL mode. + # FIXME[Soobee] : if train type is not in cfg, it raises an error in default Incremental mode. 
# During semi-implementation, this line should be fixed to -> self._recipe_cfg.train_type = train_type self._recipe_cfg.train_type = self._train_type.name @@ -479,7 +479,7 @@ def _init_recipe(self): patch_evaluation(self._recipe_cfg, **options_for_patch_evaluation) # for OTX compatibility # TODO: make cfg_path loaded from custom model cfg file corresponding to train_type - # model.py contains heads/classifier only for INCREMENTAL setting + # model.py contains heads/classifier only for Incremental setting # error log : ValueError: Unexpected type of 'data_loader' parameter def _init_model_cfg(self): if self._multilabel: @@ -512,7 +512,7 @@ def _init_test_data_cfg(self, dataset: DatasetEntity): return data_cfg def _update_stage_module(self, stage_module): - module_prefix = {TrainType.INCREMENTAL: "Incr", TrainType.SEMISUPERVISED: "SemiSL"} + module_prefix = {TrainType.Incremental: "Incr", TrainType.Semisupervised: "SemiSL"} if self._train_type in module_prefix and stage_module in ["ClsTrainer", "ClsInferrer"]: stage_module = module_prefix[self._train_type] + stage_module return stage_module diff --git a/otx/algorithms/classification/tasks/nncf.py b/otx/algorithms/classification/tasks/nncf.py index 51e114a9b65..f7f0681ba09 100644 --- a/otx/algorithms/classification/tasks/nncf.py +++ b/otx/algorithms/classification/tasks/nncf.py @@ -23,6 +23,7 @@ build_nncf_classifier, ) from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.metrics import ( CurveMetric, @@ -34,7 +35,6 @@ ) from otx.api.entities.model import ModelEntity # ModelStatus from otx.api.entities.optimization_parameters import OptimizationParameters -from otx.mpa.utils.logger import get_logger from .inference import ClassificationInferenceTask diff --git a/otx/algorithms/classification/tasks/train.py b/otx/algorithms/classification/tasks/train.py index 657e0c3bce4..d3dd6a7b9df 100644 --- a/otx/algorithms/classification/tasks/train.py +++ b/otx/algorithms/classification/tasks/train.py @@ -15,6 +15,7 @@ from otx.algorithms.classification.configs import ClassificationConfig from otx.algorithms.common.utils.callback import TrainingProgressCallback from otx.algorithms.common.utils.data import get_dataset +from otx.algorithms.common.utils.logger import get_logger from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings from otx.api.entities.datasets import DatasetEntity @@ -37,7 +38,6 @@ DatasetParamTypeCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger from .inference import ClassificationInferenceTask @@ -162,8 +162,6 @@ def _init_train_data_cfg(self, dataset: DatasetEntity): labels=self._labels, ) - for label in self._labels: - label.hotkey = "a" return data_cfg def _generate_training_metrics_group(self, learning_curves): diff --git a/otx/algorithms/common/adapters/mmcv/__init__.py b/otx/algorithms/common/adapters/mmcv/__init__.py index 1d4d1c2cb89..572eadc7f25 100644 --- a/otx/algorithms/common/adapters/mmcv/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/__init__.py @@ -16,12 +16,19 @@ from .hooks import ( CancelTrainingHook, + CheckpointHookWithValResults, + CustomEvalHook, EarlyStoppingHook, EMAMomentumUpdateHook, EnsureCorrectBestCheckpointHook, + Fp16SAMOptimizerHook, + IBLossHook, + NoBiasDecayHook, OTXLoggerHook, OTXProgressHook, ReduceLROnPlateauLrUpdaterHook, + SAMOptimizerHook, + SemiSLClsHook, 
StopLossNanTrainingHook, TwoCropTransformHook, ) @@ -32,6 +39,13 @@ __all__ = [ "EpochRunnerWithCancel", "IterBasedRunnerWithCancel", + "CheckpointHookWithValResults", + "CustomEvalHook", + "Fp16SAMOptimizerHook", + "IBLossHook", + "SAMOptimizerHook", + "NoBiasDecayHook", + "SemiSLClsHook", "CancelTrainingHook", "OTXLoggerHook", "OTXProgressHook", diff --git a/otx/algorithms/common/adapters/mmcv/hooks.py b/otx/algorithms/common/adapters/mmcv/hooks.py deleted file mode 100644 index 1a13c5e68e0..00000000000 --- a/otx/algorithms/common/adapters/mmcv/hooks.py +++ /dev/null @@ -1,750 +0,0 @@ -"""Collections of hooks for common OTX algorithms.""" - -# Copyright (C) 2021-2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -import math -import os -from collections import defaultdict -from math import cos, inf, isnan, pi -from typing import Any, Dict, List, Optional - -from mmcv.parallel import is_module_wrapper -from mmcv.runner import BaseRunner, EpochBasedRunner -from mmcv.runner.dist_utils import master_only -from mmcv.runner.hooks import HOOKS, Hook, LoggerHook, LrUpdaterHook -from mmcv.utils import print_log - -from otx.api.usecases.reporting.time_monitor_callback import TimeMonitorCallback -from otx.api.utils.argument_checks import check_input_parameters_type -from otx.mpa.utils.logger import get_logger - -logger = get_logger() - - -# pylint: disable=too-many-instance-attributes, protected-access, too-many-arguments, unused-argument -@HOOKS.register_module() -class CancelTrainingHook(Hook): - """CancelTrainingHook for Training Stopping.""" - - @check_input_parameters_type() - def __init__(self, interval: int = 5): - """Periodically check whether whether a stop signal is sent to the runner during model training. - - Every 'check_interval' iterations, the work_dir for the runner is checked to see if a file '.stop_training' - is present. If it is, training is stopped. - - :param interval: Period for checking for stop signal, given in iterations. - - """ - self.interval = interval - - @staticmethod - def _check_for_stop_signal(runner: BaseRunner): - """Log _check_for_stop_signal for CancelTrainingHook.""" - work_dir = runner.work_dir - stop_filepath = os.path.join(work_dir, ".stop_training") - if os.path.exists(stop_filepath): - if isinstance(runner, EpochBasedRunner): - epoch = runner.epoch - runner._max_epochs = epoch # Force runner to stop by pretending it has reached it's max_epoch - runner.should_stop = True # Set this flag to true to stop the current training epoch - os.remove(stop_filepath) - - @check_input_parameters_type() - def after_train_iter(self, runner: BaseRunner): - """Log after_train_iter for CancelTrainingHook.""" - if not self.every_n_iters(runner, self.interval): - return - self._check_for_stop_signal(runner) - - -@HOOKS.register_module() -class EnsureCorrectBestCheckpointHook(Hook): - """EnsureCorrectBestCheckpointHook. 
- - This hook makes sure that the 'best_mAP' checkpoint points properly to the best model, even if the best model is - created in the last epoch. - """ - - @check_input_parameters_type() - def after_run(self, runner: BaseRunner): - """Called after train epoch hooks.""" - runner.call_hook("after_train_epoch") - - -@HOOKS.register_module() -class OTXLoggerHook(LoggerHook): - """OTXLoggerHook for Logging.""" - - class Curve: - """Curve with x (epochs) & y (scores).""" - - def __init__(self): - self.x = [] - self.y = [] - - def __repr__(self): - """Repr function.""" - points = [] - for x, y in zip(self.x, self.y): - points.append(f"({x},{y})") - return "curve[" + ",".join(points) + "]" - - @check_input_parameters_type() - def __init__( - self, - curves: Optional[Dict[Any, Curve]] = None, - interval: int = 10, - ignore_last: bool = True, - reset_flag: bool = True, - by_epoch: bool = True, - ): - super().__init__(interval, ignore_last, reset_flag, by_epoch) - self.curves = curves if curves is not None else defaultdict(self.Curve) - - @master_only - @check_input_parameters_type() - def log(self, runner: BaseRunner): - """Log function for OTXLoggerHook.""" - tags = self.get_loggable_tags(runner, allow_text=False, tags_to_skip=()) - if runner.max_epochs is not None: - normalized_iter = self.get_iter(runner) / runner.max_iters * runner.max_epochs - else: - normalized_iter = self.get_iter(runner) - for tag, value in tags.items(): - curve = self.curves[tag] - # Remove duplicates. - if len(curve.x) > 0 and curve.x[-1] == normalized_iter: - curve.x.pop() - curve.y.pop() - curve.x.append(normalized_iter) - curve.y.append(value) - - @check_input_parameters_type() - def after_train_epoch(self, runner: BaseRunner): - """Called after_train_epoch in OTXLoggerHook.""" - # Iteration counter is increased right after the last iteration in the epoch, - # temporarily decrease it back. 
- runner._iter -= 1 - super().after_train_epoch(runner) - runner._iter += 1 - - -@HOOKS.register_module() -class OTXProgressHook(Hook): - """OTXProgressHook for getting progress.""" - - @check_input_parameters_type() - def __init__(self, time_monitor: TimeMonitorCallback, verbose: bool = False): - super().__init__() - self.time_monitor = time_monitor - self.verbose = verbose - self.print_threshold = 1 - - @check_input_parameters_type() - def before_run(self, runner: BaseRunner): - """Called before_run in OTXProgressHook.""" - total_epochs = runner.max_epochs if runner.max_epochs is not None else 1 - self.time_monitor.total_epochs = total_epochs - self.time_monitor.train_steps = runner.max_iters // total_epochs if total_epochs else 1 - self.time_monitor.steps_per_epoch = self.time_monitor.train_steps + self.time_monitor.val_steps - self.time_monitor.total_steps = max(math.ceil(self.time_monitor.steps_per_epoch * total_epochs), 1) - self.time_monitor.current_step = 0 - self.time_monitor.current_epoch = 0 - self.time_monitor.on_train_begin() - - @check_input_parameters_type() - def before_epoch(self, runner: BaseRunner): - """Called before_epoch in OTXProgressHook.""" - self.time_monitor.on_epoch_begin(runner.epoch) - - @check_input_parameters_type() - def after_epoch(self, runner: BaseRunner): - """Called after_epoch in OTXProgressHook.""" - # put some runner's training status to use on the other hooks - runner.log_buffer.output["current_iters"] = runner.iter - self.time_monitor.on_epoch_end(runner.epoch, runner.log_buffer.output) - - @check_input_parameters_type() - def before_iter(self, runner: BaseRunner): - """Called before_iter in OTXProgressHook.""" - self.time_monitor.on_train_batch_begin(1) - - @check_input_parameters_type() - def after_iter(self, runner: BaseRunner): - """Called after_iter in OTXProgressHook.""" - # put some runner's training status to use on the other hooks - runner.log_buffer.output["current_iters"] = runner.iter - self.time_monitor.on_train_batch_end(1) - if self.verbose: - progress = self.progress - if progress >= self.print_threshold: - logger.warning(f"training progress {progress:.0f}%") - self.print_threshold = (progress + 10) // 10 * 10 - - @check_input_parameters_type() - def before_val_iter(self, runner: BaseRunner): - """Called before_val_iter in OTXProgressHook.""" - self.time_monitor.on_test_batch_begin(1, logger) - - @check_input_parameters_type() - def after_val_iter(self, runner: BaseRunner): - """Called after_val_iter in OTXProgressHook.""" - self.time_monitor.on_test_batch_end(1, logger) - - @check_input_parameters_type() - def after_run(self, runner: BaseRunner): - """Called after_run in OTXProgressHook.""" - self.time_monitor.on_train_end(1) - if self.time_monitor.update_progress_callback: - self.time_monitor.update_progress_callback(int(self.time_monitor.get_progress())) - - @property - def progress(self): - """Getting Progress from time monitor.""" - return self.time_monitor.get_progress() - - -@HOOKS.register_module() -class EarlyStoppingHook(Hook): - """Cancel training when a metric has stopped improving. - - Early Stopping hook monitors a metric quantity and if no improvement is seen for a ‘patience’ - number of epochs, the training is cancelled. - - :param interval: the number of intervals for checking early stop. The interval number should be - the same as the evaluation interval - the `interval` variable set in - `evaluation` config. - :param metric: the metric name to be monitored - :param rule: greater or less. 
In `less` mode, training will stop when the metric has stopped - decreasing and in `greater` mode it will stop when the metric has stopped - increasing. - :param patience: Number of epochs with no improvement after which the training will be reduced. - For example, if patience = 2, then we will ignore the first 2 epochs with no - improvement, and will only cancel the training after the 3rd epoch if the - metric still hasn’t improved then - :param iteration_patience: Number of iterations must be trained after the last improvement - before training stops. The same as patience but the training - continues if the number of iteration is lower than iteration_patience - This variable makes sure a model is trained enough for some - iterations after the last improvement before stopping. - :param min_delta: Minimal decay applied to lr. If the difference between new and old lr is - smaller than eps, the update is ignored - """ - - rule_map = {"greater": lambda x, y: x > y, "less": lambda x, y: x < y} - init_value_map = {"greater": -inf, "less": inf} - greater_keys = [ - "acc", - "top", - "AR@", - "auc", - "precision", - "mAP", - "mDice", - "mIoU", - "mAcc", - "aAcc", - ] - less_keys = ["loss"] - - @check_input_parameters_type() - def __init__( - self, - interval: int, - metric: str = "bbox_mAP", - rule: Optional[str] = None, - patience: int = 5, - iteration_patience: int = 500, - min_delta: float = 0.0, - ): - super().__init__() - self.patience = patience - self.iteration_patience = iteration_patience - self.interval = interval - self.min_delta = min_delta - self._init_rule(rule, metric) - - self.min_delta *= 1 if self.rule == "greater" else -1 - self.last_iter = 0 - self.wait_count = 0 - self.by_epoch = True - self.warmup_iters = 0 - self.best_score = self.init_value_map[self.rule] - - def _init_rule(self, rule, key_indicator): - """Initialize rule, key_indicator, comparison_func, and best score. - - Here is the rule to determine which rule is used for key indicator - when the rule is not specific: - 1. If the key indicator is in ``self.greater_keys``, the rule will be - specified as 'greater'. - 2. Or if the key indicator is in ``self.less_keys``, the rule will be - specified as 'less'. - 3. Or if the key indicator is equal to the substring in any one item - in ``self.greater_keys``, the rule will be specified as 'greater'. - 4. Or if the key indicator is equal to the substring in any one item - in ``self.less_keys``, the rule will be specified as 'less'. - - Args: - rule (str | None): Comparison rule for best score. - key_indicator (str | None): Key indicator to determine the - comparison rule. - """ - if rule not in self.rule_map and rule is not None: - raise KeyError(f"rule must be greater, less or None, " f"but got {rule}.") - - if rule is None: - if key_indicator in self.greater_keys or any(key in key_indicator for key in self.greater_keys): - rule = "greater" - elif key_indicator in self.less_keys or any(key in key_indicator for key in self.less_keys): - rule = "less" - else: - raise ValueError( - f"Cannot infer the rule for key " f"{key_indicator}, thus a specific rule " f"must be specified." 
- ) - self.rule = rule - self.key_indicator = key_indicator - self.compare_func = self.rule_map[self.rule] - - @check_input_parameters_type() - def before_run(self, runner: BaseRunner): - """Called before_run in EarlyStoppingHook.""" - self.by_epoch = runner.max_epochs is not None - for hook in runner.hooks: - if isinstance(hook, LrUpdaterHook): - self.warmup_iters = hook.warmup_iters - break - - @check_input_parameters_type() - def after_train_iter(self, runner: BaseRunner): - """Called after every training iter to evaluate the results.""" - if not self.by_epoch: - self._do_check_stopping(runner) - - @check_input_parameters_type() - def after_train_epoch(self, runner: BaseRunner): - """Called after every training epoch to evaluate the results.""" - if self.by_epoch: - self._do_check_stopping(runner) - - def _do_check_stopping(self, runner): - """Called _do_check_stopping in EarlyStoppingHook.""" - if not self._should_check_stopping(runner) or self.warmup_iters > runner.iter: - return - - if runner.rank == 0: - if self.key_indicator not in runner.log_buffer.output: - raise KeyError( - f"metric {self.key_indicator} does not exist in buffer. Please check " - f"{self.key_indicator} is cached in evaluation output buffer" - ) - - key_score = runner.log_buffer.output[self.key_indicator] - if self.compare_func(key_score - self.min_delta, self.best_score): - self.best_score = key_score - self.wait_count = 0 - self.last_iter = runner.iter - else: - self.wait_count += 1 - if self.wait_count >= self.patience: - if runner.iter - self.last_iter < self.iteration_patience: - print_log( - f"\nSkip early stopping. Accumulated iteration " - f"{runner.iter - self.last_iter} from the last " - f"improvement must be larger than {self.iteration_patience} to trigger " - f"Early Stopping.", - logger=runner.logger, - ) - return - stop_point = runner.epoch if self.by_epoch else runner.iter - print_log( - f"\nEarly Stopping at :{stop_point} with " f"best {self.key_indicator}: {self.best_score}", - logger=runner.logger, - ) - runner.should_stop = True - - def _should_check_stopping(self, runner): - """Called _should_check_stopping in EarlyStoppingHook.""" - check_time = self.every_n_epochs if self.by_epoch else self.every_n_iters - if not check_time(runner, self.interval): - # No evaluation during the interval. - return False - return True - - -@HOOKS.register_module(force=True) -class ReduceLROnPlateauLrUpdaterHook(LrUpdaterHook): - """Reduce learning rate when a metric has stopped improving. - - Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. - This scheduler reads a metrics quantity and if no improvement is seen for a ‘patience’ - number of epochs, the learning rate is reduced. - - :param min_lr: minimum learning rate. The lower bound of the desired learning rate. - :param interval: the number of intervals for checking the hook. The interval number should be - the same as the evaluation interval - the `interval` variable set in - `evaluation` config. - :param metric: the metric name to be monitored - :param rule: greater or less. In `less` mode, learning rate will be dropped if the metric has - stopped decreasing and in `greater` mode it will be dropped when the metric has - stopped increasing. - :param patience: Number of epochs with no improvement after which learning rate will be reduced. 
- For example, if patience = 2, then we will ignore the first 2 epochs with no - improvement, and will only drop LR after the 3rd epoch if the metric still - hasn’t improved then - :param iteration_patience: Number of iterations must be trained after the last improvement - before LR drops. The same as patience but the LR remains the same if - the number of iteration is lower than iteration_patience. This - variable makes sure a model is trained enough for some iterations - after the last improvement before dropping the LR. - :param factor: Factor to be multiply with the learning rate. - For example, new_lr = current_lr * factor - """ - - rule_map = {"greater": lambda x, y: x > y, "less": lambda x, y: x < y} - init_value_map = {"greater": -inf, "less": inf} - greater_keys = [ - "acc", - "top", - "AR@", - "auc", - "precision", - "mAP", - "mDice", - "mIoU", - "mAcc", - "aAcc", - ] - less_keys = ["loss"] - - @check_input_parameters_type() - def __init__( - self, - min_lr: float, - interval: int, - metric: str = "bbox_mAP", - rule: Optional[str] = None, - factor: float = 0.1, - patience: int = 3, - iteration_patience: int = 300, - **kwargs, - ): - super().__init__(**kwargs) - self.interval = interval - self.min_lr = min_lr - self.factor = factor - self.patience = patience - self.iteration_patience = iteration_patience - self.metric = metric - self.bad_count = 0 - self.last_iter = 0 - self.current_lr = -1.0 - self.base_lr = [] # type: List - self._init_rule(rule, metric) - self.best_score = self.init_value_map[self.rule] - - def _init_rule(self, rule, key_indicator): - """Initialize rule, key_indicator, comparison_func, and best score. - - Here is the rule to determine which rule is used for key indicator - when the rule is not specific: - 1. If the key indicator is in ``self.greater_keys``, the rule will be - specified as 'greater'. - 2. Or if the key indicator is in ``self.less_keys``, the rule will be - specified as 'less'. - 3. Or if the key indicator is equal to the substring in any one item - in ``self.greater_keys``, the rule will be specified as 'greater'. - 4. Or if the key indicator is equal to the substring in any one item - in ``self.less_keys``, the rule will be specified as 'less'. - - Args: - rule (str | None): Comparison rule for best score. - key_indicator (str | None): Key indicator to determine the - comparison rule. - """ - if rule not in self.rule_map and rule is not None: - raise KeyError(f"rule must be greater, less or None, " f"but got {rule}.") - - if rule is None: - if key_indicator in self.greater_keys or any(key in key_indicator for key in self.greater_keys): - rule = "greater" - elif key_indicator in self.less_keys or any(key in key_indicator for key in self.less_keys): - rule = "less" - else: - raise ValueError( - f"Cannot infer the rule for key " f"{key_indicator}, thus a specific rule " f"must be specified." 
- ) - self.rule = rule - self.key_indicator = key_indicator - self.compare_func = self.rule_map[self.rule] - - def _is_check_timing(self, runner: BaseRunner) -> bool: - """Check whether current epoch or iter is multiple of self.interval, skip during warmup interations.""" - check_time = self.after_each_n_epochs if self.by_epoch else self.after_each_n_iters - return check_time(runner, self.interval) and (self.warmup_iters <= runner.iter) - - def after_each_n_epochs(self, runner: BaseRunner, interval: int) -> bool: - """Check whether current epoch is a next epoch after multiples of interval.""" - return runner.epoch % interval == 0 if interval > 0 and runner.epoch != 0 else False - - def after_each_n_iters(self, runner: BaseRunner, interval: int) -> bool: - """Check whether current iter is a next iter after multiples of interval.""" - return runner.iter % interval == 0 if interval > 0 and runner.iter != 0 else False - - @check_input_parameters_type() - def get_lr(self, runner: BaseRunner, base_lr: float): - """Called get_lr in ReduceLROnPlateauLrUpdaterHook.""" - if self.current_lr < 0: - self.current_lr = base_lr - - if not self._is_check_timing(runner): - return self.current_lr - - if hasattr(runner, "all_metrics"): - score = runner.all_metrics.get(self.metric, 0.0) - else: - return self.current_lr - - if self.compare_func(score, self.best_score): - self.best_score = score - self.bad_count = 0 - self.last_iter = runner.iter - else: - self.bad_count += 1 - - print_log( - f"\nBest Score: {self.best_score}, Current Score: {score}, Patience: {self.patience} " - f"Count: {self.bad_count}", - logger=runner.logger, - ) - - if self.bad_count >= self.patience: - if runner.iter - self.last_iter < self.iteration_patience: - print_log( - f"\nSkip LR dropping. Accumulated iteration " - f"{runner.iter - self.last_iter} from the last " - f"improvement must be larger than {self.iteration_patience} to trigger " - f"LR dropping.", - logger=runner.logger, - ) - return self.current_lr - self.last_iter = runner.iter - self.bad_count = 0 - print_log( - f"\nDrop LR from: {self.current_lr}, to: " f"{max(self.current_lr * self.factor, self.min_lr)}", - logger=runner.logger, - ) - self.current_lr = max(self.current_lr * self.factor, self.min_lr) - return self.current_lr - - @check_input_parameters_type() - def before_run(self, runner: BaseRunner): - """Called before_run in ReduceLROnPlateauLrUpdaterHook.""" - # TODO: remove overloaded method after fixing the issue - # https://github.com/open-mmlab/mmdetection/issues/6572 - for group in runner.optimizer.param_groups: - group.setdefault("initial_lr", group["lr"]) - self.base_lr = [group["initial_lr"] for group in runner.optimizer.param_groups] - self.bad_count = 0 - self.last_iter = 0 - self.current_lr = -1.0 - self.best_score = self.init_value_map[self.rule] - - -@HOOKS.register_module(force=True) -class StopLossNanTrainingHook(Hook): - """StopLossNanTrainingHook.""" - - @check_input_parameters_type() - def after_train_iter(self, runner: BaseRunner): - """Called after_train_iter in StopLossNanTrainingHook.""" - if isnan(runner.outputs["loss"].item()): - logger.warning("Early Stopping since loss is NaN") - runner.should_stop = True - - -@HOOKS.register_module() -class EMAMomentumUpdateHook(Hook): - """Exponential moving average (EMA) momentum update hook for self-supervised methods. - - This hook includes momentum adjustment in self-supervised methods following: - m = 1 - ( 1- m_0) * (cos(pi * k / K) + 1) / 2, - k: current step, K: total steps. 
- - :param end_momentum: The final momentum coefficient for the target network, defaults to 1. - :param update_interval: Interval to update new momentum, defaults to 1. - :param by_epoch: Whether updating momentum by epoch or not, defaults to False. - """ - - def __init__(self, end_momentum: float = 1.0, update_interval: int = 1, by_epoch: bool = False, **kwargs): - self.by_epoch = by_epoch - self.end_momentum = end_momentum - self.update_interval = update_interval - - def before_train_epoch(self, runner: BaseRunner): - """Called before_train_epoch in EMAMomentumUpdateHook.""" - if not self.by_epoch: - return - - if is_module_wrapper(runner.model): - model = runner.model.module - else: - model = runner.model - - if not hasattr(model, "momentum"): - raise AttributeError('The model must have attribute "momentum".') - if not hasattr(model, "base_momentum"): - raise AttributeError('The model must have attribute "base_momentum".') - - if self.every_n_epochs(runner, self.update_interval): - cur_epoch = runner.epoch - max_epoch = runner.max_epochs - base_m = model.base_momentum - updated_m = ( - self.end_momentum - (self.end_momentum - base_m) * (cos(pi * cur_epoch / float(max_epoch)) + 1) / 2 - ) - model.momentum = updated_m - - def before_train_iter(self, runner: BaseRunner): - """Called before_train_iter in EMAMomentumUpdateHook.""" - if self.by_epoch: - return - - if is_module_wrapper(runner.model): - model = runner.model.module - else: - model = runner.model - - if not hasattr(model, "momentum"): - raise AttributeError('The model must have attribute "momentum".') - if not hasattr(model, "base_momentum"): - raise AttributeError('The model must have attribute "base_momentum".') - - if self.every_n_iters(runner, self.update_interval): - cur_iter = runner.iter - max_iter = runner.max_iters - base_m = model.base_momentum - updated_m = ( - self.end_momentum - (self.end_momentum - base_m) * (cos(pi * cur_iter / float(max_iter)) + 1) / 2 - ) - model.momentum = updated_m - - def after_train_iter(self, runner: BaseRunner): - """Called after_train_iter in EMAMomentumUpdateHook.""" - if self.every_n_iters(runner, self.update_interval): - if is_module_wrapper(runner.model): - runner.model.module.momentum_update() - else: - runner.model.momentum_update() - - -@HOOKS.register_module() -class ForceTrainModeHook(Hook): - """Force train mode for model. - - This is a workaround of a bug in EvalHook from MMCV. - If a model evaluation is enabled before training by setting 'start=0' in EvalHook, - EvalHook does not put a model in a training mode again after evaluation. - - This simple hook forces to put a model in a training mode before every train epoch - with the lowest priority. - """ - - def before_train_epoch(self, runner): - """Make sure to put a model in a training mode before train epoch.""" - runner.model.train() - - -@HOOKS.register_module() -class TwoCropTransformHook(Hook): - """TwoCropTransformHook with every specific interval. - - This hook decides whether using single pipeline or two pipelines - implemented in `TwoCropTransform` for the current iteration. - - Args: - interval (int): If `interval` == 1, both pipelines is used. - If `interval` > 1, the first pipeline is used and then - both pipelines are used every `interval`. Defaults to 1. - by_epoch (bool): (TODO) Use `interval` by epoch. Defaults to False. - """ - - @check_input_parameters_type() - def __init__(self, interval: int = 1, by_epoch: bool = False): - assert interval > 0, f"interval (={interval}) must be positive value." 
- if by_epoch: - raise NotImplementedError("by_epoch is not implemented.") - - self.interval = interval - self.cnt = 0 - - @check_input_parameters_type() - def _get_dataset(self, runner: BaseRunner): - """Get dataset to handle `is_both`.""" - if hasattr(runner.data_loader.dataset, "dataset"): - # for RepeatDataset - dataset = runner.data_loader.dataset.dataset - else: - dataset = runner.data_loader.dataset - - return dataset - - # pylint: disable=inconsistent-return-statements - @check_input_parameters_type() - def _find_two_crop_transform(self, transforms: List[object]): - """Find TwoCropTransform among transforms.""" - for transform in transforms: - if transform.__class__.__name__ == "TwoCropTransform": - return transform - - @check_input_parameters_type() - def before_train_epoch(self, runner: BaseRunner): - """Called before_train_epoch in TwoCropTransformHook.""" - # Always keep `TwoCropTransform` enabled. - if self.interval == 1: - return - - dataset = self._get_dataset(runner) - two_crop_transform = self._find_two_crop_transform(dataset.pipeline.transforms) - if self.cnt == self.interval - 1: - # start using both pipelines - two_crop_transform.is_both = True - else: - two_crop_transform.is_both = False - - @check_input_parameters_type() - def after_train_iter(self, runner: BaseRunner): - """Called after_train_iter in TwoCropTransformHook.""" - # Always keep `TwoCropTransform` enabled. - if self.interval == 1: - return - - if self.cnt < self.interval - 1: - # Instead of using `runner.every_n_iters` or `runner.every_n_inner_iters`, - # this condition is used to compare `self.cnt` with `self.interval` throughout the entire epochs. - self.cnt += 1 - - if self.cnt == self.interval - 1: - dataset = self._get_dataset(runner) - two_crop_transform = self._find_two_crop_transform(dataset.pipeline.transforms) - if not two_crop_transform.is_both: - # If `self.cnt` == `self.interval`-1, there are two cases, - # 1. `self.cnt` was updated in L709, so `is_both` must be on for the next iter. - # 2. if the current iter was already conducted, `is_both` must be off. - two_crop_transform.is_both = True - else: - two_crop_transform.is_both = False - self.cnt = 0 diff --git a/otx/algorithms/common/adapters/mmcv/hooks/__init__.py b/otx/algorithms/common/adapters/mmcv/hooks/__init__.py new file mode 100644 index 00000000000..4c48242bc35 --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/__init__.py @@ -0,0 +1,89 @@ +"""Adapters for mmcv support.""" + +# Copyright (C) 2022-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. 
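The monolithic `hooks.py` deleted above reappears below as a `hooks/` package with one module per hook family; the package `__init__.py` re-exports every class, so imports keep resolving from the package root, and configs that reference hooks by registry name are untouched because each module still calls `@HOOKS.register_module()`. Both consumption styles, sketched (the config values are illustrative):

```python
# Import style: names resolve through the package __init__ re-exports below.
from otx.algorithms.common.adapters.mmcv.hooks import EarlyStoppingHook, OTXProgressHook

# Registry style: mmcv instantiates hooks from config dicts by registered name,
# so moving the defining module does not change this fragment.
custom_hooks = [
    dict(type="EarlyStoppingHook", interval=1, metric="accuracy", patience=5),
]
```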
+ +from .adaptive_training_hook import AdaptiveTrainSchedulingHook +from .cancel_hook import CancelInterfaceHook, CancelTrainingHook +from .checkpoint_hook import ( + CheckpointHookWithValResults, + EnsureCorrectBestCheckpointHook, + SaveInitialWeightHook, +) +from .composed_dataloaders_hook import ComposedDataLoadersHook +from .custom_model_ema_hook import CustomModelEMAHook, EMAMomentumUpdateHook +from .dual_model_ema_hook import DualModelEMAHook +from .early_stopping_hook import ( + EarlyStoppingHook, + LazyEarlyStoppingHook, + ReduceLROnPlateauLrUpdaterHook, + StopLossNanTrainingHook, +) +from .eval_hook import CustomEvalHook, DistCustomEvalHook +from .force_train_hook import ForceTrainModeHook +from .fp16_sam_optimizer_hook import Fp16SAMOptimizerHook +from .ib_loss_hook import IBLossHook +from .logger_hook import LoggerReplaceHook, OTXLoggerHook +from .model_ema_v2_hook import ModelEmaV2Hook +from .no_bias_decay_hook import NoBiasDecayHook +from .progress_hook import OTXProgressHook +from .recording_forward_hook import ( + ActivationMapHook, + BaseRecordingForwardHook, + EigenCamHook, + FeatureVectorHook, +) +from .sam_optimizer_hook import SAMOptimizerHook +from .semisl_cls_hook import SemiSLClsHook +from .task_adapt_hook import TaskAdaptHook +from .two_crop_transform_hook import TwoCropTransformHook +from .unbiased_teacher_hook import UnbiasedTeacherHook +from .workflow_hook import WorkflowHook + +__all__ = [ + "AdaptiveTrainSchedulingHook", + "CancelInterfaceHook", + "CancelTrainingHook", + "CheckpointHookWithValResults", + "EnsureCorrectBestCheckpointHook", + "ComposedDataLoadersHook", + "CustomEvalHook", + "DistCustomEvalHook", + "EarlyStoppingHook", + "LazyEarlyStoppingHook", + "ReduceLROnPlateauLrUpdaterHook", + "EMAMomentumUpdateHook", + "ForceTrainModeHook", + "Fp16SAMOptimizerHook", + "StopLossNanTrainingHook", + "IBLossHook", + "OTXLoggerHook", + "LoggerReplaceHook", + "CustomModelEMAHook", + "DualModelEMAHook", + "ModelEmaV2Hook", + "NoBiasDecayHook", + "OTXProgressHook", + "BaseRecordingForwardHook", + "EigenCamHook", + "ActivationMapHook", + "FeatureVectorHook", + "SAMOptimizerHook", + "SaveInitialWeightHook", + "SemiSLClsHook", + "TaskAdaptHook", + "TwoCropTransformHook", + "UnbiasedTeacherHook", + "WorkflowHook", +] diff --git a/otx/mpa/modules/hooks/adaptive_training_hooks.py b/otx/algorithms/common/adapters/mmcv/hooks/adaptive_training_hook.py similarity index 91% rename from otx/mpa/modules/hooks/adaptive_training_hooks.py rename to otx/algorithms/common/adapters/mmcv/hooks/adaptive_training_hook.py index cbd31b095f4..23480ce5cbb 100644 --- a/otx/mpa/modules/hooks/adaptive_training_hooks.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/adaptive_training_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Adaptive training schedule hook.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,15 +9,19 @@ from mmcv.runner.hooks.checkpoint import CheckpointHook from mmcv.runner.hooks.evaluation import EvalHook -from otx.mpa.modules.hooks.early_stopping_hook import EarlyStoppingHook -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.hooks.early_stopping_hook import ( + EarlyStoppingHook, +) +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() +# pylint: disable=too-many-arguments, too-many-instance-attributes + @HOOKS.register_module() class AdaptiveTrainSchedulingHook(Hook): - """Adaptive Training Scheduling Hook + """Adaptive Training Scheduling Hook. 
Depending on the size of iteration per epoch, adaptively update the validation interval and related values. @@ -58,6 +63,7 @@ def __init__( self._original_interval = None def before_run(self, runner): + """Before run.""" if self.enable_eval_before_run: hook = self.get_evalhook(runner) if hook is None: @@ -68,6 +74,7 @@ def before_run(self, runner): hook.start = 0 def before_train_iter(self, runner): + """Before train iter.""" if self.enable_eval_before_run and self._original_interval is not None: hook = self.get_evalhook(runner) hook.interval = self._original_interval @@ -110,10 +117,12 @@ def before_train_iter(self, runner): self._initialized = True def get_adaptive_interval(self, iter_per_epoch): + """Get adaptive interval.""" adaptive_interval = max(round(math.exp(self.decay * iter_per_epoch) * self.max_interval), 1) return adaptive_interval def get_evalhook(self, runner): + """Get evaluation hook.""" target_hook = None for hook in runner.hooks: if isinstance(hook, EvalHook): diff --git a/otx/algorithms/common/adapters/mmcv/hooks/cancel_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/cancel_hook.py new file mode 100644 index 00000000000..4d4393a267b --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/cancel_hook.py @@ -0,0 +1,89 @@ +"""Cancel hooks.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + + +import os +from typing import Callable + +from mmcv.runner import BaseRunner, EpochBasedRunner +from mmcv.runner.hooks import HOOKS, Hook + +from otx.algorithms.common.utils.logger import get_logger +from otx.api.utils.argument_checks import check_input_parameters_type + +logger = get_logger() + + +# pylint: disable=too-many-instance-attributes, protected-access, too-many-arguments, unused-argument +@HOOKS.register_module() +class CancelTrainingHook(Hook): + """CancelTrainingHook for Training Stopping.""" + + @check_input_parameters_type() + def __init__(self, interval: int = 5): + """Periodically check whether whether a stop signal is sent to the runner during model training. + + Every 'check_interval' iterations, the work_dir for the runner is checked to see if a file '.stop_training' + is present. If it is, training is stopped. + + :param interval: Period for checking for stop signal, given in iterations. + + """ + self.interval = interval + + @staticmethod + def _check_for_stop_signal(runner: BaseRunner): + """Log _check_for_stop_signal for CancelTrainingHook.""" + work_dir = runner.work_dir + stop_filepath = os.path.join(work_dir, ".stop_training") + if os.path.exists(stop_filepath): + if isinstance(runner, EpochBasedRunner): + epoch = runner.epoch + runner._max_epochs = epoch # Force runner to stop by pretending it has reached it's max_epoch + runner.should_stop = True # Set this flag to true to stop the current training epoch + os.remove(stop_filepath) + + @check_input_parameters_type() + def after_train_iter(self, runner: BaseRunner): + """Log after_train_iter for CancelTrainingHook.""" + if not self.every_n_iters(runner, self.interval): + return + self._check_for_stop_signal(runner) + + +@HOOKS.register_module() +class CancelInterfaceHook(Hook): + """Cancel interface. If called, running job will be terminated.""" + + def __init__(self, init_callback: Callable, interval=5): + self.on_init_callback = init_callback + self.runner = None + self.interval = interval + + def cancel(self): + """Cancel.""" + logger.info("CancelInterfaceHook.cancel() is called.") + if self.runner is None: + logger.warning("runner is not configured yet. 
ignored this request.") + return + + if self.runner.should_stop: + logger.warning("cancel already requested.") + return + + if isinstance(self.runner, EpochBasedRunner): + epoch = self.runner.epoch + self.runner._max_epochs = epoch # Force runner to stop by pretending it has reached it's max_epoch + self.runner.should_stop = True # Set this flag to true to stop the current training epoch + logger.info("requested stopping to the runner") + + def before_run(self, runner): + """Before run.""" + self.runner = runner + self.on_init_callback(self) + + def after_run(self, runner): + """After run.""" + self.runner = None diff --git a/otx/mpa/modules/hooks/checkpoint_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/checkpoint_hook.py similarity index 77% rename from otx/mpa/modules/hooks/checkpoint_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/checkpoint_hook.py index b421b173e07..234cb2dd49a 100644 --- a/otx/mpa/modules/hooks/checkpoint_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/checkpoint_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""CheckpointHook with validation results for classification task.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,12 +7,15 @@ from pathlib import Path from typing import Optional +from mmcv.runner import BaseRunner from mmcv.runner.dist_utils import allreduce_params, master_only from mmcv.runner.hooks.hook import HOOKS, Hook +from otx.api.utils.argument_checks import check_input_parameters_type + @HOOKS.register_module() -class CheckpointHookWithValResults(Hook): +class CheckpointHookWithValResults(Hook): # pylint: disable=too-many-instance-attributes """Save checkpoints periodically. Args: @@ -53,10 +57,12 @@ def __init__( self._best_model_weight: Optional[Path] = None def before_run(self, runner): + """Set output directopy if not set.""" if not self.out_dir: self.out_dir = runner.work_dir def after_train_epoch(self, runner): + """Checkpoint stuffs after train epoch.""" if not self.by_epoch or not self.every_n_epochs(runner, self.interval): return @@ -126,6 +132,7 @@ def _save_latest_checkpoint(self, runner): runner.meta["hook_msgs"]["last_ckpt"] = str(self.out_dir / cur_ckpt_filename) def after_train_iter(self, runner): + """Checkpoint stuffs after train iteration.""" if self.by_epoch or not self.every_n_iters(runner, self.interval): return @@ -136,3 +143,34 @@ def after_train_iter(self, runner): allreduce_params(runner.model.buffers()) self._save_checkpoint(runner) runner.save_ckpt = False + + +@HOOKS.register_module() +class EnsureCorrectBestCheckpointHook(Hook): + """EnsureCorrectBestCheckpointHook. + + This hook makes sure that the 'best_mAP' checkpoint points properly to the best model, even if the best model is + created in the last epoch. 
+    """
+
+    @check_input_parameters_type()
+    def after_run(self, runner: BaseRunner):
+        """Called after train epoch hooks."""
+        runner.call_hook("after_train_epoch")
+
+
+@HOOKS.register_module()
+class SaveInitialWeightHook(Hook):
+    """Save the initial weights before training."""
+
+    def __init__(self, save_path, file_name: str = "weights.pth", **kwargs):
+        self._save_path = save_path
+        self._file_name = file_name
+        self._args = kwargs
+
+    def before_run(self, runner):
+        """Save the initial weights before training."""
+        runner.logger.info("Saving weights before training")
+        runner.save_checkpoint(
+            self._save_path, filename_tmpl=self._file_name, save_optimizer=False, create_symlink=False, **self._args
+        )
diff --git a/otx/mpa/modules/hooks/composed_dataloaders_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/composed_dataloaders_hook.py
similarity index 58%
rename from otx/mpa/modules/hooks/composed_dataloaders_hook.py
rename to otx/algorithms/common/adapters/mmcv/hooks/composed_dataloaders_hook.py
index 64fe0ebdd2f..90b6644a58d 100644
--- a/otx/mpa/modules/hooks/composed_dataloaders_hook.py
+++ b/otx/algorithms/common/adapters/mmcv/hooks/composed_dataloaders_hook.py
@@ -1,30 +1,37 @@
-# Copyright (C) 2022 Intel Corporation
+"""Composed dataloader hook."""
+# Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
 
-from typing import Sequence, Union
+from typing import List, Sequence, Union
 
 from mmcv.runner import HOOKS, Hook
 from torch.utils.data import DataLoader
 
-from otx.mpa.modules.datasets.composed_dataloader import ComposedDL
-from otx.mpa.utils.logger import get_logger
+from otx.algorithms.common.adapters.torch.dataloaders import ComposedDL
+from otx.algorithms.common.utils.logger import get_logger
 
 logger = get_logger()
 
 
 @HOOKS.register_module()
 class ComposedDataLoadersHook(Hook):
+    """Hook that builds a composed dataloader combining multiple data loaders.
+
+    Used especially for semi-supervised learning, to aggregate an unlabeled dataloader and a labeled dataloader.
+    """
+
     def __init__(
         self,
         data_loaders: Union[Sequence[DataLoader], DataLoader],
     ):
-        self.data_loaders = []
+        self.data_loaders = []  # type: List[DataLoader]
         self.composed_loader = None
         self.add_dataloaders(data_loaders)
 
     def add_dataloaders(self, data_loaders: Union[Sequence[DataLoader], DataLoader]):
+        """Collect data loaders to be added into the composed dataloader."""
         if isinstance(data_loaders, DataLoader):
             data_loaders = [data_loaders]
         else:
@@ -34,12 +41,9 @@ def add_dataloaders(self, data_loaders: Union[Sequence[DataLoader], DataLoader])
         self.composed_loader = None
 
     def before_epoch(self, runner):
+        """Create ComposedDL before running the epoch."""
         if self.composed_loader is None:
-            logger.info(
-                "Creating ComposedDL "
-                f"(runner's -> {runner.data_loader}, "
-                f"hook's -> {[i for i in self.data_loaders]})"
-            )
+            logger.info("Creating ComposedDL " f"(runner's -> {runner.data_loader}, " f"hook's -> {self.data_loaders})")
             self.composed_loader = ComposedDL([runner.data_loader, *self.data_loaders])
         # Per-epoch replacement: train-only loader -> train loader + additional loaders
         # (It's similar to local variable in epoch. Need to update every epoch...)
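`ComposedDL` itself now lives in `otx.algorithms.common.adapters.torch.dataloaders`. The contract the hook above relies on is that the composed object looks like a single loader while stepping several underlying loaders together, with the labeled loader driving the epoch length. A minimal stand-in illustrating that idea (an assumption for clarity, not the actual `ComposedDL` implementation):

```python
from itertools import cycle


class SimpleComposedDL:
    """Toy composed loader: the first loader drives the epoch, the rest are cycled."""

    def __init__(self, loaders):
        self.main, *self.extra = loaders  # e.g. [labeled_loader, unlabeled_loader]

    def __len__(self):
        return len(self.main)  # epoch length follows the labeled loader

    def __iter__(self):
        extras = [cycle(loader) for loader in self.extra]  # shorter loaders repeat
        for batch in self.main:
            yield [batch, *(next(it) for it in extras)]
```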
diff --git a/otx/algorithms/common/adapters/mmcv/hooks/custom_model_ema_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/custom_model_ema_hook.py new file mode 100644 index 00000000000..f321a63d196 --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/custom_model_ema_hook.py @@ -0,0 +1,113 @@ +"""EMA hooks.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import math +from math import cos, pi + +from mmcv.parallel import is_module_wrapper +from mmcv.runner import HOOKS, BaseRunner, Hook +from mmcv.runner.hooks.ema import EMAHook + +from otx.algorithms.common.utils.logger import get_logger + +logger = get_logger() + + +@HOOKS.register_module() +class CustomModelEMAHook(EMAHook): + """Custom EMAHook to update momentum for ema over training.""" + + def __init__(self, momentum=0.0002, epoch_momentum=0.0, interval=1, **kwargs): + super().__init__(momentum=momentum, interval=interval, **kwargs) + self.momentum = momentum + self.epoch_momentum = epoch_momentum + self.interval = interval + + def before_train_epoch(self, runner): + """Update the momentum.""" + if self.epoch_momentum > 0.0: + iter_per_epoch = len(runner.data_loader) + epoch_decay = 1 - self.epoch_momentum + iter_decay = math.pow(epoch_decay, self.interval / iter_per_epoch) + self.momentum = 1 - iter_decay + logger.info(f"Update EMA momentum: {self.momentum}") + self.epoch_momentum = 0.0 # disable re-compute + + super().before_train_epoch(runner) + + +@HOOKS.register_module() +class EMAMomentumUpdateHook(Hook): + """Exponential moving average (EMA) momentum update hook for self-supervised methods. + + This hook includes momentum adjustment in self-supervised methods following: + m = 1 - ( 1- m_0) * (cos(pi * k / K) + 1) / 2, + k: current step, K: total steps. + + :param end_momentum: The final momentum coefficient for the target network, defaults to 1. + :param update_interval: Interval to update new momentum, defaults to 1. + :param by_epoch: Whether updating momentum by epoch or not, defaults to False. 
+ """ + + def __init__(self, end_momentum: float = 1.0, update_interval: int = 1, by_epoch: bool = False): + self.by_epoch = by_epoch + self.end_momentum = end_momentum + self.update_interval = update_interval + + def before_train_epoch(self, runner: BaseRunner): + """Called before_train_epoch in EMAMomentumUpdateHook.""" + if not self.by_epoch: + return + + if is_module_wrapper(runner.model): + model = runner.model.module + else: + model = runner.model + + if not hasattr(model, "momentum"): + raise AttributeError('The model must have attribute "momentum".') + if not hasattr(model, "base_momentum"): + raise AttributeError('The model must have attribute "base_momentum".') + + if self.every_n_epochs(runner, self.update_interval): + cur_epoch = runner.epoch + max_epoch = runner.max_epochs + base_m = model.base_momentum + updated_m = ( + self.end_momentum - (self.end_momentum - base_m) * (cos(pi * cur_epoch / float(max_epoch)) + 1) / 2 + ) + model.momentum = updated_m + + def before_train_iter(self, runner: BaseRunner): + """Called before_train_iter in EMAMomentumUpdateHook.""" + if self.by_epoch: + return + + if is_module_wrapper(runner.model): + model = runner.model.module + else: + model = runner.model + + if not hasattr(model, "momentum"): + raise AttributeError('The model must have attribute "momentum".') + if not hasattr(model, "base_momentum"): + raise AttributeError('The model must have attribute "base_momentum".') + + if self.every_n_iters(runner, self.update_interval): + cur_iter = runner.iter + max_iter = runner.max_iters + base_m = model.base_momentum + updated_m = ( + self.end_momentum - (self.end_momentum - base_m) * (cos(pi * cur_iter / float(max_iter)) + 1) / 2 + ) + model.momentum = updated_m + + def after_train_iter(self, runner: BaseRunner): + """Called after_train_iter in EMAMomentumUpdateHook.""" + if self.every_n_iters(runner, self.update_interval): + if is_module_wrapper(runner.model): + runner.model.module.momentum_update() + else: + runner.model.momentum_update() diff --git a/otx/mpa/modules/hooks/model_ema_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/dual_model_ema_hook.py similarity index 83% rename from otx/mpa/modules/hooks/model_ema_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/dual_model_ema_hook.py index 8d96bd4b97c..e2c731fbb73 100644 --- a/otx/mpa/modules/hooks/model_ema_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/dual_model_ema_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Dual model EMA hooks.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -7,16 +8,17 @@ import torch from mmcv.parallel import is_module_wrapper from mmcv.runner import HOOKS, Hook -from mmcv.runner.hooks.ema import EMAHook -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() +# pylint: disable=too-many-instance-attributes + @HOOKS.register_module() class DualModelEMAHook(Hook): - """Generalized re-implementation of mmcv.runner.EMAHook + r"""Generalized re-implementation of mmcv.runner.EMAHook. 
Source model paramters would be exponentially averaged onto destination model pararmeters on given intervals @@ -55,8 +57,12 @@ def __init__( self.epoch_momentum = epoch_momentum self.interval = interval self.start_epoch = start_epoch + self.src_model = None + self.dst_model = None self.src_model_name = src_model_name self.dst_model_name = dst_model_name + self.src_params = None + self.dst_params = None self.enabled = False def before_run(self, runner): @@ -78,6 +84,7 @@ def before_run(self, runner): logger.info(f"model_s model_t diff: {self._diff_model()}") def before_train_epoch(self, runner): + """Momentum update.""" if self.epoch_momentum > 0.0: iter_per_epoch = len(runner.data_loader) epoch_decay = 1 - self.epoch_momentum @@ -104,6 +111,7 @@ def after_train_iter(self, runner): self._ema_model() def after_train_epoch(self, runner): + """Log difference between models if enabled.""" if self.enabled: logger.info(f"model_s model_t diff: {self._diff_model()}") @@ -141,23 +149,3 @@ def _diff_model(self): diff = ((src_param - dst_param) ** 2).sum() diff_sum += diff return diff_sum - - -@HOOKS.register_module() -class CustomModelEMAHook(EMAHook): - def __init__(self, momentum=0.0002, epoch_momentum=0.0, interval=1, **kwargs): - super().__init__(momentum=momentum, interval=interval, **kwargs) - self.momentum = momentum - self.epoch_momentum = epoch_momentum - self.interval = interval - - def before_train_epoch(self, runner): - if self.epoch_momentum > 0.0: - iter_per_epoch = len(runner.data_loader) - epoch_decay = 1 - self.epoch_momentum - iter_decay = math.pow(epoch_decay, self.interval / iter_per_epoch) - self.momentum = 1 - iter_decay - logger.info(f"Update EMA momentum: {self.momentum}") - self.epoch_momentum = 0.0 # disable re-compute - - super().before_train_epoch(runner) diff --git a/otx/mpa/modules/hooks/early_stopping_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/early_stopping_hook.py similarity index 84% rename from otx/mpa/modules/hooks/early_stopping_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/early_stopping_hook.py index 75bd2faeed7..aee3e0f4120 100644 --- a/otx/mpa/modules/hooks/early_stopping_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/early_stopping_hook.py @@ -1,24 +1,26 @@ -# Copyright (C) 2022 Intel Corporation +"""Early stopping hooks.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from math import inf, isnan -from typing import Optional +from typing import List, Optional from mmcv.runner import BaseRunner, LrUpdaterHook from mmcv.runner.hooks import HOOKS, Hook from mmcv.utils import print_log -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger +from otx.api.utils.argument_checks import check_input_parameters_type logger = get_logger() +# pylint: disable=too-many-arguments, too-many-instance-attributes -# Temp copy from detection_task -# TODO: refactoing + +@HOOKS.register_module() class EarlyStoppingHook(Hook): - """ - Cancel training when a metric has stopped improving. + """Cancel training when a metric has stopped improving. Early Stopping hook monitors a metric quantity and if no improvement is seen for a ‘patience’ number of epochs, the training is cancelled. 
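Stripped of the runner plumbing, the stopping rule described above is a patience counter over the monitored metric. A minimal sketch under the "greater is better" rule (the class name and score sequence are hypothetical; the real hook additionally supports a rule map, `min_delta`, and `iteration_patience`):

```python
from math import inf


class PatienceCounter:
    """Minimal early-stopping bookkeeping, assuming a 'greater is better' metric.

    Keeps the best score seen so far and signals a stop once `patience`
    evaluations pass without a gain of more than `min_delta`.
    """

    def __init__(self, patience: int = 5, min_delta: float = 0.0):
        self.patience = patience
        self.min_delta = min_delta
        self.best_score = -inf
        self.wait_count = 0

    def update(self, score: float) -> bool:
        """Record one evaluation; return True when training should stop."""
        if score > self.best_score + self.min_delta:
            self.best_score = score
            self.wait_count = 0
        else:
            self.wait_count += 1
        return self.wait_count >= self.patience


counter = PatienceCounter(patience=3)
for epoch_score in [0.60, 0.65, 0.64, 0.65, 0.64]:
    if counter.update(epoch_score):
        print("stop")  # fires on the third evaluation without improvement
```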
@@ -48,6 +50,7 @@ class EarlyStoppingHook(Hook): greater_keys = ["acc", "top", "AR@", "auc", "precision", "mAP", "mDice", "mIoU", "mAcc", "aAcc", "MHAcc"] less_keys = ["loss"] + @check_input_parameters_type() def __init__( self, interval: int, @@ -68,6 +71,8 @@ def __init__( self.last_iter = 0 self.wait_count = 0 self.best_score = self.init_value_map[self.rule] + self.warmup_iters = None + self.by_epoch = True def _init_rule(self, rule, key_indicator): """Initialize rule, key_indicator, comparison_func, and best score. @@ -104,8 +109,11 @@ def _init_rule(self, rule, key_indicator): self.key_indicator = key_indicator self.compare_func = self.rule_map[self.rule] + @check_input_parameters_type() def before_run(self, runner: BaseRunner): - self.by_epoch = False if runner.max_epochs is None else True + """Called before_run in EarlyStoppingHook.""" + if runner.max_epochs is None: + self.by_epoch = False for hook in runner.hooks: if isinstance(hook, LrUpdaterHook): self.warmup_iters = hook.warmup_iters @@ -113,17 +121,20 @@ def before_run(self, runner: BaseRunner): if getattr(self, "warmup_iters", None) is None: raise ValueError("LrUpdaterHook must be registered to runner.") + @check_input_parameters_type() def after_train_iter(self, runner: BaseRunner): """Called after every training iter to evaluate the results.""" if not self.by_epoch: self._do_check_stopping(runner) + @check_input_parameters_type() def after_train_epoch(self, runner: BaseRunner): """Called after every training epoch to evaluate the results.""" if self.by_epoch: self._do_check_stopping(runner) def _do_check_stopping(self, runner): + """Called _do_check_stopping in EarlyStoppingHook.""" if not self._should_check_stopping(runner) or self.warmup_iters > runner.iter: return @@ -159,6 +170,7 @@ def _do_check_stopping(self, runner): runner.should_stop = True def _should_check_stopping(self, runner): + """Called _should_check_stopping in EarlyStoppingHook.""" check_time = self.every_n_epochs if self.by_epoch else self.every_n_iters if not check_time(runner, self.interval): # No evaluation during the interval. @@ -168,6 +180,8 @@ def _should_check_stopping(self, runner): @HOOKS.register_module() class LazyEarlyStoppingHook(EarlyStoppingHook): + """Lazy early stop hook.""" + def __init__( self, interval: int, @@ -179,7 +193,7 @@ def __init__( start: int = None, ): self.start = start - super(LazyEarlyStoppingHook, self).__init__(interval, metric, rule, patience, iteration_patience, min_delta) + super().__init__(interval, metric, rule, patience, iteration_patience, min_delta) def _should_check_stopping(self, runner): if self.by_epoch: @@ -201,10 +215,9 @@ def _should_check_stopping(self, runner): return True -@HOOKS.register_module() +@HOOKS.register_module(force=True) class ReduceLROnPlateauLrUpdaterHook(LrUpdaterHook): - """ - Reduce learning rate when a metric has stopped improving. + """Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. 
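The core of the hunk that follows is a small rule: after `patience` bad evaluations, multiply the learning rate by a factor and clamp it at `min_lr`. Reduced to one function (the helper name and defaults are illustrative; the hook additionally honors warmup iterations and `iteration_patience`):

```python
def reduce_on_plateau(current_lr: float, bad_count: int, patience: int,
                      factor: float = 0.5, min_lr: float = 1e-6) -> float:
    """LR rule applied once the monitored metric plateaus.

    Mirrors `self.current_lr = max(self.current_lr * self.factor, self.min_lr)`
    from ReduceLROnPlateauLrUpdaterHook.get_lr.
    """
    if bad_count >= patience:
        return max(current_lr * factor, min_lr)
    return current_lr


print(reduce_on_plateau(0.01, bad_count=5, patience=5))  # 0.005
```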
This scheduler reads a metrics quantity and if no improvement is seen for a ‘patience’ @@ -233,9 +246,10 @@ class ReduceLROnPlateauLrUpdaterHook(LrUpdaterHook): rule_map = {"greater": lambda x, y: x > y, "less": lambda x, y: x < y} init_value_map = {"greater": -inf, "less": inf} - greater_keys = ["acc", "top", "AR@", "auc", "precision", "mAP", "mDice", "mIoU", "mAcc", "aAcc"] + greater_keys = ["acc", "top", "AR@", "auc", "precision", "mAP", "mDice", "mIoU", "mAcc", "aAcc", "MHAcc"] less_keys = ["loss"] + @check_input_parameters_type() def __init__( self, min_lr: float, @@ -256,7 +270,8 @@ def __init__( self.metric = metric self.bad_count = 0 self.last_iter = 0 - self.current_lr = None + self.current_lr = -1.0 + self.base_lr = [] # type: List self._init_rule(rule, metric) self.best_score = self.init_value_map[self.rule] @@ -295,30 +310,33 @@ def _init_rule(self, rule, key_indicator): self.key_indicator = key_indicator self.compare_func = self.rule_map[self.rule] - def _should_check_stopping(self, runner): - check_time = self.every_n_epochs if self.by_epoch else self.every_n_iters - if not check_time(runner, self.interval): - # No evaluation during the interval. - return False - return True + def _is_check_timing(self, runner: BaseRunner) -> bool: + """Check whether current epoch or iter is multiple of self.interval, skip during warmup interations.""" + check_time = self.after_each_n_epochs if self.by_epoch else self.after_each_n_iters + return check_time(runner, self.interval) and (self.warmup_iters <= runner.iter) - def get_lr(self, runner: BaseRunner, base_lr: float): - if not self._should_check_stopping(runner) or self.warmup_iters > runner.iter: - return self.current_lr if self.current_lr is not None else base_lr + def after_each_n_epochs(self, runner: BaseRunner, interval: int) -> bool: + """Check whether current epoch is a next epoch after multiples of interval.""" + return runner.epoch % interval == 0 if interval > 0 and runner.epoch != 0 else False + + def after_each_n_iters(self, runner: BaseRunner, interval: int) -> bool: + """Check whether current iter is a next iter after multiples of interval.""" + return runner.iter % interval == 0 if interval > 0 and runner.iter != 0 else False - if self.current_lr is None: + @check_input_parameters_type() + def get_lr(self, runner: BaseRunner, base_lr: float): + """Called get_lr in ReduceLROnPlateauLrUpdaterHook.""" + if self.current_lr < 0: self.current_lr = base_lr - if hasattr(runner, self.metric): - score = getattr(runner, self.metric, 0.0) + if not self._is_check_timing(runner): + return self.current_lr + + if hasattr(runner, "all_metrics"): + score = runner.all_metrics.get(self.metric, 0.0) else: return self.current_lr - print_log( - f"\nBest Score: {self.best_score}, Current Score: {score}, Patience: {self.patience} " - f"Count: {self.bad_count}", - logger=runner.logger, - ) if self.compare_func(score, self.best_score): self.best_score = score self.bad_count = 0 @@ -326,6 +344,12 @@ def get_lr(self, runner: BaseRunner, base_lr: float): else: self.bad_count += 1 + print_log( + f"\nBest Score: {self.best_score}, Current Score: {score}, Patience: {self.patience} " + f"Count: {self.bad_count}", + logger=runner.logger, + ) + if self.bad_count >= self.patience: if runner.iter - self.last_iter < self.iteration_patience: print_log( @@ -345,7 +369,9 @@ def get_lr(self, runner: BaseRunner, base_lr: float): self.current_lr = max(self.current_lr * self.factor, self.min_lr) return self.current_lr + @check_input_parameters_type() def 
before_run(self, runner: BaseRunner): + """Called before_run in ReduceLROnPlateauLrUpdaterHook.""" # TODO: remove overloaded method after fixing the issue # https://github.com/open-mmlab/mmdetection/issues/6572 for group in runner.optimizer.param_groups: @@ -353,13 +379,17 @@ def before_run(self, runner: BaseRunner): self.base_lr = [group["initial_lr"] for group in runner.optimizer.param_groups] self.bad_count = 0 self.last_iter = 0 - self.current_lr = None + self.current_lr = -1.0 self.best_score = self.init_value_map[self.rule] -@HOOKS.register_module() +@HOOKS.register_module(force=True) class StopLossNanTrainingHook(Hook): + """StopLossNanTrainingHook.""" + + @check_input_parameters_type() def after_train_iter(self, runner: BaseRunner): + """Called after_train_iter in StopLossNanTrainingHook.""" if isnan(runner.outputs["loss"].item()): logger.warning("Early Stopping since loss is NaN") runner.should_stop = True diff --git a/otx/mpa/modules/hooks/eval_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/eval_hook.py similarity index 83% rename from otx/mpa/modules/hooks/eval_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/eval_hook.py index eed4e743996..667667d3fa4 100644 --- a/otx/mpa/modules/hooks/eval_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/eval_hook.py @@ -1,19 +1,19 @@ -# Copyright (C) 2022 Intel Corporation +"""Module for definig CustomEvalHook for classification task.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from os import path as osp import mmcv -import numpy as np import torch -from mmcv.runner import HOOKS, EvalHook, Hook +from mmcv.runner import HOOKS, EvalHook from torch.utils.data import DataLoader @HOOKS.register_module() class CustomEvalHook(EvalHook): - """Custom Evaluation hook for the MPA + """Custom Evaluation hook for the MPA. Args: dataloader (DataLoader): A PyTorch dataloader. 
@@ -44,7 +44,7 @@ def __init__( self.save_mode = self.eval_kwargs.get("save_mode", "score") def _do_evaluate(self, runner, ema=False): - """perform evaluation""" + """Perform evaluation.""" results = single_gpu_test(runner.model, self.dataloader) if ema and hasattr(runner, "ema_model") and (runner.epoch >= self.ema_eval_start_epoch): results_ema = single_gpu_test(runner.ema_model.module, self.dataloader) @@ -53,17 +53,20 @@ def _do_evaluate(self, runner, ema=False): self.evaluate(runner, results) def after_train_epoch(self, runner): + """Check whether current epoch is to be evaluated or not.""" if not self.by_epoch or not self.every_n_epochs(runner, self.interval): return self._do_evaluate(runner, ema=True) def after_train_iter(self, runner): + """Check whether current iteration is to be evaluated or not.""" if self.by_epoch or not self.every_n_iters(runner, self.interval): return runner.log_buffer.clear() self._do_evaluate(runner) def evaluate(self, runner, results, results_ema=None): + """Evaluate predictions from model with ground truth.""" eval_res = self.dataloader.dataset.evaluate(results, logger=runner.logger, **self.eval_kwargs) score = eval_res[self.metric] for name, val in eval_res.items(): @@ -84,11 +87,12 @@ def evaluate(self, runner, results, results_ema=None): def single_gpu_test(model, data_loader): + """Single gpu test for inference.""" model.eval() results = [] dataset = data_loader.dataset prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): + for data in data_loader: with torch.no_grad(): result = model(return_loss=False, **data) results.append(result) @@ -102,14 +106,16 @@ def single_gpu_test(model, data_loader): @HOOKS.register_module() class DistCustomEvalHook(CustomEvalHook): + """Distributed Custom Evaluation Hook for Multi-GPU environment.""" + def __init__(self, dataloader, interval=1, gpu_collect=False, by_epoch=True, **eval_kwargs): if not isinstance(dataloader, DataLoader): raise TypeError("dataloader must be a pytorch DataLoader, but got " f"{type(dataloader)}") self.gpu_collect = gpu_collect - super(DistCustomEvalHook, self).__init__(dataloader, interval, by_epoch=by_epoch, **eval_kwargs) + super().__init__(dataloader, interval, by_epoch=by_epoch, **eval_kwargs) def _do_evaluate(self, runner): - """perform evaluation""" + """Perform evaluation.""" from mmcls.apis import multi_gpu_test results = multi_gpu_test( @@ -120,11 +126,13 @@ def _do_evaluate(self, runner): self.evaluate(runner, results) def after_train_epoch(self, runner): + """Check whether current epoch is to be evaluated or not.""" if not self.by_epoch or not self.every_n_epochs(runner, self.interval): return self._do_evaluate(runner) def after_train_iter(self, runner): + """Check whether current iteration is to be evaluated or not.""" if self.by_epoch or not self.every_n_iters(runner, self.interval): return runner.log_buffer.clear() diff --git a/otx/algorithms/common/adapters/mmcv/hooks/force_train_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/force_train_hook.py new file mode 100644 index 00000000000..0d47e67841c --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/force_train_hook.py @@ -0,0 +1,38 @@ +"""Collections of hooks for common OTX algorithms.""" + +# Copyright (C) 2021-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from mmcv.runner.hooks import HOOKS, Hook + +from otx.algorithms.common.utils.logger import get_logger + +logger = get_logger() + + +@HOOKS.register_module() +class ForceTrainModeHook(Hook): + """Force train mode for model. + + This is a workaround of a bug in EvalHook from MMCV. + If a model evaluation is enabled before training by setting 'start=0' in EvalHook, + EvalHook does not put a model in a training mode again after evaluation. + + This simple hook forces to put a model in a training mode before every train epoch + with the lowest priority. + """ + + def before_train_epoch(self, runner): + """Make sure to put a model in a training mode before train epoch.""" + runner.model.train() diff --git a/otx/mpa/modules/hooks/fp16_sam_optimizer_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/fp16_sam_optimizer_hook.py similarity index 93% rename from otx/mpa/modules/hooks/fp16_sam_optimizer_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/fp16_sam_optimizer_hook.py index 34fc575bf07..410b4ee65dc 100644 --- a/otx/mpa/modules/hooks/fp16_sam_optimizer_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/fp16_sam_optimizer_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Module for Sharpness-aware Minimization optimizer hook implementation for MMCV Runners with FP16 precision.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,7 +9,7 @@ @HOOKS.register_module() class Fp16SAMOptimizerHook(Fp16OptimizerHook): - """Sharpness-aware Minimization optimizer hook + """Sharpness-aware Minimization optimizer hook. Implemented as OptimizerHook for MMCV Runners - Paper ref: https://arxiv.org/abs/2010.01412 @@ -23,7 +24,8 @@ def __init__(self, rho=0.05, start_epoch=1, **kwargs): raise ValueError("rho should be greater than 0 for SAM optimizer") def after_train_iter(self, runner): - """Perform SAM optimization + """Perform SAM optimization. + 0. compute current loss (DONE IN model.train_step()) 1. compute current gradient 2. move param to the approximate local maximum: w + e(w) = w + rho*norm_grad @@ -77,6 +79,7 @@ def after_train_iter(self, runner): runner.meta.setdefault("fp16", {})["loss_scaler"] = self.loss_scaler.state_dict() runner.log_buffer.update({"sharpness": float(max_loss - curr_loss), "max_loss": float(max_loss)}) + return None def _get_current_batch(self, model): if hasattr(model, "module"): diff --git a/otx/mpa/modules/hooks/ib_loss_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/ib_loss_hook.py similarity index 74% rename from otx/mpa/modules/hooks/ib_loss_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/ib_loss_hook.py index 7525ca87e9c..a9a2fdf3007 100644 --- a/otx/mpa/modules/hooks/ib_loss_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/ib_loss_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Module for defining a hook for IB loss using mmcls.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,9 +9,13 @@ @HOOKS.register_module() class IBLossHook(Hook): + """Hook for IB loss. 
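Both SAM hooks in this patch (the FP16 variant above and the plain `SAMOptimizerHook` later) implement the two-pass scheme listed in the docstring. A plain-PyTorch reduction of that scheme, without the MMCV runner, loss scaler, or `start_epoch` gating (the function name, the epsilon guard, and the assumed `loss_fn(model)` closure are illustrative):

```python
import torch
from torch import nn


def sam_step(model: nn.Module, loss_fn, optimizer: torch.optim.Optimizer, rho: float = 0.05) -> None:
    """One two-pass SAM update (https://arxiv.org/abs/2010.01412).

    `loss_fn(model)` is assumed to compute the training loss on the current batch.
    """
    # 1. first pass: gradient at the current weights w
    loss_fn(model).backward()
    grads = [p.grad for p in model.parameters() if p.grad is not None]
    grad_norm = torch.norm(torch.stack([g.norm(2) for g in grads]), 2)
    # 2. climb to the approximate local maximum: w + e(w) = w + rho * grad / ||grad||
    perturbations = []
    with torch.no_grad():
        for p in model.parameters():
            if p.grad is None:
                continue
            e_w = rho * p.grad / (grad_norm + 1e-12)
            p.add_(e_w)
            perturbations.append((p, e_w))
    optimizer.zero_grad()
    # 3. second pass: the sharpness-aware gradient, evaluated at w + e(w)
    loss_fn(model).backward()
    # 4. restore the original weights and descend with the SAM gradient
    with torch.no_grad():
        for p, e_w in perturbations:
            p.sub_(e_w)
    optimizer.step()  # param -= lr * sam_grad
    optimizer.zero_grad()
```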
+ + It passes the number of data per class and current epoch to IB loss class. + """ + def __init__(self, dst_classes): - """Hook for IB loss. - It passes the number of data per class and current epoch to IB loss class. + """Initialize the IBLossHook. Args: dst_classes (list): A list of classes including new_classes to be newly learned @@ -19,10 +24,8 @@ def __init__(self, dst_classes): self.dst_classes = dst_classes def before_train_epoch(self, runner): - # get loss from model + """Get loss from model and pass the number of data per class and current epoch to IB loss.""" model_loss = self._get_model_loss(runner) - - # pass the number of data per class and current epoch to IB loss if runner.epoch == 0: dataset = runner.data_loader.dataset num_data = self._get_num_data(dataset) diff --git a/otx/algorithms/common/adapters/mmcv/hooks/logger_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/logger_hook.py new file mode 100644 index 00000000000..f9effae0a7e --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/logger_hook.py @@ -0,0 +1,87 @@ +"""Logger hooks.""" +from collections import defaultdict +from typing import Any, Dict, Optional + +from mmcv.runner import BaseRunner +from mmcv.runner.dist_utils import master_only +from mmcv.runner.hooks import HOOKS, Hook, LoggerHook + +from otx.algorithms.common.utils.logger import get_logger +from otx.api.utils.argument_checks import check_input_parameters_type + +logger = get_logger() + + +@HOOKS.register_module() +class OTXLoggerHook(LoggerHook): + """OTXLoggerHook for Logging.""" + + class Curve: + """Curve with x (epochs) & y (scores).""" + + def __init__(self): + self.x = [] + self.y = [] + + def __repr__(self): + """Repr function.""" + points = [] + for x, y in zip(self.x, self.y): + points.append(f"({x},{y})") + return "curve[" + ",".join(points) + "]" + + @check_input_parameters_type() + def __init__( + self, + curves: Optional[Dict[Any, Curve]] = None, + interval: int = 10, + ignore_last: bool = True, + reset_flag: bool = True, + by_epoch: bool = True, + ): + super().__init__(interval, ignore_last, reset_flag, by_epoch) + self.curves = curves if curves is not None else defaultdict(self.Curve) + + @master_only + @check_input_parameters_type() + def log(self, runner: BaseRunner): + """Log function for OTXLoggerHook.""" + tags = self.get_loggable_tags(runner, allow_text=False, tags_to_skip=()) + if runner.max_epochs is not None: + normalized_iter = self.get_iter(runner) / runner.max_iters * runner.max_epochs + else: + normalized_iter = self.get_iter(runner) + for tag, value in tags.items(): + curve = self.curves[tag] + # Remove duplicates. + if len(curve.x) > 0 and curve.x[-1] == normalized_iter: + curve.x.pop() + curve.y.pop() + curve.x.append(normalized_iter) + curve.y.append(value) + + @check_input_parameters_type() + def after_train_epoch(self, runner: BaseRunner): + """Called after_train_epoch in OTXLoggerHook.""" + # Iteration counter is increased right after the last iteration in the epoch, + # temporarily decrease it back. + runner._iter -= 1 + super().after_train_epoch(runner) + runner._iter += 1 + + +@HOOKS.register_module() +class LoggerReplaceHook(Hook): + """replace logger in the runner to the MPA logger. + + DO NOT INCLUDE this hook to the recipe directly. + mpa will add this hook to all recipe internally. 
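`OTXLoggerHook` above keeps one `Curve` per tag and overwrites a point when the same normalized iteration is logged twice. The data structure in isolation (the `log_point` helper is hypothetical, added only to show the dedup step):

```python
from collections import defaultdict


class Curve:
    """x/y pairs as kept by OTXLoggerHook; x is a fractional epoch index."""

    def __init__(self):
        self.x, self.y = [], []


def log_point(curves, tag: str, normalized_iter: float, value: float) -> None:
    """Append a point, replacing any previous point logged at the same x."""
    curve = curves[tag]
    if curve.x and curve.x[-1] == normalized_iter:
        curve.x.pop()
        curve.y.pop()
    curve.x.append(normalized_iter)
    curve.y.append(value)


curves = defaultdict(Curve)
log_point(curves, "val/accuracy", 1.0, 0.72)
log_point(curves, "val/accuracy", 1.0, 0.74)  # overwrites the first point
print(curves["val/accuracy"].y)               # [0.74]
```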
+ """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def before_run(self, runner): + """Replace logger.""" + runner.logger = logger + logger.info("logger in the runner is replaced to the MPA logger") diff --git a/otx/mpa/modules/hooks/model_ema_v2_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/model_ema_v2_hook.py similarity index 90% rename from otx/mpa/modules/hooks/model_ema_v2_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/model_ema_v2_hook.py index b98c646dee1..b22f7989776 100644 --- a/otx/mpa/modules/hooks/model_ema_v2_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/model_ema_v2_hook.py @@ -1,21 +1,23 @@ -# Copyright (C) 2022 Intel Corporation +"""Model EMA V2 hooks.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from copy import deepcopy import torch -import torch.nn as nn from mmcv.runner import HOOKS, Hook +from torch import nn -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() @HOOKS.register_module() class ModelEmaV2Hook(Hook): - """ + r"""ModelEmaV2Hook. + Source model paramters would be exponentially averaged onto destination model pararmeters on given intervals .. math:: @@ -37,8 +39,10 @@ def __init__(self, ema_decay=0.9995, interval=1, start_epoch=0, dataset_len_thr= self.interval = interval self.start_epoch = start_epoch self.dataset_len_thr = dataset_len_thr + self.use_ema = None def before_train_epoch(self, runner): + """Make emav2 model before run epoch.""" if not hasattr(self, "use_ema"): self.use_ema = len(runner.data_loader.dataset) > self.dataset_len_thr @@ -48,6 +52,7 @@ def before_train_epoch(self, runner): runner.ema_model = ema_model def before_run(self, runner): + """Log before run.""" logger.info("\t* EMA V2 Enable") def after_train_iter(self, runner): @@ -67,7 +72,8 @@ def after_train_iter(self, runner): class ModelEmaV2(nn.Module): - """Model Exponential Moving Average V2 + """Model Exponential Moving Average V2. + Keep a moving average of everything in the model state_dict (parameters and buffers). V2 of this module is simpler, it does not match params/buffers based on name but simply iterates in order. It works with torchscript (JIT of full model). 
@@ -86,7 +92,7 @@ class ModelEmaV2(nn.Module): """ def __init__(self, model, decay=0.9999, dataset_len_thr=None, device=None): - super(ModelEmaV2, self).__init__() + super().__init__() # make a copy of the model for accumulating moving average of weights self.module = deepcopy(model) self.module.eval() @@ -98,6 +104,10 @@ def __init__(self, model, decay=0.9999, dataset_len_thr=None, device=None): if self.device is not None: self.module.to(device=device) + def forward(self): + """Forward.""" + return + def _update(self, update_fn): with torch.no_grad(): for ema_v, model_v in zip(self.dst_model.values(), self.src_model.values()): @@ -106,4 +116,5 @@ def _update(self, update_fn): ema_v.copy_(update_fn(ema_v, model_v)) def update(self): + """Update.""" self._update(update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m) diff --git a/otx/algorithms/common/adapters/mmcv/hooks/no_bias_decay_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/no_bias_decay_hook.py new file mode 100644 index 00000000000..06e1e06e485 --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/no_bias_decay_hook.py @@ -0,0 +1,73 @@ +"""Module for NoBiasDecayHook used in classification.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from mmcv.runner import HOOKS, Hook +from torch import nn + +from otx.algorithms.common.utils.logger import get_logger + +logger = get_logger() + + +@HOOKS.register_module() +class NoBiasDecayHook(Hook): + """Hook for No Bias Decay Method (Bag of Tricks for Image Classification). + + This hook divides model's weight & bias to 3 parameter groups + [weight with decay, weight without decay, bias without decay]. + """ + + def before_train_epoch(self, runner): + """Split weights into decay/no-decay groups.""" + weight_decay, bias_no_decay, weight_no_decay = [], [], [] + for module in runner.model.modules(): + if isinstance(module, (nn.Conv2d, nn.Linear)): + weight_decay.append(module.weight) + if module.bias is not None: + bias_no_decay.append(module.bias) + elif hasattr(module, "weight") or hasattr(module, "bias"): + if hasattr(module, "weight"): + weight_no_decay.append(module.weight) + if hasattr(module, "bias"): + bias_no_decay.append(module.bias) + elif len(list(module.children())) == 0: + for p in module.parameters(): + weight_decay.append(p) + + weight_decay_group = runner.optimizer.param_groups[0].copy() + weight_decay_group["params"] = weight_decay + + bias_group = runner.optimizer.param_groups[0].copy() + bias_group["params"] = bias_no_decay + bias_group["weight_decay"] = 0.0 + bias_group["lr"] *= 2 + + weight_no_decay_group = runner.optimizer.param_groups[0].copy() + weight_no_decay_group["params"] = weight_no_decay + weight_no_decay_group["weight_decay"] = 0.0 + + param_groups = [weight_decay_group, bias_group, weight_no_decay_group] + runner.optimizer.param_groups = param_groups + + def after_train_epoch(self, runner): + """Merge splited groups before saving checkpoint.""" + params = [] + for module in runner.model.modules(): + if isinstance(module, (nn.Conv2d, nn.Linear)): + params.append(module.weight) + if module.bias is not None: + params.append(module.bias) + elif hasattr(module, "weight") or hasattr(module, "bias"): + if hasattr(module, "weight"): + params.append(module.weight) + if hasattr(module, "bias"): + params.append(module.bias) + elif len(list(module.children())) == 0: + for p in module.parameters(): + params.append(p) + + param_groups = runner.optimizer.param_groups[0].copy() + param_groups["params"] = params + 
runner.optimizer.param_groups = [param_groups] diff --git a/otx/algorithms/common/adapters/mmcv/hooks/progress_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/progress_hook.py new file mode 100644 index 00000000000..0b62a7fda18 --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/progress_hook.py @@ -0,0 +1,101 @@ +"""Collections of hooks for common OTX algorithms.""" + +# Copyright (C) 2021-2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +import math + +from mmcv.runner import BaseRunner +from mmcv.runner.hooks import HOOKS, Hook + +from otx.algorithms.common.utils.logger import get_logger +from otx.api.usecases.reporting.time_monitor_callback import TimeMonitorCallback +from otx.api.utils.argument_checks import check_input_parameters_type + +logger = get_logger() + + +@HOOKS.register_module() +class OTXProgressHook(Hook): + """OTXProgressHook for getting progress.""" + + @check_input_parameters_type() + def __init__(self, time_monitor: TimeMonitorCallback, verbose: bool = False): + super().__init__() + self.time_monitor = time_monitor + self.verbose = verbose + self.print_threshold = 1 + + @check_input_parameters_type() + def before_run(self, runner: BaseRunner): + """Called before_run in OTXProgressHook.""" + total_epochs = runner.max_epochs if runner.max_epochs is not None else 1 + self.time_monitor.total_epochs = total_epochs + self.time_monitor.train_steps = runner.max_iters // total_epochs if total_epochs else 1 + self.time_monitor.steps_per_epoch = self.time_monitor.train_steps + self.time_monitor.val_steps + self.time_monitor.total_steps = max(math.ceil(self.time_monitor.steps_per_epoch * total_epochs), 1) + self.time_monitor.current_step = 0 + self.time_monitor.current_epoch = 0 + self.time_monitor.on_train_begin() + + @check_input_parameters_type() + def before_epoch(self, runner: BaseRunner): + """Called before_epoch in OTXProgressHook.""" + self.time_monitor.on_epoch_begin(runner.epoch) + + @check_input_parameters_type() + def after_epoch(self, runner: BaseRunner): + """Called after_epoch in OTXProgressHook.""" + # put some runner's training status to use on the other hooks + runner.log_buffer.output["current_iters"] = runner.iter + self.time_monitor.on_epoch_end(runner.epoch, runner.log_buffer.output) + + @check_input_parameters_type() + def before_iter(self, runner: BaseRunner): + """Called before_iter in OTXProgressHook.""" + self.time_monitor.on_train_batch_begin(1) + + @check_input_parameters_type() + def after_iter(self, runner: BaseRunner): + """Called after_iter in OTXProgressHook.""" + # put some runner's training status to use on the other hooks + runner.log_buffer.output["current_iters"] = runner.iter + self.time_monitor.on_train_batch_end(1) + if self.verbose: + progress = self.progress + if progress >= self.print_threshold: + logger.warning(f"training progress {progress:.0f}%") + self.print_threshold = (progress + 10) // 10 * 10 + + @check_input_parameters_type() + def before_val_iter(self, runner: BaseRunner): + """Called 
before_val_iter in OTXProgressHook.""" + self.time_monitor.on_test_batch_begin(1, logger) + + @check_input_parameters_type() + def after_val_iter(self, runner: BaseRunner): + """Called after_val_iter in OTXProgressHook.""" + self.time_monitor.on_test_batch_end(1, logger) + + @check_input_parameters_type() + def after_run(self, runner: BaseRunner): + """Called after_run in OTXProgressHook.""" + self.time_monitor.on_train_end(1) + if self.time_monitor.update_progress_callback: + self.time_monitor.update_progress_callback(int(self.time_monitor.get_progress())) + + @property + def progress(self): + """Getting Progress from time monitor.""" + return self.time_monitor.get_progress() diff --git a/otx/mpa/modules/hooks/recording_forward_hooks.py b/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py similarity index 80% rename from otx/mpa/modules/hooks/recording_forward_hooks.py rename to otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py index 4b3fc7011e2..5b3662b53f3 100644 --- a/otx/mpa/modules/hooks/recording_forward_hooks.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/recording_forward_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Recording forward hooks for explain mode.""" +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Sequence, Union +from typing import List, Sequence, Union import torch @@ -27,11 +28,13 @@ class BaseRecordingForwardHook(ABC): """While registered with the designated PyTorch module, this class caches feature vector during forward pass. + Example:: with BaseRecordingForwardHook(model.module.backbone) as hook: with torch.no_grad(): result = model(return_loss=False, **data) print(hook.records) + Args: module (torch.nn.Module): The PyTorch module to be registered in forward pass fpn_idx (int, optional): The layer index to be processed if the model is a FPN. @@ -41,60 +44,72 @@ class BaseRecordingForwardHook(ABC): def __init__(self, module: torch.nn.Module, fpn_idx: int = -1) -> None: self._module = module self._handle = None - self._records = [] + self._records = [] # type: List[torch.Tensor] self._fpn_idx = fpn_idx @property def records(self): + """Return records.""" return self._records @abstractmethod def func(self, feature_map: torch.Tensor, fpn_idx: int = -1) -> torch.Tensor: """This method get the feature vector or saliency map from the output of the module. + Args: x (torch.Tensor): Feature map from the backbone module fpn_idx (int, optional): The layer index to be processed if the model is a FPN. Defaults to 0 which uses the largest feature map from FPN. 
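The recording hooks introduced in the following hunks share one pattern: register a forward hook on the backbone inside a context manager, cache something derived from the output, and deregister on exit. A generic PyTorch reduction of that pattern (the class name, the `Conv2d` stand-in backbone, and the pooled-vector payload are all illustrative):

```python
import torch
from torch import nn


class RecordingHook:
    """Context-manager wrapper around a forward hook that caches per-sample vectors."""

    def __init__(self, module: nn.Module):
        self._module = module
        self._handle = None
        self.records = []

    def _on_forward(self, _module, _inputs, output):
        # Global-average-pool the feature map into one vector per image.
        vector = torch.nn.functional.adaptive_avg_pool2d(output, (1, 1))
        self.records.extend(vector.flatten(1).detach().cpu().numpy())

    def __enter__(self):
        self._handle = self._module.register_forward_hook(self._on_forward)
        return self

    def __exit__(self, *exc):
        self._handle.remove()


backbone = nn.Conv2d(3, 8, 3, padding=1)
with RecordingHook(backbone) as hook:
    backbone(torch.randn(2, 3, 16, 16))
print(len(hook.records))  # 2 feature vectors, one per image
```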
+ Returns: torch.Tensor (torch.Tensor): Saliency map for feature vector """ raise NotImplementedError - def _recording_forward(self, _: torch.nn.Module, input: torch.Tensor, output: torch.Tensor): + def _recording_forward( + self, _: torch.nn.Module, x: torch.Tensor, output: torch.Tensor + ): # pylint: disable=unused-argument tensors = self.func(output) tensors = tensors.detach().cpu().numpy() for tensor in tensors: self._records.append(tensor) def __enter__(self) -> BaseRecordingForwardHook: + """Enter.""" self._handle = self._module.backbone.register_forward_hook(self._recording_forward) return self def __exit__(self, exc_type, exc_value, traceback): + """Exit.""" self._handle.remove() class EigenCamHook(BaseRecordingForwardHook): + """EigenCamHook.""" + @staticmethod def func(feature_map: Union[torch.Tensor, Sequence[torch.Tensor]], fpn_idx: int = -1) -> torch.Tensor: + """Generate the saliency map.""" if isinstance(feature_map, (list, tuple)): feature_map = feature_map[fpn_idx] x = feature_map.type(torch.float) - bs, c, h, w = x.size() - reshaped_fmap = x.reshape((bs, c, h * w)).transpose(1, 2) + batch_size, channel, h, w = x.size() + reshaped_fmap = x.reshape((batch_size, channel, h * w)).transpose(1, 2) reshaped_fmap = reshaped_fmap - reshaped_fmap.mean(1)[:, None, :] - U, S, V = torch.linalg.svd(reshaped_fmap, full_matrices=True) - saliency_map = (reshaped_fmap @ V[:, 0][:, :, None]).squeeze(-1) + _, _, vh = torch.linalg.svd(reshaped_fmap, full_matrices=True) # pylint: disable=invalid-name + saliency_map = (reshaped_fmap @ vh[:, 0][:, :, None]).squeeze(-1) max_values, _ = torch.max(saliency_map, -1) min_values, _ = torch.min(saliency_map, -1) saliency_map = 255 * (saliency_map - min_values[:, None]) / ((max_values - min_values + 1e-12)[:, None]) - saliency_map = saliency_map.reshape((bs, h, w)) + saliency_map = saliency_map.reshape((batch_size, h, w)) saliency_map = saliency_map.to(torch.uint8) return saliency_map class ActivationMapHook(BaseRecordingForwardHook): + """ActivationMapHook.""" + @staticmethod def func(feature_map: Union[torch.Tensor, Sequence[torch.Tensor]], fpn_idx: int = -1) -> torch.Tensor: """Generate the saliency map by average feature maps then normalizing to (0, 255).""" @@ -104,20 +119,22 @@ def func(feature_map: Union[torch.Tensor, Sequence[torch.Tensor]], fpn_idx: int ), f"fpn_idx: {fpn_idx} is out of scope of feature_map length {len(feature_map)}!" 
feature_map = feature_map[fpn_idx] - bs, c, h, w = feature_map.size() + batch_size, _, h, w = feature_map.size() activation_map = torch.mean(feature_map, dim=1) - activation_map = activation_map.reshape((bs, h * w)) + activation_map = activation_map.reshape((batch_size, h * w)) max_values, _ = torch.max(activation_map, -1) min_values, _ = torch.min(activation_map, -1) activation_map = 255 * (activation_map - min_values[:, None]) / (max_values - min_values + 1e-12)[:, None] - activation_map = activation_map.reshape((bs, h, w)) + activation_map = activation_map.reshape((batch_size, h, w)) activation_map = activation_map.to(torch.uint8) return activation_map class FeatureVectorHook(BaseRecordingForwardHook): + """FeatureVectorHook.""" + @staticmethod - def func(feature_map: Union[torch.Tensor, Sequence[torch.Tensor]]) -> torch.Tensor: + def func(feature_map: Union[torch.Tensor, Sequence[torch.Tensor]], fpn_idx: int = -1) -> torch.Tensor: """Generate the feature vector by average pooling feature maps.""" if isinstance(feature_map, (list, tuple)): # aggregate feature maps from Feature Pyramid Network @@ -129,8 +146,8 @@ def func(feature_map: Union[torch.Tensor, Sequence[torch.Tensor]]) -> torch.Tens class ReciproCAMHook(BaseRecordingForwardHook): - """ - Implementation of recipro-cam for class-wise saliency map + """Implementation of recipro-cam for class-wise saliency map. + recipro-cam: gradient-free reciprocal class activation map (https://arxiv.org/pdf/2209.14074.pdf) """ @@ -141,8 +158,7 @@ def __init__(self, module: torch.nn.Module, fpn_idx: int = -1) -> None: self._num_classes = module.head.num_classes def func(self, feature_map: Union[torch.Tensor, Sequence[torch.Tensor]], fpn_idx: int = -1) -> torch.Tensor: - """ - Generate the class-wise saliency maps using Recipro-CAM and then normalizing to (0, 255). + """Generate the class-wise saliency maps using Recipro-CAM and then normalizing to (0, 255). 
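All three saliency hooks above normalize each map to (0, 255) with the same min-max expression and a small epsilon against flat maps. Factored out as one function (the helper name is illustrative):

```python
import torch


def normalize_to_uint8(saliency: torch.Tensor) -> torch.Tensor:
    """Per-map min-max normalization to (0, 255), shared by the
    EigenCam / ActivationMap / ReciproCAM hooks; 1e-12 guards flat maps."""
    batch_size, h, w = saliency.size()
    flat = saliency.reshape(batch_size, h * w)
    max_values, _ = torch.max(flat, -1)
    min_values, _ = torch.min(flat, -1)
    flat = 255 * (flat - min_values[:, None]) / (max_values - min_values + 1e-12)[:, None]
    return flat.reshape(batch_size, h, w).to(torch.uint8)


print(normalize_to_uint8(torch.rand(1, 4, 4)).dtype)  # torch.uint8
```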
Args: feature_map (Union[torch.Tensor, List[torch.Tensor]]): feature maps from backbone or list of feature maps @@ -156,18 +172,18 @@ def func(self, feature_map: Union[torch.Tensor, Sequence[torch.Tensor]], fpn_idx if isinstance(feature_map, (list, tuple)): feature_map = feature_map[fpn_idx] - bs, c, h, w = feature_map.size() - saliency_maps = torch.empty(bs, self._num_classes, h, w) - for f in range(bs): - mosaic_feature_map = self._get_mosaic_feature_map(feature_map[f], c, h, w) + batch_size, channel, h, w = feature_map.size() + saliency_maps = torch.empty(batch_size, self._num_classes, h, w) + for f in range(batch_size): + mosaic_feature_map = self._get_mosaic_feature_map(feature_map[f], channel, h, w) mosaic_prediction = self._predict_from_feature_map(mosaic_feature_map) saliency_maps[f] = mosaic_prediction.transpose(0, 1).reshape((self._num_classes, h, w)) - saliency_maps = saliency_maps.reshape((bs, self._num_classes, h * w)) + saliency_maps = saliency_maps.reshape((batch_size, self._num_classes, h * w)) max_values, _ = torch.max(saliency_maps, -1) min_values, _ = torch.min(saliency_maps, -1) saliency_maps = 255 * (saliency_maps - min_values[:, :, None]) / (max_values - min_values + 1e-12)[:, :, None] - saliency_maps = saliency_maps.reshape((bs, self._num_classes, h, w)) + saliency_maps = saliency_maps.reshape((batch_size, self._num_classes, h, w)) saliency_maps = saliency_maps.to(torch.uint8) return saliency_maps @@ -182,11 +198,9 @@ def _predict_from_feature_map(self, x: torch.Tensor) -> torch.Tensor: def _get_mosaic_feature_map(self, feature_map: torch.Tensor, c: int, h: int, w: int) -> torch.Tensor: if MMCLS_AVAILABLE and self._neck is not None and isinstance(self._neck, GlobalAveragePooling): - """ - Optimization workaround for the GAP case (simulate GAP with more simple compute graph) - Possible due to static sparsity of mosaic_feature_map - Makes the downstream GAP operation to be dummy - """ + # Optimization workaround for the GAP case (simulate GAP with more simple compute graph) + # Possible due to static sparsity of mosaic_feature_map + # Makes the downstream GAP operation to be dummy feature_map_transposed = torch.flatten(feature_map, start_dim=1).transpose(0, 1)[:, :, None, None] mosaic_feature_map = feature_map_transposed / (h * w) else: diff --git a/otx/mpa/modules/hooks/sam_optimizer_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/sam_optimizer_hook.py similarity index 93% rename from otx/mpa/modules/hooks/sam_optimizer_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/sam_optimizer_hook.py index 29357dfda0f..ffa08b12ae8 100644 --- a/otx/mpa/modules/hooks/sam_optimizer_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/sam_optimizer_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""This module contains the Sharpness-aware Minimization optimizer hook implementation for MMCV Runners.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,7 +9,7 @@ @HOOKS.register_module() class SAMOptimizerHook(OptimizerHook): - """Sharpness-aware Minimization optimizer hook + """Sharpness-aware Minimization optimizer hook. Implemented as OptimizerHook for MMCV Runners - Paper ref: https://arxiv.org/abs/2010.01412 @@ -23,7 +24,8 @@ def __init__(self, rho=0.05, start_epoch=1, **kwargs): raise ValueError("rho should be greater than 0 for SAM optimizer") def after_train_iter(self, runner): - """Perform SAM optimization + """Perform SAM optimization. + 0. compute current loss (DONE IN model.train_step()) 1. 
compute current gradient 2. move param to the approximate local maximum: w + e(w) = w + rho*norm_grad @@ -73,6 +75,7 @@ def after_train_iter(self, runner): # Shaprpness-aware param update runner.optimizer.step() # param -= lr * sam_grad runner.log_buffer.update({"sharpness": float(max_loss - curr_loss), "max_loss": float(max_loss)}) + return None def _get_current_batch(self, model): if hasattr(model, "module"): diff --git a/otx/mpa/modules/hooks/semisl_cls_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/semisl_cls_hook.py similarity index 81% rename from otx/mpa/modules/hooks/semisl_cls_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/semisl_cls_hook.py index d53006862ea..178e3c9b3aa 100644 --- a/otx/mpa/modules/hooks/semisl_cls_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/semisl_cls_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Module for defining hook for semi-supervised learning for classification task.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -10,7 +11,7 @@ @HOOKS.register_module() class SemiSLClsHook(Hook): - """Hook for SemiSL for classification + """Hook for SemiSL for classification. This hook includes unlabeled warm-up loss coefficient (default: True): unlabeled_coef = (0.5 - cos(min(pi, 2 * pi * k) / K)) / 2 @@ -25,14 +26,14 @@ class SemiSLClsHook(Hook): If False, Semi-SL uses 1 as unlabeled loss coefficient """ - def __init__(self, total_steps=0, unlabeled_warmup=True, **kwargs): + def __init__(self, total_steps=0, unlabeled_warmup=True): self.unlabeled_warmup = unlabeled_warmup self.total_steps = total_steps self.current_step, self.unlabeled_coef = 0, 0 self.num_pseudo_label = 0 def before_train_iter(self, runner): - # Calculate the unlabeled warm-up loss coefficient before training iteration + """Calculate the unlabeled warm-up loss coefficient before training iteration.""" if self.unlabeled_warmup and self.unlabeled_coef < 1.0: if self.total_steps == 0: self.total_steps = runner.max_iters @@ -44,12 +45,12 @@ def before_train_iter(self, runner): self.current_step += 1 def after_train_iter(self, runner): + """Add the number of pseudo-labels correctly selected from iteration.""" model = self._get_model(runner) - # Add the number of pseudo-labels currently selected from iteration self.num_pseudo_label += int(model.head.num_pseudo_label) def after_epoch(self, runner): - # Add data related to Semi-SL to the log + """Add data related to Semi-SL to the log.""" if self.unlabeled_warmup: runner.log_buffer.output.update({"unlabeled_coef": round(self.unlabeled_coef, 4)}) runner.log_buffer.output.update({"pseudo_label": self.num_pseudo_label}) diff --git a/otx/mpa/modules/hooks/task_adapt_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/task_adapt_hook.py similarity index 86% rename from otx/mpa/modules/hooks/task_adapt_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/task_adapt_hook.py index 116115bada5..54f53b5e63f 100644 --- a/otx/mpa/modules/hooks/task_adapt_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/task_adapt_hook.py @@ -1,20 +1,23 @@ -# Copyright (C) 2022 Intel Corporation +"""Task adapt hook which selects a proper sampler.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from mmcv.runner import HOOKS, Hook, get_dist_info from torch.utils.data import DataLoader -from otx.mpa.modules.datasets.samplers.balanced_sampler import BalancedSampler -from otx.mpa.modules.datasets.samplers.cls_incr_sampler import ClsIncrSampler -from 
otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.torch.dataloaders.samplers import ( + BalancedSampler, + ClsIncrSampler, +) +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() @HOOKS.register_module() class TaskAdaptHook(Hook): - """Task Adaptation Hook for Task-Inc & Class-Inc + """Task Adaptation Hook for Task-Inc & Class-Inc. Args: src_classes (list): A list of old classes used in the existing model @@ -46,6 +49,7 @@ def __init__( logger.info(f"- Sampler flag: {self.sampler_flag}") def before_epoch(self, runner): + """Produce a proper sampler for task-adaptation.""" if self.sampler_flag: dataset = runner.data_loader.dataset batch_size = runner.data_loader.batch_size diff --git a/otx/algorithms/common/adapters/mmcv/hooks/two_crop_transform_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/two_crop_transform_hook.py new file mode 100644 index 00000000000..8522643a932 --- /dev/null +++ b/otx/algorithms/common/adapters/mmcv/hooks/two_crop_transform_hook.py @@ -0,0 +1,92 @@ +"""Two crop transform hook.""" +from typing import List + +from mmcv.runner import BaseRunner +from mmcv.runner.hooks import HOOKS, Hook + +from otx.algorithms.common.utils.logger import get_logger +from otx.api.utils.argument_checks import check_input_parameters_type + +logger = get_logger() + + +@HOOKS.register_module() +class TwoCropTransformHook(Hook): + """TwoCropTransformHook with every specific interval. + + This hook decides whether using single pipeline or two pipelines + implemented in `TwoCropTransform` for the current iteration. + + Args: + interval (int): If `interval` == 1, both pipelines is used. + If `interval` > 1, the first pipeline is used and then + both pipelines are used every `interval`. Defaults to 1. + by_epoch (bool): (TODO) Use `interval` by epoch. Defaults to False. + """ + + @check_input_parameters_type() + def __init__(self, interval: int = 1, by_epoch: bool = False): + assert interval > 0, f"interval (={interval}) must be positive value." + if by_epoch: + raise NotImplementedError("by_epoch is not implemented.") + + self.interval = interval + self.cnt = 0 + + @check_input_parameters_type() + def _get_dataset(self, runner: BaseRunner): + """Get dataset to handle `is_both`.""" + if hasattr(runner.data_loader.dataset, "dataset"): + # for RepeatDataset + dataset = runner.data_loader.dataset.dataset + else: + dataset = runner.data_loader.dataset + + return dataset + + # pylint: disable=inconsistent-return-statements + @check_input_parameters_type() + def _find_two_crop_transform(self, transforms: List[object]): + """Find TwoCropTransform among transforms.""" + for transform in transforms: + if transform.__class__.__name__ == "TwoCropTransform": + return transform + + @check_input_parameters_type() + def before_train_epoch(self, runner: BaseRunner): + """Called before_train_epoch in TwoCropTransformHook.""" + # Always keep `TwoCropTransform` enabled. + if self.interval == 1: + return + + dataset = self._get_dataset(runner) + two_crop_transform = self._find_two_crop_transform(dataset.pipeline.transforms) + if self.cnt == self.interval - 1: + # start using both pipelines + two_crop_transform.is_both = True + else: + two_crop_transform.is_both = False + + @check_input_parameters_type() + def after_train_iter(self, runner: BaseRunner): + """Called after_train_iter in TwoCropTransformHook.""" + # Always keep `TwoCropTransform` enabled. 
+ if self.interval == 1: + return + + if self.cnt < self.interval - 1: + # Instead of using `runner.every_n_iters` or `runner.every_n_inner_iters`, + # this condition is used to compare `self.cnt` with `self.interval` throughout the entire epochs. + self.cnt += 1 + + if self.cnt == self.interval - 1: + dataset = self._get_dataset(runner) + two_crop_transform = self._find_two_crop_transform(dataset.pipeline.transforms) + if not two_crop_transform.is_both: + # If `self.cnt` == `self.interval`-1, there are two cases, + # 1. `self.cnt` was updated in L709, so `is_both` must be on for the next iter. + # 2. if the current iter was already conducted, `is_both` must be off. + two_crop_transform.is_both = True + else: + two_crop_transform.is_both = False + self.cnt = 0 diff --git a/otx/mpa/modules/hooks/unbiased_teacher_hook.py b/otx/algorithms/common/adapters/mmcv/hooks/unbiased_teacher_hook.py similarity index 83% rename from otx/mpa/modules/hooks/unbiased_teacher_hook.py rename to otx/algorithms/common/adapters/mmcv/hooks/unbiased_teacher_hook.py index 922fe1b05ed..c45b805371f 100644 --- a/otx/mpa/modules/hooks/unbiased_teacher_hook.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/unbiased_teacher_hook.py @@ -1,24 +1,29 @@ -# Copyright (C) 2022 Intel Corporation +"""Unbiased-teacher hook.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from mmcv.runner import HOOKS -from otx.mpa.utils.logger import get_logger - -from .model_ema_hook import DualModelEMAHook +from otx.algorithms.common.adapters.mmcv.hooks.dual_model_ema_hook import ( + DualModelEMAHook, +) +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() @HOOKS.register_module() class UnbiasedTeacherHook(DualModelEMAHook): + """UnbiasedTeacherHook for semi-supervised learnings.""" + def __init__(self, min_pseudo_label_ratio=0.1, **kwargs): super().__init__(**kwargs) self.min_pseudo_label_ratio = min_pseudo_label_ratio self.unlabeled_loss_enabled = False def before_train_epoch(self, runner): + """Enable unlabeled loss if over start epoch.""" super().before_train_epoch(runner) if runner.epoch + 1 < self.start_epoch: diff --git a/otx/mpa/modules/hooks/workflow_hooks.py b/otx/algorithms/common/adapters/mmcv/hooks/workflow_hook.py similarity index 67% rename from otx/mpa/modules/hooks/workflow_hooks.py rename to otx/algorithms/common/adapters/mmcv/hooks/workflow_hook.py index 42da9a02b4a..741e7e3ff16 100644 --- a/otx/mpa/modules/hooks/workflow_hooks.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/workflow_hook.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Workflow hooks.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,65 +9,80 @@ from mmcv.utils import Registry -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() WORKFLOW_HOOKS = Registry("workflow_hooks") +# pylint: disable=unused-argument + def build_workflow_hook(config, *args, **kwargs): + """Build a workflow hook.""" logger.info(f"called build_workflow_hook({config})") whook_type = config.pop("type") # event = config.pop('event') if whook_type not in WORKFLOW_HOOKS: raise KeyError(f"not supported workflow hook type {whook_type}") - else: - whook_cls = WORKFLOW_HOOKS.get(whook_type) + whook_cls = WORKFLOW_HOOKS.get(whook_type) return whook_cls(*args, **kwargs, **config) class WorkflowHook: + """Workflow hook.""" + def __init__(self, name): self.name = name def before_workflow(self, workflow, idx=-1, 
results=None): - pass + """Before workflow.""" + return def after_workflow(self, workflow, idx=-1, results=None): - pass + """After workflow.""" + return def before_stage(self, workflow, idx, results=None): - pass + """Before stage.""" + return def after_stage(self, workflow, idx, results=None): - pass + """After stage.""" + return @WORKFLOW_HOOKS.register_module() class SampleLoggingHook(WorkflowHook): + """Sample logging hook.""" + def __init__(self, name=__name__, log_level="DEBUG"): - super(SampleLoggingHook, self).__init__(name) + super().__init__(name) self.logging = getattr(logger, log_level.lower()) - def before_stage(self, wf, stage_idx, results): + def before_stage(self, workflow, idx, results=None): + """Before stage.""" self.logging(f"called {self.name}.run()") - self.logging(f"stage index {stage_idx}, results keys = {results.keys()}") - result_key = f"{self.name}|{stage_idx}" + self.logging(f"stage index {idx}, results keys = {results.keys()}") + result_key = f"{self.name}|{idx}" results[result_key] = dict(message=f"this is a sample result of the {__name__} hook") @WORKFLOW_HOOKS.register_module() class WFProfileHook(WorkflowHook): + """Workflow profile hook.""" + def __init__(self, name=__name__, output_path=None): - super(WFProfileHook, self).__init__(name) + super().__init__(name) self.output_path = output_path self.profile = dict(start=0, end=0, elapsed=0, stages=dict()) logger.info(f"initialized {__name__}....") - def before_workflow(self, wf, idx=-1, results=None): + def before_workflow(self, workflow, idx=-1, results=None): + """Before workflow.""" self.profile["start"] = datetime.datetime.now() - def after_workflow(self, wf, idx=-1, results=None): + def after_workflow(self, workflow, idx=-1, results=None): + """After workflow.""" self.profile["end"] = datetime.datetime.now() self.profile["elapsed"] = self.profile["end"] - self.profile["start"] @@ -74,15 +90,17 @@ def after_workflow(self, wf, idx=-1, results=None): logger.info("** workflow profile results **") logger.info(str_dumps) if self.output_path is not None: - with open(self.output_path, "w") as f: + with open(self.output_path, "w") as f: # pylint: disable=unspecified-encoding f.write(str_dumps) - def before_stage(self, wf, idx=-1, results=None): + def before_stage(self, workflow, idx=-1, results=None): + """Before stage.""" stages = self.profile.get("stages") stages[f"{idx}"] = {} stages[f"{idx}"]["start"] = datetime.datetime.now() - def after_stage(self, wf, idx=-1, results=None): + def after_stage(self, workflow, idx=-1, results=None): + """After stage.""" stages = self.profile.get("stages") stages[f"{idx}"]["end"] = datetime.datetime.now() stages[f"{idx}"]["elapsed"] = stages[f"{idx}"]["end"] - stages[f"{idx}"]["start"] @@ -90,11 +108,14 @@ def after_stage(self, wf, idx=-1, results=None): @WORKFLOW_HOOKS.register_module() class AfterStageWFHook(WorkflowHook): + """After stage workflow hook.""" + def __init__(self, name, stage_cfg_updated_callback): self.callback = stage_cfg_updated_callback super().__init__(name) def after_stage(self, workflow, idx, results=None): + """After stage.""" logger.info(f"{__name__}: called after_stage()") name = copy.deepcopy(workflow.stages[idx].name) cfg = copy.deepcopy(workflow.stages[idx].cfg) diff --git a/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py b/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py index 24f4772fc3a..e16541f6327 100644 --- a/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py +++ 
b/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnet.py @@ -21,7 +21,7 @@ from torch import nn from torch.nn import init -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from ..builder import BACKBONES diff --git a/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py b/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py index bbf1367c6ea..8e5c3dcd014 100644 --- a/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py +++ b/otx/algorithms/common/adapters/mmcv/models/backbones/efficientnetv2.py @@ -17,7 +17,7 @@ from mmcv.runner import load_checkpoint from torch import nn -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from ..builder import BACKBONES diff --git a/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py b/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py index 6cf7fac5030..8d5630a1a51 100644 --- a/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py +++ b/otx/algorithms/common/adapters/mmcv/models/backbones/mobilenetv3.py @@ -19,7 +19,7 @@ from mmcv.runner import load_checkpoint from torch import nn -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from ..builder import BACKBONES diff --git a/otx/algorithms/common/adapters/mmcv/nncf/patches.py b/otx/algorithms/common/adapters/mmcv/nncf/patches.py index 0ebd3653ae4..bf0adc3e489 100644 --- a/otx/algorithms/common/adapters/mmcv/nncf/patches.py +++ b/otx/algorithms/common/adapters/mmcv/nncf/patches.py @@ -32,17 +32,17 @@ def _evaluation_wrapper(self, fn, runner, *args, **kwargs): NNCF_PATCHER.patch("mmcv.runner.EvalHook.evaluate", _evaluation_wrapper) -NNCF_PATCHER.patch("otx.mpa.modules.hooks.eval_hook.CustomEvalHook.evaluate", _evaluation_wrapper) +NNCF_PATCHER.patch("otx.algorithms.common.adapters.mmcv.hooks.eval_hook.CustomEvalHook.evaluate", _evaluation_wrapper) NNCF_PATCHER.patch( - "otx.mpa.modules.hooks.recording_forward_hooks.FeatureVectorHook.func", + "otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook.FeatureVectorHook.func", no_nncf_trace_wrapper, ) NNCF_PATCHER.patch( - "otx.mpa.modules.hooks.recording_forward_hooks.ActivationMapHook.func", + "otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook.ActivationMapHook.func", no_nncf_trace_wrapper, ) NNCF_PATCHER.patch( - "otx.mpa.modules.hooks.recording_forward_hooks.ReciproCAMHook.func", + "otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook.ReciproCAMHook.func", no_nncf_trace_wrapper, ) diff --git a/otx/algorithms/common/adapters/mmcv/nncf/utils.py b/otx/algorithms/common/adapters/mmcv/nncf/utils.py index 82a0f5c34ac..459c0dc1180 100644 --- a/otx/algorithms/common/adapters/mmcv/nncf/utils.py +++ b/otx/algorithms/common/adapters/mmcv/nncf/utils.py @@ -26,7 +26,7 @@ no_nncf_trace, ) from otx.algorithms.common.utils import get_arg_spec -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() diff --git a/tests/unit/mpa/modules/models/__init__.py b/otx/algorithms/common/adapters/mmcv/pipelines/__init__.py similarity index 66% rename from tests/unit/mpa/modules/models/__init__.py rename to otx/algorithms/common/adapters/mmcv/pipelines/__init__.py index 4683a907217..274a4d10038 100644 --- a/tests/unit/mpa/modules/models/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/pipelines/__init__.py @@ 
-1,4 +1,4 @@ -"""Test for otx.mpa.modules.models""" - +"""Pipelines for mmcv.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +# diff --git a/tests/unit/mpa/modules/models/scalar_schedulers/__init__.py b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/__init__.py similarity index 60% rename from tests/unit/mpa/modules/models/scalar_schedulers/__init__.py rename to otx/algorithms/common/adapters/mmcv/pipelines/transforms/__init__.py index 57e225a8be4..48f52baf0c8 100644 --- a/tests/unit/mpa/modules/models/scalar_schedulers/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/__init__.py @@ -1,4 +1,4 @@ -"""Test for otx.mpa.modules.models.scheculers""" - +"""Transforms for mmcv.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +# diff --git a/otx/mpa/modules/datasets/pipelines/transforms/augments.py b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/augments.py similarity index 69% rename from otx/mpa/modules/datasets/pipelines/transforms/augments.py rename to otx/algorithms/common/adapters/mmcv/pipelines/transforms/augments.py index 8e92e9a41a7..52f88018d97 100644 --- a/otx/mpa/modules/datasets/pipelines/transforms/augments.py +++ b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/augments.py @@ -1,3 +1,4 @@ +"""Module for defining Augments and CythonAugments classes used for the classification task.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -5,181 +6,210 @@ import random from typing import Union -import numpy as np +from numpy import ndarray as CvImage from PIL import Image, ImageEnhance, ImageOps +from PIL.Image import Image as PILImage from PIL.Image import Resampling -import otx.mpa.modules.datasets.pipelines.transforms.cython_augments.pil_augment as pil_aug +# type: ignore[attr-defined] +# pylint: disable = no-name-in-module +import otx.algorithms.common.adapters.mmcv.pipelines.transforms.cython_augments.pil_augment as pil_aug -PILImage = Image.Image -CvImage = np.ndarray ImgTypes = Union[PILImage, CvImage] -class Augments: +class Augments: # pylint: disable=unused-argument + """Augments class that implements various augmentations via plain PIL.""" + + @staticmethod def _check_args_tf(kwargs): def _interpolation(kwargs): interpolation = kwargs.pop("resample", Resampling.BILINEAR) if isinstance(interpolation, (list, tuple)): return random.choice(interpolation) - else: - return interpolation + return interpolation - kwargs["resample"] = _interpolation(kwargs) + new_kwargs = {**kwargs, "resample": _interpolation(kwargs)} + return new_kwargs @staticmethod def autocontrast(img: PILImage, *args, **kwargs) -> PILImage: + """Apply autocontrast for a given image.""" return ImageOps.autocontrast(img) @staticmethod def equalize(img: PILImage, *args, **kwargs) -> PILImage: + """Apply equalize for a given image.""" return ImageOps.equalize(img) @staticmethod def solarize(img: PILImage, threshold: int, *args, **kwargs) -> PILImage: + """Apply solarize for a given image.""" return ImageOps.solarize(img, threshold) @staticmethod def posterize(img: PILImage, bits_to_keep: int, *args, **kwargs) -> PILImage: + """Apply posterize for a given image.""" if bits_to_keep >= 8: return img - return ImageOps.posterize(img, bits_to_keep) @staticmethod def color(img: PILImage, factor: float, *args, **kwargs) -> PILImage: + """Apply color for a given image.""" return ImageEnhance.Color(img).enhance(factor) @staticmethod def contrast(img: PILImage, factor: float,
*args, **kwargs) -> PILImage: + """Apply contrast for a given image.""" return ImageEnhance.Contrast(img).enhance(factor) @staticmethod def brightness(img: PILImage, factor: float, *args, **kwargs) -> PILImage: + """Apply brightness for a given image.""" return ImageEnhance.Brightness(img).enhance(factor) @staticmethod def sharpness(img: PILImage, factor: float, *args, **kwargs) -> PILImage: + """Apply sharpness for a given image.""" return ImageEnhance.Sharpness(img).enhance(factor) @staticmethod def rotate(img: PILImage, degree: float, *args, **kwargs) -> PILImage: - Augments._check_args_tf(kwargs) + """Apply rotate for a given image.""" + kwargs = Augments._check_args_tf(kwargs) return img.rotate(degree, **kwargs) @staticmethod def shear_x(img: PILImage, factor: float, *args, **kwargs) -> PILImage: - Augments._check_args_tf(kwargs) + """Apply shear_x for a given image.""" + kwargs = Augments._check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) @staticmethod def shear_y(img: PILImage, factor: float, *args, **kwargs) -> PILImage: - Augments._check_args_tf(kwargs) + """Apply shear_y for a given image.""" + kwargs = Augments._check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) @staticmethod def translate_x_rel(img: PILImage, pct: float, *args, **kwargs) -> PILImage: - Augments._check_args_tf(kwargs) + """Apply translate_x_rel for a given image.""" + kwargs = Augments._check_args_tf(kwargs) pixels = pct * img.size[0] return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) @staticmethod def translate_y_rel(img: PILImage, pct: float, *args, **kwargs) -> PILImage: - Augments._check_args_tf(kwargs) + """Apply translate_y_rel for a given image.""" + kwargs = Augments._check_args_tf(kwargs) pixels = pct * img.size[1] return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) class CythonAugments(Augments): + """CythonAugments class that supports faster augmentation via Cython.""" + + @staticmethod def autocontrast(img: ImgTypes, *args, **kwargs) -> ImgTypes: + """Apply autocontrast for a given image.""" if Image.isImageType(img): return pil_aug.autocontrast(img) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def equalize(img: ImgTypes, *args, **kwargs) -> ImgTypes: + """Apply equalize for a given image.""" if Image.isImageType(img): return pil_aug.equalize(img) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def solarize(img: ImgTypes, threshold: int, *args, **kwargs) -> ImgTypes: + """Apply solarize for a given image.""" if Image.isImageType(img): return pil_aug.solarize(img, threshold) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def posterize(img: ImgTypes, bits_to_keep: int, *args, **kwargs) -> ImgTypes: + """Apply posterize for a given image.""" if Image.isImageType(img): if bits_to_keep >= 8: return img - return pil_aug.posterize(img, bits_to_keep) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def color(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes: + """Apply color for a given image.""" if Image.isImageType(img): return pil_aug.color(img, factor) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def contrast(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes: + """Apply contrast for a given image.""" if Image.isImageType(img): return pil_aug.contrast(img, factor) - raise
NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def brightness(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes: + """Apply brightness for a given image.""" if Image.isImageType(img): return pil_aug.brightness(img, factor) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def sharpness(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes: + """Apply sharpness for a given image.""" if Image.isImageType(img): return pil_aug.sharpness(img, factor) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def rotate(img: ImgTypes, degree: float, *args, **kwargs) -> ImgTypes: + """Apply rotate for a given image.""" Augments._check_args_tf(kwargs) if Image.isImageType(img): return pil_aug.rotate(img, degree) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def shear_x(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes: + """Apply shear_x for a given image.""" Augments._check_args_tf(kwargs) - if Image.isImageType(img): return pil_aug.shear_x(img, factor) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def shear_y(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes: + """Apply shear_y for a given image.""" if Image.isImageType(img): return pil_aug.shear_y(img, factor) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def translate_x_rel(img: ImgTypes, pct: float, *args, **kwargs) -> ImgTypes: + """Apply translate_x_rel for a given image.""" if Image.isImageType(img): return pil_aug.translate_x_rel(img, pct) - raise NotImplementedError(f"Unknown type: {type(img)}") + @staticmethod def translate_y_rel(img: ImgTypes, pct: float, *args, **kwargs) -> ImgTypes: + """Apply translate_y_rel for a given image.""" if Image.isImageType(img): return pil_aug.translate_y_rel(img, pct) - raise NotImplementedError(f"Unknown type: {type(img)}") - def blend(src: ImgTypes, dst: CvImage, weight: float): + @staticmethod + def blend(src: ImgTypes, dst: CvImage, weight: float = 0.0): + """Apply blend for a given image.""" assert isinstance(dst, CvImage), f"Type of dst should be numpy array, but type(dst)={type(dst)}."
- if Image.isImageType(src): return pil_aug.blend(src, dst, weight) - raise NotImplementedError(f"Unknown type: {type(src)}") diff --git a/tests/unit/mpa/modules/models/backbones/__init__.py b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/__init__.py similarity index 60% rename from tests/unit/mpa/modules/models/backbones/__init__.py rename to otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/__init__.py index 24b1785922f..25b78835d40 100644 --- a/tests/unit/mpa/modules/models/backbones/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/__init__.py @@ -1,4 +1,3 @@ -"""Test for otx.mpa.modules.models.backbones.""" - +"""Module to init cython augments.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/otx/mpa/modules/datasets/pipelines/transforms/cython_augments/cv_augment.pyx b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/cv_augment.pyx similarity index 100% rename from otx/mpa/modules/datasets/pipelines/transforms/cython_augments/cv_augment.pyx rename to otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/cv_augment.pyx diff --git a/otx/mpa/modules/datasets/pipelines/transforms/cython_augments/pil_augment.pyx b/otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/pil_augment.pyx similarity index 100% rename from otx/mpa/modules/datasets/pipelines/transforms/cython_augments/pil_augment.pyx rename to otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/pil_augment.pyx diff --git a/otx/mpa/__init__.py b/otx/algorithms/common/adapters/mmcv/tasks/__init__.py similarity index 68% rename from otx/mpa/__init__.py rename to otx/algorithms/common/adapters/mmcv/tasks/__init__.py index 37d737989a1..fd4acb753af 100644 --- a/otx/mpa/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/tasks/__init__.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Initialization of OTX Tasks with the MMCV framework.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -12,6 +13,8 @@ class MPAConstants: + """Various paths for MPA.""" + PACKAGE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) # PACKAGE_ROOT = os.path.dirname(Path(__file__).)
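As an aside, a minimal usage sketch of the relocated transforms (values are illustrative; assumes the package is installed and the Cython `pil_augment` extension has been built):

```python
from PIL import Image

from otx.algorithms.common.adapters.mmcv.pipelines.transforms.augments import (
    Augments,
    CythonAugments,
)

img = Image.new("RGB", (224, 224), color=(128, 64, 32))

plain = Augments.brightness(img, 1.2)       # pure-PIL implementation
fast = CythonAugments.brightness(img, 1.2)  # Cython-accelerated path, PIL input only
```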
RECIPES_PATH = os.path.join(PACKAGE_ROOT, "recipes") @@ -21,4 +24,13 @@ class MPAConstants: # print(f'pkg root ======> {MPAConstants.PACKAGE_ROOT}') -__all__ = [get_version, __version__, build, build_workflow_hook, Stage, get_available_types, Workflow, MPAConstants] +__all__ = [ + "get_version", + "__version__", + "build", + "build_workflow_hook", + "Stage", + "get_available_types", + "Workflow", + "MPAConstants", +] diff --git a/otx/mpa/builder.py b/otx/algorithms/common/adapters/mmcv/tasks/builder.py similarity index 70% rename from otx/mpa/builder.py rename to otx/algorithms/common/adapters/mmcv/tasks/builder.py index 7abc0d7e0f8..d4736aa3c78 100644 --- a/otx/mpa/builder.py +++ b/otx/algorithms/common/adapters/mmcv/tasks/builder.py @@ -1,3 +1,4 @@ +"""Build workflow.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,12 +9,15 @@ from mmcv import Config, ConfigDict, build_from_cfg -from .modules.hooks.workflow_hooks import WorkflowHook, build_workflow_hook -from .registry import STAGES -from .stage import get_available_types -from .utils.config_utils import MPAConfig -from .utils.logger import config_logger, get_logger -from .workflow import Workflow +from otx.algorithms.common.adapters.mmcv.hooks.workflow_hook import ( + WorkflowHook, + build_workflow_hook, +) +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.adapters.mmcv.tasks.stage import get_available_types +from otx.algorithms.common.adapters.mmcv.tasks.workflow import Workflow +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig +from otx.algorithms.common.utils.logger import config_logger, get_logger # from collections import defaultdict @@ -78,6 +82,7 @@ def __build_workflow(config): def build(config, mode=None, stage_type=None, common_cfg=None): + """Build workflow.""" logger.info("called build_recipe()") logger.debug(f"[args] config = {config}") @@ -92,23 +97,22 @@ def build(config, mode=None, stage_type=None, common_cfg=None): if hasattr(config, "stages"): # build as workflow return __build_workflow(config) - else: - # build as stage - if not hasattr(config, "type"): - logger.info("seems to be passed stage yaml...") - supported_stage_types = get_available_types() - if stage_type in supported_stage_types: - cfg_dict = ConfigDict( - dict( - type=stage_type, - name=f"{stage_type}-{mode}", - mode=mode, - config=config, - index=0, - ) + # build as stage + if not hasattr(config, "type"): + logger.info("seems to be passed stage yaml...") + supported_stage_types = get_available_types() + if stage_type in supported_stage_types: + cfg_dict = ConfigDict( + dict( + type=stage_type, + name=f"{stage_type}-{mode}", + mode=mode, + config=config, + index=0, ) - else: - msg = f"type {stage_type} is not in {supported_stage_types}" - logger.error(msg) - raise RuntimeError(msg) - return __build_stage(cfg_dict, common_cfg=common_cfg) + ) + else: + msg = f"type {stage_type} is not in {supported_stage_types}" + logger.error(msg) + raise RuntimeError(msg) + return __build_stage(cfg_dict, common_cfg=common_cfg) diff --git a/otx/mpa/exporter_mixin.py b/otx/algorithms/common/adapters/mmcv/tasks/exporter_mixin.py similarity index 89% rename from otx/mpa/exporter_mixin.py rename to otx/algorithms/common/adapters/mmcv/tasks/exporter_mixin.py index 9597ec3d25f..0601ace3f3b 100644 --- a/otx/mpa/exporter_mixin.py +++ b/otx/algorithms/common/adapters/mmcv/tasks/exporter_mixin.py @@ -1,17 +1,21 @@ -# Copyright (C) 2022 Intel Corporation +"""Base 
Exporter for OTX tasks.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import os import traceback -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() -class ExporterMixin(object): +class ExporterMixin: + """Exporter Mixin class for OTX export.""" + def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 + """Run export procedure.""" self._init_logger() logger.info("exporting the model") mode = kwargs.get("mode", "train") @@ -56,7 +60,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 ) else: self.naive_export(cfg.work_dir, model_builder, precision, cfg, model_name) - except Exception as ex: + except RuntimeError as ex: # output_model.model_status = ModelStatus.FAILED # raise RuntimeError('Optimization was unsuccessful.') from ex return { @@ -91,7 +95,8 @@ def mmdeploy_export( deploy_cfg, model_name="model", ): - from .deploy.apis import MMdeployExporter + """Export procedure using mmdeploy backend.""" + from otx.algorithms.common.adapters.mmdeploy.apis import MMdeployExporter if precision == "FP16": deploy_cfg.backend_config.mo_options.flags.append("--compress_to_fp16") @@ -99,4 +104,5 @@ def mmdeploy_export( @staticmethod def naive_export(output_dir, model_builder, precision, cfg, model_name="model"): + """Export using the PyTorch backend.""" raise NotImplementedError() diff --git a/otx/mpa/registry.py b/otx/algorithms/common/adapters/mmcv/tasks/registry.py similarity index 81% rename from otx/mpa/registry.py rename to otx/algorithms/common/adapters/mmcv/tasks/registry.py index be6786138f7..9381f330f96 100644 --- a/otx/mpa/registry.py +++ b/otx/algorithms/common/adapters/mmcv/tasks/registry.py @@ -1,3 +1,4 @@ +"""Registry of Stages and Explainers.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/mpa/stage.py b/otx/algorithms/common/adapters/mmcv/tasks/stage.py similarity index 78% rename from otx/mpa/stage.py rename to otx/algorithms/common/adapters/mmcv/tasks/stage.py index 24c0587f124..64d13882eb7 100644 --- a/otx/mpa/stage.py +++ b/otx/algorithms/common/adapters/mmcv/tasks/stage.py @@ -1,3 +1,4 @@ +"""Base stage for OTX tasks.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,7 +9,7 @@ import os.path as osp import random import time -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Dict, List, Optional import mmcv import numpy as np @@ -16,17 +17,19 @@ from mmcv import Config, ConfigDict from mmcv.runner import CheckpointLoader, wrap_fp16_model from torch import distributed as dist -from torch.utils.data import Dataset from otx.algorithms.common.adapters.mmcv.utils import ( build_dataloader, build_dataset, get_data_cfg, ) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + MPAConfig, + update_or_add_custom_hook, +) +from otx.algorithms.common.utils.logger import config_logger, get_logger from .registry import STAGES -from .utils.config_utils import MPAConfig, update_or_add_custom_hook -from .utils.logger import config_logger, get_logger logger = get_logger() @@ -53,20 +56,25 @@ def _set_random_seed(seed, deterministic=False): def get_available_types(): + """Return available stage types.""" types = [] - for k, v in STAGES.module_dict.items(): - # logger.info(f'key [{k}] = value[{v}]') - types.append(k) + for key in STAGES.module_dict: + types.append(key) return types MODEL_TASK = {"classification":
"mmcls", "detection": "mmdet", "segmentation": "mmseg"} + # @STAGES.register_module() -class Stage(object): - MODEL_BUILDER = None +# pylint: disable=too-many-instance-attributes +class Stage: + """Class for base stage of OTX tasks.""" + + MODEL_BUILDER: Optional[Callable] = None - def __init__(self, name, mode, config, common_cfg={}, index=0, **kwargs): + # pylint: disable=too-many-branches, too-many-statements + def __init__(self, name, mode, config, common_cfg=None, index=0, **kwargs): logger.debug(f"init stage with: {name}, {mode}, {config}, {common_cfg}, {index}, {kwargs}") # the name of 'config' cannot be changed to such as 'config_file' # because it is defined as 'config' in recipe file..... @@ -76,16 +84,20 @@ def __init__(self, name, mode, config, common_cfg={}, index=0, **kwargs): self.input = kwargs.pop("input", {}) # input_map?? input_dict? just input? self.output_keys = kwargs.pop("output", []) self._distributed = False + self.task_adapt_type = None + self.task_adapt_op = "REPLACE" + self.org_model_classes: List[str] = [] + self.model_classes: List[str] = [] + self.data_classes: List[str] = [] if common_cfg is None: common_cfg = dict(output_path="logs") if not isinstance(common_cfg, dict): raise TypeError(f"common_cfg should be the type of dict but {type(common_cfg)}") - else: - if common_cfg.get("output_path") is None: - logger.info("output_path is not set in common_cfg. set it to 'logs' as default") - common_cfg["output_path"] = "logs" + if common_cfg.get("output_path") is None: + logger.info("output_path is not set in common_cfg. set it to 'logs' as default") + common_cfg["output_path"] = "logs" self.output_prefix = common_cfg["output_path"] self.output_suffix = f"stage{self.index:02d}_{self.name}" @@ -116,9 +128,9 @@ def __init__(self, name, mode, config, common_cfg={}, index=0, **kwargs): if len(kwargs) > 0: addtional_dict = {} logger.info("found override configurations for the stage") - for k, v in kwargs.items(): - addtional_dict[k] = v - logger.info(f"\t{k}: {v}") + for key, value in kwargs.items(): + addtional_dict[key] = value + logger.info(f"\t{key}: {value}") cfg.merge_from_dict(addtional_dict) max_epochs = -1 @@ -184,78 +196,78 @@ def __init_device(self): @property def distributed(self): + """Return whether the stage is running in distributed mode.""" return self._distributed - def run(self, **kwargs): - raise NotImplementedError - - def _init_logger(self, **kwargs): + def _init_logger(self): timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) config_logger(os.path.join(self.cfg.work_dir, f"{timestamp}.log"), level=self.cfg.log_level) logger.info(f"configured logger at {self.cfg.work_dir} with named {timestamp}.log") return logger - @staticmethod - def configure_data(cfg, training, **kwargs): - # update data configuration using image options - def configure_split(target): - def update_transform(opt, pipeline, idx, transform): - if isinstance(opt, dict): - if "_delete_" in opt.keys() and opt.get("_delete_", False): - # if option include _delete_=True, remove this transform from pipeline - logger.info(f"configure_data: {transform['type']} is deleted") - del pipeline[idx] - return - logger.info(f"configure_data: {transform['type']} is updated with {opt}") - transform.update(**opt) - - def update_config(src, pipeline_options): - logger.info(f"update_config() {pipeline_options}") - if src.get("pipeline") is not None or ( - src.get("dataset") is not None and src.get("dataset").get("pipeline") is not None - ): - if src.get("pipeline") is not None: - pipeline =
src.get("pipeline", None) - else: - pipeline = src.get("dataset").get("pipeline") - if isinstance(pipeline, list): - for idx, transform in enumerate(pipeline): - for opt_key, opt in pipeline_options.items(): - if transform["type"] == opt_key: - update_transform(opt, pipeline, idx, transform) - elif isinstance(pipeline, dict): - for _, pipe in pipeline.items(): - for idx, transform in enumerate(pipe): - for opt_key, opt in pipeline_options.items(): - if transform["type"] == opt_key: - update_transform(opt, pipe, idx, transform) - else: - raise NotImplementedError(f"pipeline type of {type(pipeline)} is not supported") - else: - logger.info("no pipeline in the data split") - - split = cfg.data.get(target) - if split is not None: - if isinstance(split, list): - for sub_item in split: - update_config(sub_item, pipeline_options) - elif isinstance(split, dict): - update_config(split, pipeline_options) - else: - logger.warning(f"type of split '{target}'' should be list or dict but {type(split)}") - + def configure_data(self, cfg, training): + """Update data configuration using image options.""" logger.info("configure_data()") logger.debug(f"[args] {cfg.data}") pipeline_options = cfg.data.pop("pipeline_options", None) if pipeline_options is not None and isinstance(pipeline_options, dict): - configure_split("train") - configure_split("val") + self._configure_split(cfg, pipeline_options, "train") + self._configure_split(cfg, pipeline_options, "val") if not training: - configure_split("test") - configure_split("unlabeled") + self._configure_split(cfg, pipeline_options, "test") + self._configure_split(cfg, pipeline_options, "unlabeled") + + @staticmethod + def _configure_split(cfg, pipeline_options, target): + def update_transform(opt, pipeline, idx, transform): + if isinstance(opt, dict): + if "_delete_" in opt.keys() and opt.get("_delete_", False): + # if option include _delete_=True, remove this transform from pipeline + logger.info(f"configure_data: {transform['type']} is deleted") + del pipeline[idx] + return + logger.info(f"configure_data: {transform['type']} is updated with {opt}") + transform.update(**opt) + + # pylint: disable=too-many-nested-blocks + def update_config(src, pipeline_options): + logger.info(f"update_config() {pipeline_options}") + if src.get("pipeline") is not None or ( + src.get("dataset") is not None and src.get("dataset").get("pipeline") is not None + ): + if src.get("pipeline") is not None: + pipeline = src.get("pipeline", None) + else: + pipeline = src.get("dataset").get("pipeline") + if isinstance(pipeline, list): + for idx, transform in enumerate(pipeline): + for opt_key, opt in pipeline_options.items(): + if transform["type"] == opt_key: + update_transform(opt, pipeline, idx, transform) + elif isinstance(pipeline, dict): + for _, pipe in pipeline.items(): + for idx, transform in enumerate(pipe): + for opt_key, opt in pipeline_options.items(): + if transform["type"] == opt_key: + update_transform(opt, pipe, idx, transform) + else: + raise NotImplementedError(f"pipeline type of {type(pipeline)} is not supported") + else: + logger.info("no pipeline in the data split") + + split = cfg.data.get(target) + if split is not None: + if isinstance(split, list): + for sub_item in split: + update_config(sub_item, pipeline_options) + elif isinstance(split, dict): + update_config(split, pipeline_options) + else: + logger.warning(f"type of split '{target}'' should be list or dict but {type(split)}") def configure_ckpt(self, cfg, model_ckpt, pretrained=None): """Patch checkpoint path 
for pretrained weight. + Replace cfg.load_from to model_ckpt Replace cfg.load_from to pretrained Replace cfg.resume_from to cfg.load_from @@ -269,11 +281,11 @@ def configure_ckpt(self, cfg, model_ckpt, pretrained=None): cfg.resume_from = cfg.load_from @staticmethod - def configure_hook(cfg, **kwargs): - """Update cfg.custom_hooks based on cfg.custom_hook_options""" + def configure_hook(cfg): + """Update cfg.custom_hooks based on cfg.custom_hook_options.""" def update_hook(opt, custom_hooks, idx, hook): - """Delete of update a custom hook""" + """Delete or update a custom hook.""" if isinstance(opt, dict): if opt.get("_delete_", False): # if option include _delete_=True, remove this hook from custom_hooks @@ -297,6 +309,7 @@ def configure_samples_per_gpu( subset: str, distributed: bool = False, ): + """Patch samples_per_gpu settings.""" dataloader_cfg = cfg.data.get(f"{subset}_dataloader", ConfigDict()) samples_per_gpu = dataloader_cfg.get("samples_per_gpu", cfg.data.get("samples_per_gpu", 1)) @@ -326,9 +339,7 @@ def configure_compat_cfg( """Modify config to keep the compatibility.""" def _configure_dataloader(cfg): - """Consume all the global dataloader config and convert them - to specific dataloader config as it would be deprecated in the future. - """ + """Consume all the global dataloader config and convert them to specific dataloader config.""" global_dataloader_cfg = {} global_dataloader_cfg.update( { @@ -383,6 +394,7 @@ def configure_fp16_optimizer(cfg: Config, distributed: bool = False): @staticmethod def configure_unlabeled_dataloader(cfg: Config, distributed: bool = False): + """Patch for loading unlabeled dataloader.""" if "unlabeled" in cfg.data: task_lib_module = importlib.import_module(f"{MODEL_TASK[cfg.model_task]}.datasets") dataset_builder = getattr(task_lib_module, "build_dataset") @@ -415,6 +427,7 @@ def configure_unlabeled_dataloader(cfg: Config, distributed: bool = False): @staticmethod def get_model_meta(cfg): + """Return model_meta.""" ckpt_path = cfg.get("load_from", None) meta = {} if ckpt_path: @@ -424,17 +437,18 @@ def get_model_meta(cfg): @staticmethod def get_data_cfg(cfg, subset): + """Return data_cfg from cfg's subset.""" assert subset in ["train", "val", "test"], f"Unknown subset:{subset}" if "dataset" in cfg.data[subset]: # Concat|RepeatDataset dataset = cfg.data[subset].dataset while hasattr(dataset, "dataset"): dataset = dataset.dataset return dataset - else: - return cfg.data[subset] + return cfg.data[subset] @staticmethod def get_data_classes(cfg): + """Return data_classes from cfg.""" data_classes = [] train_cfg = Stage.get_data_cfg(cfg, "train") if "data_classes" in train_cfg: @@ -446,6 +460,7 @@ def get_data_classes(cfg): @staticmethod def get_model_classes(cfg): """Extract trained classes info from checkpoint file.
+ MMCV-based models would save class info in ckpt['meta']['CLASSES'] For other cases, try to get the info from cfg.model.classes (with pop()) - Which means that model classes should be specified in model-cfg for @@ -469,6 +484,7 @@ def get_model_classes(cfg): @staticmethod def get_model_ckpt(ckpt_path, new_path=None): + """Return model ckpt from ckpt_path.""" ckpt = CheckpointLoader.load_checkpoint(ckpt_path, map_location="cpu") if "model" in ckpt: ckpt = ckpt["model"] @@ -476,11 +492,11 @@ def get_model_ckpt(ckpt_path, new_path=None): new_path = ckpt_path[:-3] + "converted.pth" torch.save(ckpt, new_path) return new_path - else: - return ckpt_path + return ckpt_path @staticmethod def read_label_schema(ckpt_path, name_only=True, file_name="label_schema.json"): + """Read label_schema and return all classes.""" serialized_label_schema = [] if any(ckpt_path.endswith(extension) for extension in (".xml", ".bin", ".pth")): label_schema_path = osp.join(osp.dirname(ckpt_path), file_name) @@ -496,19 +512,20 @@ def read_label_schema(ckpt_path, name_only=True, file_name="label_schema.json"): all_classes = [] return all_classes + # pylint: disable=unused-argument @staticmethod def set_inference_progress_callback(model, cfg): - # InferenceProgressCallback (Time Monitor enable into Infer task) + """InferenceProgressCallback (enables the Time Monitor in the Infer task).""" time_monitor = None if cfg.get("custom_hooks", None): time_monitor = [hook.time_monitor for hook in cfg.custom_hooks if hook.type == "OTXProgressHook"] time_monitor = time_monitor[0] if time_monitor else None if time_monitor is not None: - def pre_hook(module, input): + def pre_hook(*args, **kwargs): time_monitor.on_test_batch_begin(None, None) - def hook(module, input, output): + def hook(*args, **kwargs): time_monitor.on_test_batch_end(None, None) model.register_forward_pre_hook(pre_hook) @@ -523,6 +540,7 @@ def build_model( fp16: bool = False, **kwargs, ) -> torch.nn.Module: + """Build model from model_builder.""" if model_builder is None: model_builder = cls.MODEL_BUILDER assert model_builder is not None diff --git a/otx/mpa/version.py b/otx/algorithms/common/adapters/mmcv/tasks/version.py similarity index 64% rename from otx/mpa/version.py rename to otx/algorithms/common/adapters/mmcv/tasks/version.py index 743f7a87bd0..6d59ff872f2 100644 --- a/otx/mpa/version.py +++ b/otx/algorithms/common/adapters/mmcv/tasks/version.py @@ -1,3 +1,4 @@ +"""Return the current version; this module should be removed.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,4 +7,5 @@ def get_version(): + """Return version.""" return __version__ diff --git a/otx/mpa/workflow.py b/otx/algorithms/common/adapters/mmcv/tasks/workflow.py similarity index 92% rename from otx/mpa/workflow.py rename to otx/algorithms/common/adapters/mmcv/tasks/workflow.py index 20b43b5951d..b25767056b9 100644 --- a/otx/mpa/workflow.py +++ b/otx/algorithms/common/adapters/mmcv/tasks/workflow.py @@ -1,14 +1,18 @@ -# Copyright (C) 2022 Intel Corporation +"""Base workflow for OTX task.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # from datetime import datetime as dt +from otx.algorithms.common.adapters.mmcv.utils.config_utils import copy_config + from .stage import Stage -from .utils.config_utils import copy_config -class Workflow(object): +class Workflow: + """Base workflow class for OTX task.""" + def __init__(self, stages, workflow_hooks=None): if not isinstance(stages, list): raise ValueError("stages parameter should be the
list of Stage instance") @@ -30,7 +34,9 @@ def _call_wf_hooks(self, fname, stage_idx=-1): for hook in self.workflow_hooks: getattr(hook, fname)(self, stage_idx, self.results) + # pylint: disable=too-many-locals def run(self, **kwargs): + """Run workflow.""" model_cfg = kwargs.get("model_cfg", None) data_cfg = kwargs.get("data_cfg", None) model_ckpt = kwargs.get("model_ckpt", None) diff --git a/otx/algorithms/common/adapters/mmcv/utils/__init__.py b/otx/algorithms/common/adapters/mmcv/utils/__init__.py index 3995479870f..8a17a7e2940 100644 --- a/otx/algorithms/common/adapters/mmcv/utils/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/utils/__init__.py @@ -8,6 +8,7 @@ from ._config_utils_get_configs_by_pairs import get_configs_by_pairs from .builder import build_dataloader, build_dataset from .config_utils import ( + MPAConfig, align_data_config_with_recipe, config_from_string, get_data_cfg, @@ -46,4 +47,5 @@ "get_meta_keys", "prepare_work_dir", "get_data_cfg", + "MPAConfig", ] diff --git a/otx/algorithms/common/adapters/mmcv/utils/config_utils.py b/otx/algorithms/common/adapters/mmcv/utils/config_utils.py index 5a339d15419..8df8f2cb317 100644 --- a/otx/algorithms/common/adapters/mmcv/utils/config_utils.py +++ b/otx/algorithms/common/adapters/mmcv/utils/config_utils.py @@ -17,18 +17,27 @@ import copy import glob import os +import os.path as osp +import platform +import shutil +import sys import tempfile +import warnings from collections.abc import Mapping -from typing import Any, Dict, List, Tuple, Union +from importlib import import_module +from typing import Any, Callable, Dict, List, Tuple, Union from mmcv import Config, ConfigDict +from mmcv.utils.config import BASE_KEY, DEPRECATION_KEY +from mmcv.utils.misc import import_modules_from_strings +from mmcv.utils.path import check_file_exist +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.utils.argument_checks import ( DatasetParamTypeCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger from ._config_utils_get_configs_by_keys import get_configs_by_keys from ._config_utils_get_configs_by_pairs import get_configs_by_pairs @@ -36,6 +45,210 @@ logger = get_logger() +# TODO: refactor Config +class MPAConfig(Config): + """A class that extends the base `Config` class and adds additional functionality for loading configuration files.""" + + @staticmethod + def _file2dict( + filename, use_predefined_variables=True + ): # pylint: disable=too-many-locals, too-many-branches, too-many-statements + """Static method that loads the configuration file and returns a dictionary of its contents. + + :param filename: str, the path of the configuration file to be loaded. + :param use_predefined_variables: bool, a flag indicating whether to substitute predefined variables in the + configuration file. + :return: tuple of dictionary and string. Returns a dictionary containing the contents of the configuration file + and a string representation of the configuration file. + :raises: IOError if the file type is not supported.
+ """ + filename = osp.abspath(osp.expanduser(filename)) + check_file_exist(filename) + extender = osp.splitext(filename)[1] + if extender not in [".py", ".json", ".yaml", ".yml"]: + raise IOError("Only py/yml/yaml/json type are supported now!") + + with tempfile.TemporaryDirectory() as temp_config_dir: + with tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=extender) as temp_config_file: + if platform.system() == "Windows": + temp_config_file.close() + temp_config_name = osp.basename(temp_config_file.name) + # Substitute predefined variables + if use_predefined_variables: + Config._substitute_predefined_vars(filename, temp_config_file.name) + else: + shutil.copyfile(filename, temp_config_file.name) + # Substitute base variables from placeholders to strings + base_var_dict = Config._pre_substitute_base_vars(temp_config_file.name, temp_config_file.name) + if filename.endswith(".py"): + temp_module_name = osp.splitext(temp_config_name)[0] + sys.path.insert(0, temp_config_dir) + Config._validate_py_syntax(filename) + mod = import_module(temp_module_name) + sys.path.pop(0) + cfg_dict = {name: value for name, value in mod.__dict__.items() if not name.startswith("__")} + # delete imported module + del sys.modules[temp_module_name] + elif filename.endswith((".yml", ".yaml", ".json")): + import mmcv + + cfg_dict = mmcv.load(temp_config_file.name) + + # check deprecation information + if DEPRECATION_KEY in cfg_dict: + deprecation_info = cfg_dict.pop(DEPRECATION_KEY) + warning_msg = f"The config file {filename} will be deprecated " "in the future." + if "expected" in deprecation_info: + warning_msg += f' Please use {deprecation_info["expected"]} ' "instead." + if "reference" in deprecation_info: + warning_msg += " More information can be found at " f'{deprecation_info["reference"]}' + warnings.warn(warning_msg) + + cfg_text = filename + "\n" + with open(filename, "r", encoding="utf-8") as f: + # Setting encoding explicitly to resolve coding issue on windows + cfg_text += f.read() + + if BASE_KEY in cfg_dict: + cfg_dir = osp.dirname(filename) + base_filename = cfg_dict.pop(BASE_KEY) + base_filename = base_filename if isinstance(base_filename, list) else [base_filename] + + cfg_dict_list = [] + cfg_text_list = [] + for f in base_filename: + _cfg_dict, _cfg_text = MPAConfig._file2dict(osp.join(cfg_dir, f)) + cfg_dict_list.append(_cfg_dict) + cfg_text_list.append(_cfg_text) + + base_cfg_dict = dict() + # for c in cfg_dict_list: + # duplicate_keys = base_cfg_dict.keys() & c.keys() + # if len(duplicate_keys) > 0: + # raise KeyError('Duplicate key is not allowed among bases. 
' + # f'Duplicate keys: {duplicate_keys}') + # base_cfg_dict.update(c) + for c in cfg_dict_list: + if len(base_cfg_dict.keys() & c.keys()) > 0: + # raise KeyError(f'Duplicate key is not allowed among bases [{base_cfg_dict.keys() & c.keys()}]') + logger.warning(f"Duplicate key is detected among bases [{base_cfg_dict.keys() & c.keys()}]") + logger.debug(f"base = {base_cfg_dict}, cfg = {c}") + base_cfg_dict = Config._merge_a_into_b(base_cfg_dict, c) + logger.debug(f"merged dict = {base_cfg_dict}") + else: + base_cfg_dict.update(c) + + # Substitute base variables from strings to their actual values + cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict) + + base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict) + cfg_dict = base_cfg_dict + + # merge cfg_text + cfg_text_list.append(cfg_text) + cfg_text = "\n".join(cfg_text_list) + + return cfg_dict, cfg_text + + @staticmethod + def fromfile(filename, use_predefined_variables=True, import_custom_modules=True): + """Static method that loads a configuration file and returns an instance of `Config` class. + + :param filename: str, the path of the configuration file to be loaded. + :param use_predefined_variables: bool, a flag indicating whether to substitute predefined variables in the + configuration file. + :param import_custom_modules: bool, a flag indicating whether to import custom modules. + :return: Config object, an instance of `Config` class containing the contents of the configuration file. + """ + cfg_dict, cfg_text = MPAConfig._file2dict(filename, use_predefined_variables) + if import_custom_modules and cfg_dict.get("custom_imports", None): + import_modules_from_strings(**cfg_dict["custom_imports"]) + return Config(cfg_dict, cfg_text=cfg_text, filename=filename) + + +def copy_config(cfg): + """A function that creates a deep copy of the input configuration object. + + :param cfg: Config object, an instance of `Config` class to be copied. + :return: Config object, a deep copy of the input configuration object. + :raises: ValueError if the input object is not an instance of `Config` class. + """ + if not isinstance(cfg, Config): + raise ValueError(f"cannot copy this instance {type(cfg)}") + # new_cfg = copy.deepcopy(cfg) + # new_cfg._cfg_dict = copy.deepcopy(cfg._cfg_dict) + # new_cfg.filename = cfg.filename + import pickle + + data = pickle.dumps(cfg) + return pickle.loads(data) + + +def update_or_add_custom_hook(cfg: Config, hook_cfg: ConfigDict):
 + """Update hook cfg if same type is in custom_hook or append it.""" + custom_hooks = cfg.get("custom_hooks", []) + custom_hooks_updated = False + for custom_hook in custom_hooks: + if custom_hook["type"] == hook_cfg["type"]: + custom_hook.update(hook_cfg) + custom_hooks_updated = True + break + if not custom_hooks_updated: + custom_hooks.append(hook_cfg) + cfg["custom_hooks"] = custom_hooks + + +def remove_custom_hook(cfg: Config, hook_type: str): + """Remove hook cfg if hook_type is in custom_hook.""" + custom_hooks = cfg.get("custom_hooks", []) + if len(custom_hooks) > 0: + idx_to_del = None + for i, custom_hook in enumerate(custom_hooks): + if custom_hook["type"] == hook_type: + idx_to_del = i + break + if idx_to_del is not None: + del custom_hooks[idx_to_del] + + +def recursively_update_cfg( + cfg: Union[Config, dict], + criterion: Callable[[Any, Any], bool], + update_dict: Any, +): + """A function that recursively updates the input dictionary or `Config` object with a new dictionary.
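A short sketch of the custom-hook helpers defined above (the hook types are illustrative: `OTXProgressHook` is referenced elsewhere in this refactor, and `EMAHook` stands in for any hook type):

```python
from mmcv import Config, ConfigDict

from otx.algorithms.common.adapters.mmcv.utils.config_utils import (
    remove_custom_hook,
    update_or_add_custom_hook,
)

cfg = Config(dict(custom_hooks=[dict(type="EMAHook", momentum=0.0002)]))

update_or_add_custom_hook(cfg, ConfigDict(type="EMAHook", momentum=0.001))  # same type: updated in place
update_or_add_custom_hook(cfg, ConfigDict(type="OTXProgressHook"))          # new type: appended
remove_custom_hook(cfg, "EMAHook")

assert [hook["type"] for hook in cfg.custom_hooks] == ["OTXProgressHook"]
```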
+ + :param cfg: Union[Config, dict], an input dictionary or `Config` object to be updated. + :param criterion: Callable[[Any, Any], bool], a function that determines whether to update a key-value pair based on + a criterion. The function takes two arguments: key and value, and returns a boolean. + :param update_dict: Any, a dictionary to be used for updating the input dictionary. + :return: None + """ + for key, val in list(cfg.items()): + if isinstance(val, dict): + recursively_update_cfg(val, criterion, update_dict) + if criterion(key, val): + cfg.update(update_dict) + + +def add_custom_hook_if_not_exists(cfg: Config, hook_cfg: ConfigDict): + """A function that adds a custom hook to the input `Config` object if it doesn't already exist. + + :param cfg: Config object, an instance of `Config` class to which the custom hook will be added. + :param hook_cfg: ConfigDict object, an instance of `ConfigDict` class representing the custom hook to be added. + :return: None + """ + custom_hooks = cfg.get("custom_hooks", []) + found = False + for hook in custom_hooks: + if hook["type"] == hook_cfg["type"]: + found = True + break + if not found: + custom_hooks.append(hook_cfg) + cfg["custom_hooks"] = custom_hooks + + @check_input_parameters_type() def remove_from_config(config: Union[Config, ConfigDict], key: str): """Update & Remove configs.""" @@ -84,7 +297,13 @@ def update_config( @check_input_parameters_type() def get_dataset_configs(config: Union[Config, ConfigDict], subset: str) -> List[ConfigDict]: - """Get 'datasets' configs.""" + """A function that retrieves 'datasets' configurations from the input `Config` object or `ConfigDict` object. + + :param config: Union[Config, ConfigDict], an instance of `Config` class or `ConfigDict` class containing the + configurations. + :param subset: str, a string representing the subset for which the 'datasets' configuration is required. + :return: List[ConfigDict], a list of 'datasets' configuration dictionaries. 
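To illustrate the duplicate-base behavior of the `MPAConfig` class introduced above, a self-contained sketch (file names and contents are made up; plain `mmcv.Config` refuses duplicate keys across `_base_` files, while `MPAConfig` warns and merges them):

```python
import os
import tempfile

from otx.algorithms.common.adapters.mmcv.utils import MPAConfig

with tempfile.TemporaryDirectory() as tmp:
    for name, body in {
        "base_a.py": "opt = dict(lr=0.01)\n",
        "base_b.py": "opt = dict(weight_decay=0.0005)\n",
        "child.py": "_base_ = ['./base_a.py', './base_b.py']\n",
    }.items():
        with open(os.path.join(tmp, name), "w") as f:
            f.write(body)

    cfg = MPAConfig.fromfile(os.path.join(tmp, "child.py"))
    # 'opt' exists in both bases: MPAConfig logs a warning and merges,
    # so both keys survive in the resulting config
    assert cfg.opt.lr == 0.01 and cfg.opt.weight_decay == 0.0005
```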
+ """ if config.data.get(subset, None) is None: return [] data_cfg = config.data[subset] diff --git a/otx/algorithms/common/adapters/mmdeploy/__init__.py b/otx/algorithms/common/adapters/mmdeploy/__init__.py new file mode 100644 index 00000000000..01687800428 --- /dev/null +++ b/otx/algorithms/common/adapters/mmdeploy/__init__.py @@ -0,0 +1,10 @@ +"""Adapters for mmdeploy.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from .utils.mmdeploy import is_mmdeploy_enabled + +__all__ = [ + "is_mmdeploy_enabled", +] diff --git a/otx/mpa/deploy/apis.py b/otx/algorithms/common/adapters/mmdeploy/apis.py similarity index 90% rename from otx/mpa/deploy/apis.py rename to otx/algorithms/common/adapters/mmdeploy/apis.py index 4e71e18d8bc..cb59bad7bb1 100644 --- a/otx/mpa/deploy/apis.py +++ b/otx/algorithms/common/adapters/mmdeploy/apis.py @@ -1,3 +1,4 @@ +"""API of otx.algorithms.common.adapters.mmdeploy.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -12,20 +13,23 @@ import mmcv import numpy as np -import onnx import torch from mmcv.parallel import collate, scatter -from .utils import numpy_2_list from .utils.mmdeploy import ( is_mmdeploy_enabled, mmdeploy_init_model_helper, update_deploy_cfg, ) from .utils.onnx import prepare_onnx_for_openvino +from .utils.utils import numpy_2_list + +# pylint: disable=too-many-locals class NaiveExporter: + """NaiveExporter for non-mmdeploy export.""" + @staticmethod def export2openvino( output_dir: str, @@ -38,13 +42,15 @@ def export2openvino( input_names: Optional[List[str]] = None, output_names: Optional[List[str]] = None, opset_version: int = 11, - dynamic_axes: Dict[Any, Any] = {}, + dynamic_axes: Optional[Dict[Any, Any]] = None, mo_transforms: str = "", ): + """Function for exporting to openvino.""" input_data = scatter(collate([input_data], samples_per_gpu=1), [-1])[0] model = model_builder(cfg) model = model.cpu().eval() + dynamic_axes = dynamic_axes if dynamic_axes else dict() onnx_path = NaiveExporter.torch2onnx( output_dir, @@ -108,10 +114,11 @@ def torch2onnx( input_names: Optional[List[str]] = None, output_names: Optional[List[str]] = None, opset_version: int = 11, - dynamic_axes: Dict[Any, Any] = {}, + dynamic_axes: Optional[Dict[Any, Any]] = None, verbose: bool = False, **onnx_options, ) -> str: + """Function for torch to onnx exporting.""" img_metas = input_data.get("img_metas") numpy_2_list(img_metas) @@ -119,6 +126,7 @@ def torch2onnx( model.forward = partial(model.forward, img_metas=img_metas, return_loss=False) onnx_file_name = model_name + ".onnx" + dynamic_axes = dynamic_axes if dynamic_axes else dict() torch.onnx.export( model, imgs, @@ -143,7 +151,8 @@ def onnx2openvino( model_name: str = "model", **openvino_options, ) -> Tuple[str, str]: - from otx.mpa.utils import mo_wrapper + """Function for onnx to openvino exporting.""" + from otx.algorithms.common.utils import mo_wrapper mo_args = { "input_model": onnx_path, @@ -163,17 +172,15 @@ def onnx2openvino( if is_mmdeploy_enabled(): import mmdeploy.apis.openvino as openvino_api - from mmdeploy.apis import ( - build_task_processor, - extract_model, - get_predefined_partition_cfg, - torch2onnx, - ) + from mmdeploy.apis import build_task_processor, extract_model, torch2onnx from mmdeploy.apis.openvino import get_input_info_from_cfg, get_mo_options_from_cfg - from mmdeploy.core import FUNCTION_REWRITER - from mmdeploy.utils import get_backend_config, get_ir_config, get_partition_config + + # from mmdeploy.core import 
FUNCTION_REWRITER + from mmdeploy.utils import get_ir_config, get_partition_config class MMdeployExporter: + """MMdeployExporter for mmdeploy exporting.""" + @staticmethod def export2openvino( output_dir: str, @@ -183,6 +190,7 @@ def export2openvino( *, model_name: str = "model", ): + """Function for exporting to openvino.""" task_processor = build_task_processor(cfg, deploy_cfg, "cpu") @@ -248,6 +256,7 @@ def torch2onnx( *, model_name: str = "model", ) -> str: + """Function for torch to onnx exporting.""" onnx_file_name = model_name + ".onnx" torch2onnx( input_data, @@ -266,6 +275,7 @@ def partition_onnx( onnx_path: str, partition_cfgs: Union[mmcv.ConfigDict, List[mmcv.ConfigDict]], ) -> Tuple[str, ...]: + """Function for parition onnx.""" partitioned_paths = [] if not isinstance(partition_cfgs, list): @@ -290,6 +300,7 @@ def onnx2openvino( *, model_name: Optional[str] = None, ) -> Tuple[str, str]: + """Function for onnx to openvino exporting.""" input_info = get_input_info_from_cfg(deploy_cfg) output_names = get_ir_config(deploy_cfg).output_names diff --git a/otx/mpa/deploy/utils/__init__.py b/otx/algorithms/common/adapters/mmdeploy/utils/__init__.py similarity index 80% rename from otx/mpa/deploy/utils/__init__.py rename to otx/algorithms/common/adapters/mmdeploy/utils/__init__.py index d4800169f5e..5c1f7760edd 100644 --- a/otx/mpa/deploy/utils/__init__.py +++ b/otx/algorithms/common/adapters/mmdeploy/utils/__init__.py @@ -1,3 +1,4 @@ +"""Init file for otx.algorithms.common.adapters.mmdeploy.utils.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/mpa/deploy/utils/mmdeploy.py b/otx/algorithms/common/adapters/mmdeploy/utils/mmdeploy.py similarity index 71% rename from otx/mpa/deploy/utils/mmdeploy.py rename to otx/algorithms/common/adapters/mmdeploy/utils/mmdeploy.py index eba7c531b2d..ee33bbd3705 100644 --- a/otx/mpa/deploy/utils/mmdeploy.py +++ b/otx/algorithms/common/adapters/mmdeploy/utils/mmdeploy.py @@ -1,3 +1,4 @@ +"""Functions for mmdeploy adapters.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -5,14 +6,24 @@ import importlib import onnx -from mmcv.utils import ConfigDict def is_mmdeploy_enabled(): + """Checks if the 'mmdeploy' Python module is installed and available for use. + + Returns: + bool: True if 'mmdeploy' is installed, False otherwise. 
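As an aside, the recurring `dynamic_axes`/`mo_options` changes in these hunks all fix the same Python pitfall: a mutable default argument is created once at `def` time and shared across every call. A minimal, self-contained reproduction of the bug and of the fix pattern used here (function names are made up for illustration):

```python
def leaky(x, acc={}):  # one shared dict, created when the function is defined
    acc[x] = True
    return acc

def safe(x, acc=None):  # fresh dict per call, as in the updated exporters
    acc = acc if acc else dict()
    acc[x] = True
    return acc

assert leaky(1) is leaky(2)  # the same object leaks state: {1: True, 2: True}
assert safe(1) == {1: True}  # independent result on every call
assert safe(2) == {2: True}
```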
+ + Example: + >>> is_mmdeploy_enabled() + True + """ return importlib.util.find_spec("mmdeploy") is not None def mmdeploy_init_model_helper(ctx, model_checkpoint=None, cfg_options=None, **kwargs): + """Helper function for initializing a model for inference using the 'mmdeploy' library.""" + model_builder = kwargs.pop("model_builder") model = model_builder( ctx.model_cfg, @@ -31,12 +42,14 @@ def mmdeploy_init_model_helper(ctx, model_checkpoint=None, cfg_options=None, **k return model -def update_deploy_cfg(onnx_path, deploy_cfg, mo_options={}): +def update_deploy_cfg(onnx_path, deploy_cfg, mo_options=None): + """Update the 'deploy_cfg' configuration file based on the ONNX model specified by 'onnx_path'.""" + from mmdeploy.utils import get_backend_config, get_ir_config onnx_model = onnx.load(onnx_path) ir_config = get_ir_config(deploy_cfg) - backend_config = get_backend_config(deploy_cfg) + get_backend_config(deploy_cfg) # update input input_names = [i.name for i in onnx_model.graph.input] @@ -47,6 +60,7 @@ def update_deploy_cfg(onnx_path, deploy_cfg, mo_options={}): ir_config["output_names"] = output_names # update mo options + mo_options = mo_options if mo_options else dict() deploy_cfg.merge_from_dict({"backend_config": {"mo_options": mo_options}}) diff --git a/otx/mpa/deploy/utils/onnx.py b/otx/algorithms/common/adapters/mmdeploy/utils/onnx.py similarity index 87% rename from otx/mpa/deploy/utils/onnx.py rename to otx/algorithms/common/adapters/mmdeploy/utils/onnx.py index 9ec80579904..b44812324e2 100644 --- a/otx/mpa/deploy/utils/onnx.py +++ b/otx/algorithms/common/adapters/mmdeploy/utils/onnx.py @@ -1,3 +1,4 @@ +"""Functions for onnx adapters.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,6 +7,7 @@ def remove_nodes_by_op_type(onnx_model, op_type): + """Remove all nodes of a specified op type from the ONNX model.""" # TODO: support more nodes supported_op_types = ["Mark", "Conv", "Gemm"] @@ -42,6 +44,7 @@ def remove_nodes_by_op_type(onnx_model, op_type): def prepare_onnx_for_openvino(in_path, out_path): + """Modify the specified ONNX model to be compatible with OpenVINO by removing 'Mark' op nodes.""" onnx_model = onnx.load(in_path) onnx_model = remove_nodes_by_op_type(onnx_model, "Mark") onnx.checker.check_model(onnx_model) diff --git a/otx/mpa/deploy/utils/operations_domain.py b/otx/algorithms/common/adapters/mmdeploy/utils/operations_domain.py similarity index 73% rename from otx/mpa/deploy/utils/operations_domain.py rename to otx/algorithms/common/adapters/mmdeploy/utils/operations_domain.py index 11ffdcc48f8..e54af8bf1ab 100644 --- a/otx/mpa/deploy/utils/operations_domain.py +++ b/otx/algorithms/common/adapters/mmdeploy/utils/operations_domain.py @@ -1,3 +1,4 @@ +"""Add domain function.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,4 +7,5 @@ def add_domain(name_operator: str) -> str: + """Function for adding to DOMAIN_CUSTOM_OPS_NAME.""" return DOMAIN_CUSTOM_OPS_NAME + "::" + name_operator diff --git a/otx/mpa/deploy/utils/utils.py b/otx/algorithms/common/adapters/mmdeploy/utils/utils.py similarity index 90% rename from otx/mpa/deploy/utils/utils.py rename to otx/algorithms/common/adapters/mmdeploy/utils/utils.py index 9c6148b7062..a0025bc8364 100644 --- a/otx/mpa/deploy/utils/utils.py +++ b/otx/algorithms/common/adapters/mmdeploy/utils/utils.py @@ -1,3 +1,4 @@ +"""Util functions of otx.algorithms.common.adapters.mmdeploy.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: 
Apache-2.0 # @@ -9,6 +10,7 @@ def sync_batchnorm_2_batchnorm(module, dim=2): + """Convert SyncBatchNorm layers in a model to regular BatchNorm layers.""" if dim == 1: bn = torch.nn.BatchNorm1d elif dim == 2: @@ -48,6 +50,7 @@ def sync_batchnorm_2_batchnorm(module, dim=2): def numpy_2_list(data): + """Converts NumPy arrays to Python lists.""" if isinstance(data, np.ndarray): return data.tolist() diff --git a/tests/unit/mpa/modules/models/classifiers/__init__.py b/otx/algorithms/common/adapters/torch/__init__.py similarity index 60% rename from tests/unit/mpa/modules/models/classifiers/__init__.py rename to otx/algorithms/common/adapters/torch/__init__.py index 3058f445c64..7d2cf100bb3 100644 --- a/tests/unit/mpa/modules/models/classifiers/__init__.py +++ b/otx/algorithms/common/adapters/torch/__init__.py @@ -1,4 +1,4 @@ -"""Test for otx.mpa.modules.models.classifiers""" - +"""Adapters for OTX Common Algorithms.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +# diff --git a/otx/algorithms/common/adapters/torch/dataloaders/__init__.py b/otx/algorithms/common/adapters/torch/dataloaders/__init__.py new file mode 100644 index 00000000000..3132fbf7e0f --- /dev/null +++ b/otx/algorithms/common/adapters/torch/dataloaders/__init__.py @@ -0,0 +1,10 @@ +"""Dataloaders used in OTX.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# flake8: noqa + +from .composed_dataloader import ComposedDL + +__all__ = ["ComposedDL"] diff --git a/otx/mpa/modules/datasets/composed_dataloader.py b/otx/algorithms/common/adapters/torch/dataloaders/composed_dataloader.py similarity index 59% rename from otx/mpa/modules/datasets/composed_dataloader.py rename to otx/algorithms/common/adapters/torch/dataloaders/composed_dataloader.py index fb178ee0062..5fb30db8ead 100644 --- a/otx/mpa/modules/datasets/composed_dataloader.py +++ b/otx/algorithms/common/adapters/torch/dataloaders/composed_dataloader.py @@ -1,27 +1,31 @@ -# Copyright (C) 2022 Intel Corporation +"""Composed dataloader.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() class CDLIterator: + """Iterator that aligns the number of batches to the length of the first loader.""" + def __init__(self, cdl): self._cdl = cdl self._index = 0 self._cdl_iter = [iter(dl) for dl in self._cdl.loaders] def __next__(self): + """Generate the next batch.""" if self._index < self._cdl.max_iter: batches = {} - for i, it in enumerate(self._cdl_iter): + for i, iterator in enumerate(self._cdl_iter): if i == 0: - batches = next(it) + batches = next(iterator) else: try: - batches[f"extra_{i-1}"] = next(it) + batches[f"extra_{i-1}"] = next(iterator) except StopIteration: self._cdl_iter[1] = iter(self._cdl.loaders[1]) batches[f"extra_{i-1}"] = next(self._cdl_iter[1]) @@ -30,32 +34,38 @@ def __next__(self): raise StopIteration -class ComposedDL(object): - class DummySampler(object): - """dummy sampler class to relay set_epoch() call to the - list of data loaders in the CDL - """ +class ComposedDL: + """Composed dataloader for combining two or more loaders together.""" + + class DummySampler: + """Dummy sampler class to relay set_epoch() call to the list of data loaders in the CDL.""" def __init__(self, cdl): self.cdl = cdl def set_epoch(self, epoch): + """Set epoch.""" loaders = self.cdl.loaders for loader in loaders: loader.sampler.set_epoch(epoch) - def
__init__(self, loaders=[]): + def __init__(self, loaders=None): + if loaders is None: + loaders = [] self.loaders = loaders self.max_iter = len(self.loaders[0]) logger.info(f"possible max iterations = {self.max_iter}") self._sampler = ComposedDL.DummySampler(self) def __len__(self): + """Return length of the first loader.""" return self.max_iter def __iter__(self): + """Iter.""" return CDLIterator(self) @property def sampler(self): + """Return sampler.""" return self._sampler diff --git a/otx/algorithms/common/adapters/torch/dataloaders/samplers/__init__.py b/otx/algorithms/common/adapters/torch/dataloaders/samplers/__init__.py new file mode 100644 index 00000000000..5d6133af7f8 --- /dev/null +++ b/otx/algorithms/common/adapters/torch/dataloaders/samplers/__init__.py @@ -0,0 +1,11 @@ +"""Samplers for imbalanced and incremental learning.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# flake8: noqa + +from .balanced_sampler import BalancedSampler +from .cls_incr_sampler import ClsIncrSampler + +__all__ = ["BalancedSampler", "ClsIncrSampler"] diff --git a/otx/mpa/modules/datasets/samplers/balanced_sampler.py b/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py similarity index 90% rename from otx/mpa/modules/datasets/samplers/balanced_sampler.py rename to otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py index 61cc60a3ba3..5b07d07bc90 100644 --- a/otx/mpa/modules/datasets/samplers/balanced_sampler.py +++ b/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py @@ -1,15 +1,17 @@ +"""Balanced sampler for imbalanced data.""" import math import numpy as np from torch.utils.data.sampler import Sampler -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() -class BalancedSampler(Sampler): - """Sampler for Class-Incremental Task +class BalancedSampler(Sampler): # pylint: disable=too-many-instance-attributes + """Balanced sampler for imbalanced data for class-incremental task. 
diff --git a/otx/algorithms/common/adapters/torch/dataloaders/samplers/__init__.py b/otx/algorithms/common/adapters/torch/dataloaders/samplers/__init__.py new file mode 100644 index 00000000000..5d6133af7f8 --- /dev/null +++ b/otx/algorithms/common/adapters/torch/dataloaders/samplers/__init__.py @@ -0,0 +1,11 @@ +"""Samplers for imbalanced and incremental learning.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# flake8: noqa + +from .balanced_sampler import BalancedSampler +from .cls_incr_sampler import ClsIncrSampler + +__all__ = ["BalancedSampler", "ClsIncrSampler"] diff --git a/otx/mpa/modules/datasets/samplers/balanced_sampler.py b/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py similarity index 90% rename from otx/mpa/modules/datasets/samplers/balanced_sampler.py rename to otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py index 61cc60a3ba3..5b07d07bc90 100644 --- a/otx/mpa/modules/datasets/samplers/balanced_sampler.py +++ b/otx/algorithms/common/adapters/torch/dataloaders/samplers/balanced_sampler.py @@ -1,15 +1,17 @@ +"""Balanced sampler for imbalanced data.""" import math import numpy as np from torch.utils.data.sampler import Sampler -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() -class BalancedSampler(Sampler): - """Sampler for Class-Incremental Task +class BalancedSampler(Sampler): # pylint: disable=too-many-instance-attributes + """Balanced sampler for imbalanced data in class-incremental tasks. + This sampler creates an effective batch In reduce mode, reduce the iteration size by estimating the trials @@ -39,7 +41,7 @@ def __init__(self, dataset, batch_size, efficient_mode=True, num_replicas=1, ran if efficient_mode: # Reduce the # of sampling (sampling data for a single epoch) - self.num_tail = min([len(cls_indices) for cls_indices in self.img_indices.values()]) + self.num_tail = min(len(cls_indices) for cls_indices in self.img_indices.values()) base = 1 - (1 / self.num_tail) if base == 0: raise ValueError("Required more than one sample per class") @@ -75,9 +77,10 @@ def _calculate_num_samples(self): return num_samples def __iter__(self): + """Iter.""" indices = [] for _ in range(self.repeat): - for i in range(self.num_trials): + for _ in range(self.num_trials): indice = np.concatenate( [np.random.choice(self.img_indices[cls_indices], 1) for cls_indices in self.img_indices.keys()] ) @@ -110,4 +113,5 @@ def __iter__(self): return iter(indices) def __len__(self): + """Return length of selected samples.""" return self.num_samples
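
A sketch of how the sampler plugs into a `DataLoader` (editor's illustration, not part of the patch; `dataset` stands in for an OTX/mmcls-style classification dataset from which the sampler can gather per-class image indices, and the constructor arguments follow the hunk above):

```python
# Minimal sketch (`dataset` is hypothetical, not part of the patch).
from torch.utils.data import DataLoader

from otx.algorithms.common.adapters.torch.dataloaders.samplers import BalancedSampler

sampler = BalancedSampler(dataset, batch_size=32, efficient_mode=True)
loader = DataLoader(dataset, batch_size=32, sampler=sampler)
```
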
diff --git a/otx/mpa/modules/datasets/samplers/cls_incr_sampler.py b/otx/algorithms/common/adapters/torch/dataloaders/samplers/cls_incr_sampler.py similarity index 91% rename from otx/mpa/modules/datasets/samplers/cls_incr_sampler.py rename to otx/algorithms/common/adapters/torch/dataloaders/samplers/cls_incr_sampler.py index bc2126078c0..0cab42f5033 100644 --- a/otx/mpa/modules/datasets/samplers/cls_incr_sampler.py +++ b/otx/algorithms/common/adapters/torch/dataloaders/samplers/cls_incr_sampler.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Class incremental sampler for cls-incremental learning.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,11 +9,12 @@ import numpy as np from torch.utils.data.sampler import Sampler -from otx.mpa.modules.utils.task_adapt import unwrap_dataset +from otx.algorithms.common.utils.task_adapt import unwrap_dataset -class ClsIncrSampler(Sampler): - """Sampler for Class-Incremental Task +class ClsIncrSampler(Sampler): # pylint: disable=too-many-instance-attributes + """Sampler for Class-Incremental Task. + This sampler creates an effective batch For default setting, the square root of (number of old data/number of new data) is used as the ratio of old data @@ -83,9 +85,10 @@ def _calcuate_num_samples(self): return num_samples def __iter__(self): + """Iter.""" indices = [] for _ in range(self.repeat): - for i in range(int(self.data_length / (1 + self.old_new_ratio))): + for _ in range(int(self.data_length / (1 + self.old_new_ratio))): indice = np.concatenate( [np.random.choice(self.new_indices, 1), np.random.choice(self.old_indices, self.old_new_ratio)] ) @@ -123,4 +126,5 @@ def __iter__(self): return iter(indices) def __len__(self): + """Return length of selected samples.""" return self.num_samples diff --git a/otx/algorithms/common/configs/training_base.py b/otx/algorithms/common/configs/training_base.py index 1e99f5048ee..c1a85446eb8 100644 --- a/otx/algorithms/common/configs/training_base.py +++ b/otx/algorithms/common/configs/training_base.py @@ -32,15 +32,17 @@ from .configuration_enums import POTQuantizationPreset +# pylint: disable=invalid-name + class TrainType(ConfigurableEnum): """TrainType for OTX Algorithms.""" - FINETUNE = "FINETUNE" - SEMISUPERVISED = "SEMISUPERVISED" - SELFSUPERVISED = "SELFSUPERVISED" - INCREMENTAL = "INCREMENTAL" - FUTUREWORK = "FUTUREWORK" + Finetune = "Finetune" + Semisupervised = "Semisupervised" + Selfsupervised = "Selfsupervised" + Incremental = "Incremental" + Futurework = "Futurework" class LearningRateSchedule(ConfigurableEnum): @@ -65,7 +67,7 @@ class BaseLearningParameters(ParameterGroup): batch_size = configurable_integer( default_value=5, min_value=1, - max_value=512, + max_value=2048, header="Batch size", description="The number of training samples seen in each iteration of training. Increasing thisvalue "
A larger batch size has higher " @@ -275,7 +277,7 @@ class BaseAlgoBackendParameters(ParameterGroup): """BaseAlgoBackendParameters for OTX Algorithms.""" train_type = selectable( - default_value=TrainType.INCREMENTAL, + default_value=TrainType.Incremental, header="train type", description="Training scheme option that determines how to train the model", editable=False, diff --git a/otx/algorithms/common/tasks/nncf_base.py b/otx/algorithms/common/tasks/nncf_base.py index cafaf77273f..d32579383a0 100644 --- a/otx/algorithms/common/tasks/nncf_base.py +++ b/otx/algorithms/common/tasks/nncf_base.py @@ -37,6 +37,7 @@ from otx.algorithms.common.adapters.nncf.config import compose_nncf_config from otx.algorithms.common.utils.callback import OptimizationProgressCallback from otx.algorithms.common.utils.data import get_dataset +from otx.algorithms.common.utils.logger import get_logger from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings from otx.api.entities.datasets import DatasetEntity @@ -62,7 +63,6 @@ DatasetParamTypeCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger from .training_base import BaseTask @@ -132,9 +132,6 @@ def _init_train_data_cfg(self, dataset: DatasetEntity): labels=self._labels, ) - # Temparory remedy for cfg.pretty_text error - for label in self._labels: - label.hotkey = "a" return data_cfg def _init_nncf_cfg(self): diff --git a/otx/algorithms/common/tasks/training_base.py b/otx/algorithms/common/tasks/training_base.py index 3016685fd4e..ff18f928377 100644 --- a/otx/algorithms/common/tasks/training_base.py +++ b/otx/algorithms/common/tasks/training_base.py @@ -28,12 +28,22 @@ from mmcv.utils.config import Config, ConfigDict from otx.algorithms.common.adapters.mmcv.hooks import OTXLoggerHook +from otx.algorithms.common.adapters.mmcv.hooks.cancel_hook import CancelInterfaceHook +from otx.algorithms.common.adapters.mmcv.tasks.builder import build +from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage from otx.algorithms.common.adapters.mmcv.utils import ( align_data_config_with_recipe, get_configs_by_pairs, ) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + MPAConfig, + add_custom_hook_if_not_exists, + remove_custom_hook, + update_or_add_custom_hook, +) from otx.algorithms.common.configs import TrainType from otx.algorithms.common.utils import UncopiableDefaultDict +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.datasets import DatasetEntity from otx.api.entities.label import LabelEntity from otx.api.entities.model import ModelEntity, ModelPrecision, OptimizationMethod @@ -46,22 +56,12 @@ from otx.api.usecases.tasks.interfaces.unload_interface import IUnload from otx.api.utils.argument_checks import check_input_parameters_type from otx.core.data import caching -from otx.mpa.builder import build -from otx.mpa.modules.hooks.cancel_interface_hook import CancelInterfaceHook -from otx.mpa.stage import Stage -from otx.mpa.utils.config_utils import ( - MPAConfig, - add_custom_hook_if_not_exists, - remove_custom_hook, - update_or_add_custom_hook, -) -from otx.mpa.utils.logger import get_logger logger = get_logger() TRAIN_TYPE_DIR_PATH = { - TrainType.INCREMENTAL.name: ".", - TrainType.SELFSUPERVISED.name: "selfsl", - TrainType.SEMISUPERVISED.name: "semisl", + TrainType.Incremental.name: ".", + TrainType.Selfsupervised.name: "selfsl", + TrainType.Semisupervised.name: "semisl", } @@ -113,7 +113,7 @@ def __init__(self, task_config, 
task_environment: TaskEnvironment, output_path: self._learning_curves = UncopiableDefaultDict(OTXLoggerHook.Curve) self._is_training = False self._should_stop = False - self.cancel_interface = None + self.cancel_interface = None # type: Optional[CancelInterfaceHook] self.reserved_cancel = False self.on_hook_initialized = self.OnHookInitialized(self) diff --git a/otx/algorithms/common/utils/distance_utils.py b/otx/algorithms/common/utils/distance_utils.py new file mode 100644 index 00000000000..bd6403645a9 --- /dev/null +++ b/otx/algorithms/common/utils/distance_utils.py @@ -0,0 +1,15 @@ +"""Module for defining distance utils.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# +import torch.distributed as dist + + +def get_dist_info(): # pylint: disable=inconsistent-return-statements + """A function that retrieves information about the current distributed training environment.""" + if dist.is_available(): + # data distributed parallel + try: + return dist.get_rank(), dist.get_world_size(), True + except RuntimeError: + return 0, 1, False diff --git a/otx/algorithms/common/utils/ext_loader.py b/otx/algorithms/common/utils/ext_loader.py new file mode 100644 index 00000000000..a2641e814bc --- /dev/null +++ b/otx/algorithms/common/utils/ext_loader.py @@ -0,0 +1,21 @@ +"""Module for defining ext loader.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import importlib + + +def load_ext(path, funcs): + """A function that loads a module and verifies that the specified functions are present in it. + + :param path: str, the file path of the module to load. + :param funcs: list of str, the names of the functions to verify in the loaded module. + :return: the loaded module object. + :raises: AssertionError if any of the specified functions are missing from the loaded module. + """ + ext = importlib.import_module(path) + for fun in funcs: + assert hasattr(ext, fun), f"{fun} is missing in module {path}" + + return ext
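
A short sketch of the intended `load_ext` call pattern (editor's illustration, not part of the patch; the module path and symbol names are placeholders for whatever compiled extension a caller needs):

```python
# Minimal sketch (hypothetical extension module and symbols, not part of the patch).
from otx.algorithms.common.utils.ext_loader import load_ext

ext_module = load_ext("mmcv._ext", ["nms", "roi_align"])
nms = ext_module.nms  # load_ext has already asserted that this symbol exists
```
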
diff --git a/otx/mpa/utils/logger.py b/otx/algorithms/common/utils/logger.py similarity index 68% rename from otx/mpa/utils/logger.py rename to otx/algorithms/common/utils/logger.py index 75a5e6b9a5f..14929749f16 100644 --- a/otx/mpa/utils/logger.py +++ b/otx/algorithms/common/utils/logger.py @@ -1,3 +1,4 @@ +"""Module for defining custom logger.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -27,11 +28,11 @@ def _get_logger(): logger = logging.getLogger("mpa") logger.propagate = False - def print(message, *args, **kws): + def logger_print(message, *args, **kws): if logger.isEnabledFor(_CUSTOM_LOG_LEVEL): logger.log(_CUSTOM_LOG_LEVEL, message, *args, **kws) - logger.print = print + logger.print = logger_print logger.setLevel(LEVEL) console = logging.StreamHandler(sys.stdout) @@ -53,7 +54,13 @@ def print(message, *args, **kws): def config_logger(log_file, level="WARNING"): - global _LOG_DIR, _FILE_HANDLER + """A function that configures the logging system. + + :param log_file: str, a string representing the path to the log file. + :param level: str, a string representing the log level. Default is "WARNING". + :return: None + """ + global _LOG_DIR, _FILE_HANDLER # pylint: disable=global-statement if _FILE_HANDLER is not None: _logger.removeHandler(_FILE_HANDLER) del _FILE_HANDLER @@ -75,36 +82,49 @@ def _get_log_level(level): # get level number level_number = logging.getLevelName(level.upper()) if level_number not in [0, 10, 20, 30, 40, 50, _CUSTOM_LOG_LEVEL]: - msg = "Log level must be one of DEBUG/INFO/WARN/ERROR/CRITICAL/LOG" ", but {} is given.".format(level) + msg = f"Log level must be one of DEBUG/INFO/WARN/ERROR/CRITICAL/LOG, but {level} is given." raise ValueError(msg) return level_number def get_log_dir(): + """A function that retrieves the directory path of the log file. + + :return: str, a string representing the directory path of the log file. + """ return _LOG_DIR class _DummyLogger(logging.Logger): - def debug(message, *args, **kws): + def debug(self, message, *args, **kws): pass - def info(message, *args, **kws): + def info(self, message, *args, **kws): pass - def warning(message, *args, **kws): + def warning(self, message, *args, **kws): pass - def critical(message, *args, **kws): + def critical(self, message, *args, **kws): pass - def error(message, *args, **kws): + def error(self, message, *args, **kws): pass def local_master_only(func: Callable) -> Callable: + """A decorator that allows a function to be executed only by the local master process in a distributed training setup. + + Args: + func: the function to be decorated. + + Returns: + A wrapped function that can only be executed by the local master process. + """ + @functools.wraps(func) - def wrapper(*args, **kwargs): + def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements local_rank = 0 if dist.is_available() and dist.is_initialized(): local_rank = int(os.environ["LOCAL_RANK"]) @@ -121,6 +141,7 @@ def wrapper(*args, **kwargs): def get_logger(): + """Return logger.""" # if dist.is_available() and dist.is_initialized(): # rank = dist.get_rank() # else:
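
To illustrate the decorator, a minimal sketch (editor's illustration, assuming `local_master_only` is imported from the renamed module above): only the process with `LOCAL_RANK == 0` on each node executes the wrapped function.

```python
# Minimal sketch (assumed import path, not part of the patch).
from otx.algorithms.common.utils.logger import get_logger, local_master_only

logger = get_logger()


@local_master_only
def log_epoch_done(epoch):
    logger.info(f"epoch {epoch} finished")


log_epoch_done(1)  # no-op on non-master ranks in a distributed run
```
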
diff --git a/otx/mpa/utils/mo_wrapper.py b/otx/algorithms/common/utils/mo_wrapper.py similarity index 63% rename from otx/mpa/utils/mo_wrapper.py rename to otx/algorithms/common/utils/mo_wrapper.py index 1c68d5a181c..ab123917f48 100644 --- a/otx/mpa/utils/mo_wrapper.py +++ b/otx/algorithms/common/utils/mo_wrapper.py @@ -1,6 +1,8 @@ +"""Module for defining Model Optimizer (mo) wrapper.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # +# pylint: disable=import-error import os import subprocess @@ -14,7 +16,12 @@ def __mo_check_requirements(framework="onnx"): return mo_check_requirements(framework) -def check_requirements_with_version(target, framework=None): +def check_requirements_with_version(framework=None): + """A function that checks the compatibility of Model Optimizer with a specific framework version. + + :param framework: str, a string representing the framework name. Default is None. + :return: bool, True if the compatibility check is successful, False otherwise. + """ from mo.utils.version import get_version as mo_get_version mo_version = mo_get_version() @@ -29,15 +36,20 @@ def check_requirements_with_version(target, framework=None): # return False err_code = __mo_check_requirements(framework) if err_code: - print("mo_check_requriements returns: {}".format(err_code)) + print(f"mo_check_requirements returns: {err_code}") return False return True def check_requirements(framework="onnx"): + """A function that checks the compatibility of Model Optimizer with a specific framework version. + + :param framework: str, a string representing the framework name. Default is "onnx". + :return: bool, True if the compatibility check is successful, False otherwise. + """ err_code = __mo_check_requirements(framework) if err_code: - print("mo_check_requriements returns: {}".format(err_code)) + print(f"mo_check_requirements returns: {err_code}") return False return True @@ -85,15 +97,25 @@ def __mo_main_wrapper(argv, framework=None): def generate_ir(output_path, model_path, silent, save_xml=True, **mo_kwargs): + """A function that generates IR from a given model using the Model Optimizer. + + :param output_path: str, a string representing the path to save the IR files. + :param model_path: str, a string representing the path to the original model file. + :param silent: bool, a flag indicating whether to suppress the output messages. If True, + the function will redirect stdout to null device. If False, print messages to the console. + :param save_xml: bool, a flag indicating whether to save the XML file. Default is True. + :param mo_kwargs: keyword arguments for the Model Optimizer. + :return: tuple of int and str, the return code of the Model Optimizer and the output message. + """ # parse kwargs for the model optimizer mo_args = [] for key, value in mo_kwargs.items(): if key not in MO_ARGS: - return -1, "Not supported argument: {}".format(key) + return -1, f"Not supported argument: {key}" if value is not None: - mo_args.append("--{}={}".format(key, value)) + mo_args.append(f"--{key}={value}") else: - mo_args.append("--{}".format(key)) + mo_args.append(f"--{key}") is_output_dir_provided = False for mo_arg in mo_args: @@ -101,18 +123,18 @@ def generate_ir(output_path, model_path, silent, save_xml=True, **mo_kwargs): is_output_dir_provided = True break if not is_output_dir_provided: - mo_args.append("--output_dir={}".format(model_path)) - print("mo-args: {}".format(mo_args)) + mo_args.append(f"--output_dir={model_path}") + print(f"mo-args: {mo_args}") if silent: # redirect stdout messages from MO to null device - devnull = open("/dev/null", "w") + devnull = open("/dev/null", "w", encoding="utf-8") # pylint: disable=consider-using-with old_stdout = sys.stdout sys.stdout = devnull # ret = __mo_main_wrapper(mo_args, None) # ret = os.system('mo.py ' + ' '.join(mo_args)) - ret = subprocess.run(["mo"] + mo_args, shell=False).returncode + ret = subprocess.run(["mo"] + mo_args, shell=False, check=False).returncode if silent: # return back stdout @@ -120,18 +142,16 @@ # NOTE: mo returns non zero return code (245) even though it successfully generate IR cur_time = time.time() - time_threshold = 5 model_name = mo_kwargs.get("model_name", "model") if not ( ret == 245 and not {f"{model_name}.bin", f"{model_name}.xml"} - set(os.listdir(model_path)) and ( - os.path.getmtime(os.path.join(model_path, f"{model_name}.bin")) - cur_time < time_threshold - and os.path.getmtime(os.path.join(model_path, f"{model_name}.xml")) - cur_time < time_threshold + os.path.getmtime(os.path.join(model_path, f"{model_name}.bin")) - cur_time < 5 + and os.path.getmtime(os.path.join(model_path, f"{model_name}.xml")) - cur_time < 5 ) ): - err_msg = "Failed to run the model optimizer to convert a model" - return ret, err_msg + return ret, "Failed to run the model optimizer to convert a model" print("*** Model optimization completed ***") # move bin files to workspace @@ -148,4 +168,4 @@ def generate_ir(output_path, model_path, silent, save_xml=True,
**mo_kwargs): os.path.join(output_path, model_name + ".xml"), ) - return 0, "Saved outputs into {}".format(output_path) + return 0, f"Saved outputs into {output_path}" diff --git a/otx/mpa/modules/utils/task_adapt.py b/otx/algorithms/common/utils/task_adapt.py similarity index 53% rename from otx/mpa/modules/utils/task_adapt.py rename to otx/algorithms/common/utils/task_adapt.py index 260be73445a..1f726a3f1c1 100644 --- a/otx/mpa/modules/utils/task_adapt.py +++ b/otx/algorithms/common/utils/task_adapt.py @@ -1,17 +1,17 @@ +"""Module for defining task adapt related utils.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import numpy as np -import torch -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() def map_class_names(src_classes, dst_classes): - """Computes src to dst index mapping + """Computes src to dst index mapping. src2dst[src_idx] = dst_idx # according to class name matching, -1 for non-matched ones @@ -31,6 +31,11 @@ def map_class_names(src_classes, dst_classes): def refine_results(results): + """A function that concatenates the results of multiple runs into a single array. + + :param results: list, a list of dictionaries or arrays containing the results. + :return: numpy.ndarray or dict, the concatenated results. + """ if isinstance(results[0], dict): tasks = results[0].keys() res_refine = {} @@ -42,30 +47,41 @@ def refine_results(results): def extract_anchor_ratio(dataset, num_ratios=5): - ratio_info = [] - if hasattr(dataset, "dataset"): # to confirm dataset is wrapped. - dataset = dataset.dataset - for ds in dataset: - ori_shape = ds["img_metas"].data["ori_shape"] - img_shape = ds["img_metas"].data["img_shape"] - bboxes = ds["gt_bboxes"].data.numpy() + """A function that extracts anchor ratios from a given dataset. + + :param dataset: dataset object, an instance of a dataset. + :param num_ratios: int, the number of anchor ratios to be extracted. + :return: list, a list of extracted anchor ratios. + """ + ratio_dict = dict(info=[], step=-1) + dataset, _ = unwrap_dataset(dataset) + for item in dataset: + ori_shape = item["img_metas"].data["ori_shape"] + img_shape = item["img_metas"].data["img_shape"] + bboxes = item["gt_bboxes"].data.numpy() for bbox in bboxes: w_o = bbox[2] - bbox[0] h_o = bbox[3] - bbox[1] if w_o > 0.04 * ori_shape[1] and h_o > 0.04 * ori_shape[0]: w_i = w_o * img_shape[1] / ori_shape[1] h_i = h_o * img_shape[0] / ori_shape[0] - ratio_info.append(w_i / h_i) - ratio_info = np.sort(np.array(ratio_info)) - ratio_step = int(len(ratio_info) / num_ratios) + ratio_dict["info"].append(w_i / h_i) + ratio_dict["info"] = np.sort(np.array(ratio_dict["info"])) + ratio_dict["step"] = int(len(ratio_dict["info"]) / num_ratios) proposal_ratio = [] for i in range(num_ratios): - r = np.mean(ratio_info[i * ratio_step : (i + 1) * ratio_step]) + r = np.mean(ratio_dict["info"][i * ratio_dict["step"] : (i + 1) * ratio_dict["step"]]) proposal_ratio.append(r) return proposal_ratio def map_cat_and_cls_as_order(classes, cats): + """A function that maps classes and categories to label orders. + + :param classes: list, a list of class names. + :param cats: dict, a dictionary containing category information. + :return: tuple of dict and list, a dictionary mapping category IDs to label orders and a list of category IDs. 
+ """ cat2label = {} cat_ids = [] for i, cls in enumerate(classes): @@ -78,6 +94,11 @@ def map_cat_and_cls_as_order(classes, cats): def unwrap_dataset(dataset): + """A function that unwraps a dataset object to its base dataset. + + :param dataset: dataset object, an instance of a dataset. + :return: tuple of dataset object and int, the base dataset and the number of times to repeat the dataset. + """ times = 1 target_dataset = dataset while hasattr(target_dataset, "dataset"): diff --git a/otx/algorithms/common/utils/utils.py b/otx/algorithms/common/utils/utils.py index 4f13334b57b..ea3935d97f0 100644 --- a/otx/algorithms/common/utils/utils.py +++ b/otx/algorithms/common/utils/utils.py @@ -17,7 +17,7 @@ import importlib import inspect from collections import defaultdict -from typing import Callable, Literal, Optional, Tuple +from typing import Callable, Optional, Tuple import yaml @@ -94,77 +94,3 @@ def get_arg_spec( # noqa: C901 # pylint: disable=too-many-branches if spec.varkw is None and spec.varargs is None: break return tuple(args) - - -def left_vlaue_is_better(val1, val2, mode: Literal["max", "min"]) -> bool: - """Check left value is better than right value. - - Whether check it's greather or lesser is changed depending on 'model'. - - Args: - val1 : value to check that it's bigger than other value. - val2 : value to check that it's bigger than other value. - mode (Literal['max', 'min']): value to decide whether better means greater or lesser. - - Returns: - bool: whether val1 is better than val2. - """ - check_mode_input(mode) - if mode == "max": - return val1 > val2 - return val1 < val2 - - -def check_positive(value, variable_name: Optional[str] = None, error_message: Optional[str] = None): - """Validate that value is positivle. - - Args: - value (Any): value to validate. - variable_name (Optional[str], optional): name of value. It's used for error message. Defaults to None. - error_message (Optional[str], optional): Error message to use when type is different. Defaults to None. - - Raises: - ValueError: If value isn't positive, the error is raised. - """ - if value <= 0: - if error_message is not None: - message = error_message - elif variable_name: - message = f"{variable_name} should be positive.\n" f"your value : {value}" - else: - raise ValueError - raise ValueError(message) - - -def check_not_negative(value, variable_name: Optional[str] = None, error_message: Optional[str] = None): - """Validate that value isn't negative. - - Args: - value (Any): value to validate. - variable_name (Optional[str], optional): name of value. It's used for error message. Defaults to None. - error_message (Optional[str], optional): Error message to use when type is different. Defaults to None. - - Raises: - ValueError: If value is negative, the error is raised. - """ - if value < 0: - if error_message is not None: - message = error_message - elif variable_name: - message = f"{variable_name} should be positive.\n" f"your value : {value}" - else: - raise ValueError - raise ValueError(message) - - -def check_mode_input(mode: str): - """Validate that mode is 'max' or 'min'. - - Args: - mode (str): string to validate. - - Raises: - ValueError: If 'mode' is not both 'max' and 'min', the error is raised. 
- """ - if mode not in ["max", "min"]: - raise ValueError("mode should be max or min.\n" f"Your value : {mode}") diff --git a/otx/algorithms/detection/adapters/mmdet/datasets/task_adapt_dataset.py b/otx/algorithms/detection/adapters/mmdet/datasets/task_adapt_dataset.py index 5075f8db203..abd350b61ba 100644 --- a/otx/algorithms/detection/adapters/mmdet/datasets/task_adapt_dataset.py +++ b/otx/algorithms/detection/adapters/mmdet/datasets/task_adapt_dataset.py @@ -7,7 +7,10 @@ import numpy as np from mmdet.datasets import DATASETS, PIPELINES, build_dataset -from otx.mpa.modules.utils.task_adapt import map_cat_and_cls_as_order, map_class_names +from otx.algorithms.common.utils.task_adapt import ( + map_cat_and_cls_as_order, + map_class_names, +) # pylint: disable=invalid-name diff --git a/otx/algorithms/detection/adapters/mmdet/hooks/det_saliency_map_hook.py b/otx/algorithms/detection/adapters/mmdet/hooks/det_saliency_map_hook.py index 103996184f5..17d96acab0c 100644 --- a/otx/algorithms/detection/adapters/mmdet/hooks/det_saliency_map_hook.py +++ b/otx/algorithms/detection/adapters/mmdet/hooks/det_saliency_map_hook.py @@ -7,6 +7,9 @@ import torch import torch.nn.functional as F +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + BaseRecordingForwardHook, +) from otx.algorithms.detection.adapters.mmdet.models.heads.custom_atss_head import ( CustomATSSHead, ) @@ -19,7 +22,6 @@ from otx.algorithms.detection.adapters.mmdet.models.heads.custom_yolox_head import ( CustomYOLOXHead, ) -from otx.mpa.modules.hooks.recording_forward_hooks import BaseRecordingForwardHook # pylint: disable=too-many-locals diff --git a/otx/algorithms/detection/adapters/mmdet/models/backbones/mmov_backbone.py b/otx/algorithms/detection/adapters/mmdet/models/backbones/mmov_backbone.py index e976a44ebb9..baa12457bc9 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/backbones/mmov_backbone.py +++ b/otx/algorithms/detection/adapters/mmdet/models/backbones/mmov_backbone.py @@ -5,7 +5,7 @@ from mmdet.models.builder import BACKBONES -from otx.mpa.modules.ov.models.mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel @BACKBONES.register_module() diff --git a/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py b/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py index a5be0a455fa..853840d7f6e 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py +++ b/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_rpn_head.py @@ -11,8 +11,8 @@ from mmdet.models.builder import HEADS from mmdet.models.dense_heads.rpn_head import RPNHead -from otx.mpa.modules.ov.models.mmov_model import MMOVModel -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger +from otx.core.ov.models.mmov_model import MMOVModel logger = get_logger() diff --git a/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_ssd_head.py b/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_ssd_head.py index 08f82c5d25b..90a2b573cdd 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_ssd_head.py +++ b/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_ssd_head.py @@ -12,7 +12,7 @@ from mmdet.models.builder import HEADS from mmdet.models.dense_heads.ssd_head import SSDHead -from otx.mpa.modules.ov.models.mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel # TODO: Need to fix pylint issues 
# pylint: disable=redefined-argument-from-local, too-many-instance-attributes diff --git a/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_yolov3_head.py b/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_yolov3_head.py index 89a2b2af830..be9bfb1ecd2 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_yolov3_head.py +++ b/otx/algorithms/detection/adapters/mmdet/models/dense_heads/mmov_yolov3_head.py @@ -11,7 +11,7 @@ from mmdet.models.builder import HEADS from mmdet.models.dense_heads.yolo_head import YOLOV3Head -from otx.mpa.modules.ov.models.mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel # TODO: Need to fix pylint issues # pylint: disable=too-many-instance-attributes, keyword-arg-before-vararg diff --git a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py index c27feb43515..1875b880bbc 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py +++ b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_atss_detector.py @@ -9,13 +9,15 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.atss import ATSS +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + FeatureVectorHook, +) +from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import map_class_names from otx.algorithms.detection.adapters.mmdet.hooks.det_saliency_map_hook import ( DetSaliencyMapHook, ) -from otx.mpa.deploy.utils import is_mmdeploy_enabled -from otx.mpa.modules.hooks.recording_forward_hooks import FeatureVectorHook -from otx.mpa.modules.utils.task_adapt import map_class_names -from otx.mpa.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py index ed96bf83e51..c1d7428bdbc 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py +++ b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_maskrcnn_detector.py @@ -9,13 +9,13 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.mask_rcnn import MaskRCNN -from otx.mpa.deploy.utils import is_mmdeploy_enabled -from otx.mpa.modules.hooks.recording_forward_hooks import ( +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( ActivationMapHook, FeatureVectorHook, ) -from otx.mpa.modules.utils.task_adapt import map_class_names -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import map_class_names from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py index 32f6d9ffe00..1bd442270ac 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py +++ 
b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_single_stage_detector.py @@ -9,13 +9,15 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.single_stage import SingleStageDetector +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + FeatureVectorHook, +) +from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import map_class_names from otx.algorithms.detection.adapters.mmdet.hooks.det_saliency_map_hook import ( DetSaliencyMapHook, ) -from otx.mpa.deploy.utils import is_mmdeploy_enabled -from otx.mpa.modules.hooks.recording_forward_hooks import FeatureVectorHook -from otx.mpa.modules.utils.task_adapt import map_class_names -from otx.mpa.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py index 0d007c52368..552891c8978 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py +++ b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_two_stage_detector.py @@ -8,8 +8,8 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.two_stage import TwoStageDetector -from otx.mpa.modules.utils.task_adapt import map_class_names -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import map_class_names from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py index 23a072e31de..1700bb0071e 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py +++ b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_vfnet_detector.py @@ -8,8 +8,8 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.vfnet import VFNet -from otx.mpa.modules.utils.task_adapt import map_class_names -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import map_class_names from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py index 26b4e2712eb..352d713ceb7 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py +++ b/otx/algorithms/detection/adapters/mmdet/models/detectors/custom_yolox_detector.py @@ -9,13 +9,15 @@ from mmdet.models.builder import DETECTORS from mmdet.models.detectors.yolox import YOLOX +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + FeatureVectorHook, +) +from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import map_class_names from otx.algorithms.detection.adapters.mmdet.hooks.det_saliency_map_hook 
import ( DetSaliencyMapHook, ) -from otx.mpa.deploy.utils import is_mmdeploy_enabled -from otx.mpa.modules.hooks.recording_forward_hooks import FeatureVectorHook -from otx.mpa.modules.utils.task_adapt import map_class_names -from otx.mpa.utils.logger import get_logger from .l2sp_detector_mixin import L2SPDetectorMixin from .sam_detector_mixin import SAMDetectorMixin diff --git a/otx/algorithms/detection/adapters/mmdet/models/detectors/unbiased_teacher.py b/otx/algorithms/detection/adapters/mmdet/models/detectors/unbiased_teacher.py index 164f337fb01..5b5d9639c34 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/detectors/unbiased_teacher.py +++ b/otx/algorithms/detection/adapters/mmdet/models/detectors/unbiased_teacher.py @@ -11,7 +11,7 @@ from mmdet.models import DETECTORS, build_detector from mmdet.models.detectors import BaseDetector -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from .sam_detector_mixin import SAMDetectorMixin diff --git a/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_fpn.py b/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_fpn.py index b4fbe7ba01d..bbb9cea7849 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_fpn.py +++ b/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_fpn.py @@ -10,7 +10,7 @@ from mmdet.models.necks.fpn import FPN from torch import nn -from otx.mpa.modules.ov.models.mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel # TODO: Need to fix pylint issues # pylint: disable=keyword-arg-before-vararg, too-many-locals diff --git a/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_ssd_neck.py b/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_ssd_neck.py index b65403142cf..27df619302c 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_ssd_neck.py +++ b/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_ssd_neck.py @@ -12,7 +12,7 @@ from mmdet.models.builder import NECKS from torch import nn -from otx.mpa.modules.ov.models.mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel # pylint: disable=too-many-arguments, too-many-locals diff --git a/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_yolov3_neck.py b/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_yolov3_neck.py index d879d2f15d5..24473f4c34e 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_yolov3_neck.py +++ b/otx/algorithms/detection/adapters/mmdet/models/necks/mmov_yolov3_neck.py @@ -10,8 +10,8 @@ from mmdet.models.builder import NECKS from mmdet.models.necks.yolo_neck import YOLOV3Neck -from otx.mpa.modules.ov.models.mmov_model import MMOVModel -from otx.mpa.modules.ov.models.parser_mixin import ParserMixin +from otx.core.ov.models.mmov_model import MMOVModel +from otx.core.ov.models.parser_mixin import ParserMixin # type: ignore[attr-defined] @NECKS.register_module() diff --git a/otx/algorithms/detection/adapters/mmdet/models/roi_heads/bbox_heads/mmov_bbox_head.py b/otx/algorithms/detection/adapters/mmdet/models/roi_heads/bbox_heads/mmov_bbox_head.py index f741ba3ffd7..c144fe9e7c4 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/roi_heads/bbox_heads/mmov_bbox_head.py +++ b/otx/algorithms/detection/adapters/mmdet/models/roi_heads/bbox_heads/mmov_bbox_head.py @@ -11,7 +11,7 @@ from mmdet.models.builder import HEADS from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead -from otx.mpa.modules.ov.models.mmov_model import 
MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel # TODO: Need to fix pylint issues # pylint: disable=too-many-instance-attributes, too-many-arguments, keyword-arg-before-vararg, dangerous-default-value diff --git a/otx/algorithms/detection/adapters/mmdet/models/roi_heads/mask_heads/mmov_mask_head.py b/otx/algorithms/detection/adapters/mmdet/models/roi_heads/mask_heads/mmov_mask_head.py index 7343fa92043..29475519b44 100644 --- a/otx/algorithms/detection/adapters/mmdet/models/roi_heads/mask_heads/mmov_mask_head.py +++ b/otx/algorithms/detection/adapters/mmdet/models/roi_heads/mask_heads/mmov_mask_head.py @@ -10,7 +10,7 @@ from mmdet.models.builder import HEADS from mmdet.models.roi_heads.mask_heads.fcn_mask_head import FCNMaskHead -from otx.mpa.modules.ov.models.mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel # TODO: Need to fix pylint issues # pylint: disable=too-many-instance-attributes, too-many-arguments, keyword-arg-before-vararg, dangerous-default-value diff --git a/otx/algorithms/detection/adapters/mmdet/nncf/patches.py b/otx/algorithms/detection/adapters/mmdet/nncf/patches.py index 5e36c254654..da640d248c9 100644 --- a/otx/algorithms/detection/adapters/mmdet/nncf/patches.py +++ b/otx/algorithms/detection/adapters/mmdet/nncf/patches.py @@ -19,6 +19,7 @@ from mmdet.models.roi_heads.bbox_heads.sabl_head import SABLHead from mmdet.models.roi_heads.mask_heads.fcn_mask_head import FCNMaskHead +from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled from otx.algorithms.common.adapters.nncf import ( NNCF_PATCHER, is_in_nncf_tracing, @@ -26,7 +27,6 @@ no_nncf_trace_wrapper, ) from otx.algorithms.common.adapters.nncf.patches import nncf_trace_context -from otx.mpa.deploy.utils import is_mmdeploy_enabled HEADS_TARGETS = dict( classes=( diff --git a/otx/mpa/det/__init__.py b/otx/algorithms/detection/adapters/mmdet/tasks/__init__.py similarity index 75% rename from otx/mpa/det/__init__.py rename to otx/algorithms/detection/adapters/mmdet/tasks/__init__.py index 8fd0f6a8480..b885231de57 100644 --- a/otx/mpa/det/__init__.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/__init__.py @@ -1,7 +1,10 @@ -# Copyright (C) 2022 Intel Corporation +"""Initialization of OTX Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # +import otx.algorithms.common.adapters.mmcv.hooks +import otx.algorithms.common.adapters.mmcv.hooks.composed_dataloaders_hook import otx.algorithms.detection.adapters.mmdet.datasets.pipelines.torchvision2mmdet import otx.algorithms.detection.adapters.mmdet.datasets.task_adapt_dataset import otx.algorithms.detection.adapters.mmdet.hooks.det_saliency_map_hook @@ -9,8 +12,6 @@ import otx.algorithms.detection.adapters.mmdet.models.detectors import otx.algorithms.detection.adapters.mmdet.models.heads import otx.algorithms.detection.adapters.mmdet.models.losses -import otx.mpa.modules.hooks -import otx.mpa.modules.hooks.composed_dataloaders_hook # flake8: noqa from .
import explainer, exporter, incremental, inferrer, semisl, stage, trainer diff --git a/otx/mpa/det/explainer.py b/otx/algorithms/detection/adapters/mmdet/tasks/explainer.py similarity index 88% rename from otx/mpa/det/explainer.py rename to otx/algorithms/detection/adapters/mmdet/tasks/explainer.py index 0ddf52f00a4..50f4593ba3e 100644 --- a/otx/mpa/det/explainer.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/explainer.py @@ -1,27 +1,29 @@ -# Copyright (C) 2022 Intel Corporation +"""Explain task for OTX Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # +import torch from mmcv.utils import Config, ConfigDict from mmdet.datasets import build_dataloader as mmdet_build_dataloader from mmdet.datasets import build_dataset as mmdet_build_dataset from mmdet.datasets import replace_ImageToTensor +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + ActivationMapHook, + EigenCamHook, +) +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES from otx.algorithms.common.adapters.mmcv.utils import ( build_data_parallel, build_dataloader, build_dataset, ) +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.datasets import ImageTilingDataset from otx.algorithms.detection.adapters.mmdet.hooks.det_saliency_map_hook import ( DetSaliencyMapHook, ) -from otx.mpa.modules.hooks.recording_forward_hooks import ( - ActivationMapHook, - EigenCamHook, -) -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger from .stage import DetectionStage @@ -35,12 +37,15 @@ @STAGES.register_module() class DetectionExplainer(DetectionStage): + """Explainer for Object Detection.""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dataset = None + self.explainer_hook = None def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run explain stage for detection + """Run explain stage for detection. 
- Configuration - Environment setup @@ -61,7 +66,9 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): return dict(outputs=outputs) + # pylint: disable=too-many-locals, too-many-branches def explain(self, cfg, model_builder=None): + """Main explain function.""" # TODO: distributed inference data_cfg = cfg.data.test.copy() @@ -130,15 +137,6 @@ def explain(self, cfg, model_builder=None): # Model cfg.model.pretrained = None - if cfg.model.get("neck"): - if isinstance(cfg.model.neck, list): - for neck_cfg in cfg.model.neck: - if neck_cfg.get("rfp_backbone"): - if neck_cfg.rfp_backbone.get("pretrained"): - neck_cfg.rfp_backbone.pretrained = None - elif cfg.model.neck.get("rfp_backbone"): - if cfg.model.neck.rfp_backbone.get("pretrained"): - cfg.model.neck.rfp_backbone.pretrained = None # TODO: Check Inference FP16 Support model = self.build_model(cfg, model_builder, fp16=False) model.CLASSES = target_classes @@ -153,7 +151,8 @@ def explain(self, cfg, model_builder=None): eval_predictions = [] with self.explainer_hook(feature_model) as saliency_hook: for data in test_dataloader: - result = model(return_loss=False, rescale=True, **data) + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) eval_predictions.extend(result) saliency_maps = saliency_hook.records diff --git a/otx/mpa/det/exporter.py b/otx/algorithms/detection/adapters/mmdet/tasks/exporter.py similarity index 76% rename from otx/mpa/det/exporter.py rename to otx/algorithms/detection/adapters/mmdet/tasks/exporter.py index 6af24b6801a..a9ad7391daa 100644 --- a/otx/mpa/det/exporter.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/exporter.py @@ -1,14 +1,15 @@ -# Copyright (C) 2022 Intel Corporation +"""Export task for OTX Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import numpy as np from mmcv.runner import wrap_fp16_model -from otx.mpa.deploy.utils import sync_batchnorm_2_batchnorm -from otx.mpa.exporter_mixin import ExporterMixin -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.adapters.mmdeploy.utils import sync_batchnorm_2_batchnorm +from otx.algorithms.common.utils.logger import get_logger from .stage import DetectionStage @@ -17,8 +18,10 @@ @STAGES.register_module() class DetectionExporter(ExporterMixin, DetectionStage): + """Export class for object detection.""" + def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 - """Run exporter stage""" + """Run exporter stage.""" precision = kwargs.get("precision", "FP32") model_builder = kwargs.get("model_builder", self.MODEL_BUILDER) @@ -43,10 +46,11 @@ def model_builder_helper(*args, **kwargs): @staticmethod def naive_export(output_dir, model_builder, precision, cfg, model_name="model"): + """Export using pytorch backend.""" from mmdet.apis.inference import LoadImage from mmdet.datasets.pipelines import Compose - from ..deploy.apis import NaiveExporter + from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter def get_fake_data(cfg, orig_img_shape=(128, 128, 3)): pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] diff --git a/otx/mpa/det/incremental/__init__.py b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/__init__.py similarity index 66% rename from otx/mpa/det/incremental/__init__.py rename to 
otx/algorithms/detection/adapters/mmdet/tasks/incremental/__init__.py index 9ddb234e8da..f82cc92b74e 100644 --- a/otx/mpa/det/incremental/__init__.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/__init__.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Initialize incremental learning for OTX Detection.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/algorithms/detection/adapters/mmdet/tasks/incremental/inferrer.py b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/inferrer.py new file mode 100644 index 00000000000..8d9227cf605 --- /dev/null +++ b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/inferrer.py @@ -0,0 +1,21 @@ +"""Inference of incremental learning models for OTX detection.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.tasks.inferrer import DetectionInferrer + +from .stage import IncrDetectionStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class IncrDetectionInferrer(IncrDetectionStage, DetectionInferrer): + """Inferencer for OTX Detection incremental learning with MMDET.""" + + def __init__(self, **kwargs): + IncrDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/det/incremental/stage.py b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/stage.py similarity index 75% rename from otx/mpa/det/incremental/stage.py rename to otx/algorithms/detection/adapters/mmdet/tasks/incremental/stage.py index fb17d9ed8ca..14eaf0be833 100644 --- a/otx/mpa/det/incremental/stage.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/stage.py @@ -1,24 +1,27 @@ -# Copyright (C) 2022 Intel Corporation +"""Stage for incremental object detection.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from mmcv import ConfigDict -from otx.mpa.det.stage import DetectionStage -from otx.mpa.utils.config_utils import update_or_add_custom_hook -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + update_or_add_custom_hook, +) +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.tasks.stage import DetectionStage logger = get_logger() class IncrDetectionStage(DetectionStage): - """Patch config to support incremental learning for object detection""" + """Patch config to support incremental learning for object detection.""" def __init__(self, **kwargs): super().__init__(**kwargs) def configure_task(self, cfg, training, **kwargs): - """Patch config to support incremental learning""" + """Patch config to support incremental learning.""" super().configure_task(cfg, training, **kwargs) if "task_adapt" in cfg and self.task_adapt_type == "mpa": self.configure_task_adapt_hook(cfg) diff --git a/otx/algorithms/detection/adapters/mmdet/tasks/incremental/trainer.py b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/trainer.py new file mode 100644 index 00000000000..1a9d5ceeab9 --- /dev/null +++ b/otx/algorithms/detection/adapters/mmdet/tasks/incremental/trainer.py @@ -0,0 +1,21 @@ +"""Train task for Incremental Learning for OTX Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from
otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.tasks.trainer import DetectionTrainer + +from .stage import IncrDetectionStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class IncrDetectionTrainer(IncrDetectionStage, DetectionTrainer): + """Train class for incremental object detection.""" + + def __init__(self, **kwargs): + IncrDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/det/inferrer.py b/otx/algorithms/detection/adapters/mmdet/tasks/inferrer.py similarity index 84% rename from otx/mpa/det/inferrer.py rename to otx/algorithms/detection/adapters/mmdet/tasks/inferrer.py index 066e44971c4..d0e1701933a 100644 --- a/otx/mpa/det/inferrer.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/inferrer.py @@ -1,10 +1,10 @@ -# Copyright (C) 2022 Intel Corporation +"""Inference task for OTX Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from contextlib import nullcontext -import torch from mmcv.utils import Config, ConfigDict from mmdet.apis import single_gpu_test from mmdet.datasets import build_dataloader as mmdet_build_dataloader @@ -12,21 +12,21 @@ from mmdet.datasets import replace_ImageToTensor from mmdet.models.detectors import TwoStageDetector +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + ActivationMapHook, + FeatureVectorHook, +) +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES from otx.algorithms.common.adapters.mmcv.utils import ( build_data_parallel, build_dataloader, build_dataset, ) +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.datasets import ImageTilingDataset from otx.algorithms.detection.adapters.mmdet.hooks.det_saliency_map_hook import ( DetSaliencyMapHook, ) -from otx.mpa.modules.hooks.recording_forward_hooks import ( - ActivationMapHook, - FeatureVectorHook, -) -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger from .stage import DetectionStage @@ -35,12 +35,14 @@ @STAGES.register_module() class DetectionInferrer(DetectionStage): + """Class for object detection inference.""" + def __init__(self, **kwargs): super().__init__(**kwargs) self.dataset = None def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run inference stage for detection + """Run inference stage for detection. 
- Configuration - Environment setup @@ -58,11 +60,11 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): model_builder = kwargs.get("model_builder", None) dump_features = kwargs.get("dump_features", False) dump_saliency_map = kwargs.get("dump_saliency_map", False) - eval = kwargs.get("eval", False) + do_eval = kwargs.get("eval", False) outputs = self.infer( cfg, model_builder=model_builder, - eval=eval, + do_eval=do_eval, dump_features=dump_features, dump_saliency_map=dump_saliency_map, ) @@ -74,21 +76,10 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # output_file_path=output_file_path, outputs=outputs ) - # TODO: save in json - """ - class NumpyEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, np.ndarray): - return obj.tolist() - return json.JSONEncoder.default(self, obj) - - a = np.array([[1, 2, 3], [4, 5, 6]]) - json_dump = json.dumps({'a': a, 'aa': [2, (2, 3, 4), a], 'bb': [2]}, - cls=NumpyEncoder) - print(json_dump) - """ - def infer(self, cfg, model_builder=None, eval=False, dump_features=False, dump_saliency_map=False): + # pylint: disable=too-many-locals, too-many-branches, too-many-statements + def infer(self, cfg, model_builder=None, do_eval=False, dump_features=False, dump_saliency_map=False): + """Main inference function.""" # TODO: distributed inference data_cfg = cfg.data.test.copy() @@ -150,15 +141,6 @@ def infer(self, cfg, model_builder=None, eval=False, dump_features=False, dump_s # Model cfg.model.pretrained = None - if cfg.model.get("neck"): - if isinstance(cfg.model.neck, list): - for neck_cfg in cfg.model.neck: - if neck_cfg.get("rfp_backbone"): - if neck_cfg.rfp_backbone.get("pretrained"): - neck_cfg.rfp_backbone.pretrained = None - elif cfg.model.neck.get("rfp_backbone"): - if cfg.model.neck.rfp_backbone.get("pretrained"): - cfg.model.neck.rfp_backbone.pretrained = None # TODO: Check Inference FP16 Support model = self.build_model(cfg, model_builder, fp16=False) model.CLASSES = target_classes @@ -181,6 +163,7 @@ def infer(self, cfg, model_builder=None, eval=False, dump_features=False, dump_s else: saliency_hook = DetSaliencyMapHook(feature_model) + # pylint: disable=no-member eval_predictions = [] with FeatureVectorHook(feature_model) if dump_features else nullcontext() as feature_vector_hook: with saliency_hook: @@ -192,7 +175,7 @@ def infer(self, cfg, model_builder=None, eval=False, dump_features=False, dump_s cfg.evaluation.pop(key, None) metric = None - if eval: + if do_eval: metric = self.dataset.evaluate(eval_predictions, **cfg.evaluation) metric = metric["mAP"] if isinstance(cfg.evaluation.metric, list) else metric[cfg.evaluation.metric] diff --git a/otx/mpa/det/semisl/__init__.py b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/__init__.py similarity index 76% rename from otx/mpa/det/semisl/__init__.py rename to otx/algorithms/detection/adapters/mmdet/tasks/semisl/__init__.py index 6e22dd926bb..f85e88b32f2 100644 --- a/otx/mpa/det/semisl/__init__.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/__init__.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Initialization of Semi-SL Object Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/mpa/det/semisl/exporter.py b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/exporter.py similarity index 54% rename from otx/mpa/det/semisl/exporter.py rename to otx/algorithms/detection/adapters/mmdet/tasks/semisl/exporter.py index cfe0190568a..8570e6daf60 100644 --- 
a/otx/mpa/det/semisl/exporter.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/exporter.py @@ -1,10 +1,11 @@ -# Copyright (C) 2022 Intel Corporation +"""Exporter for Semi-SL Object Detection.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from otx.mpa.det.exporter import DetectionExporter -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.tasks.exporter import DetectionExporter from .stage import SemiSLDetectionStage @@ -13,6 +14,8 @@ @STAGES.register_module() class SemiSLDetectionExporter(SemiSLDetectionStage, DetectionExporter): + """Exporter class for Semi-SL object detection.""" + def __init__(self, **kwargs): SemiSLDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/det/semisl/inferrer.py b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/inferrer.py similarity index 56% rename from otx/mpa/det/semisl/inferrer.py rename to otx/algorithms/detection/adapters/mmdet/tasks/semisl/inferrer.py index 9f09db85ae0..e2b3493bd06 100644 --- a/otx/mpa/det/semisl/inferrer.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/inferrer.py @@ -1,18 +1,22 @@ +"""Inference task for Semi-SL OTX Detection with MMDET.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from otx.mpa.det.inferrer import DetectionInferrer -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.tasks.inferrer import DetectionInferrer from .stage import SemiSLDetectionStage logger = get_logger() +# pylint: disable=super-init-not-called @STAGES.register_module() class SemiSLDetectionInferrer(SemiSLDetectionStage, DetectionInferrer): + """Inferrer class for Semi-SL object detection.""" + def __init__(self, **kwargs): SemiSLDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/det/semisl/stage.py b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/stage.py similarity index 76% rename from otx/mpa/det/semisl/stage.py rename to otx/algorithms/detection/adapters/mmdet/tasks/semisl/stage.py index 9404754eeeb..a60261fb40a 100644 --- a/otx/mpa/det/semisl/stage.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/stage.py @@ -1,25 +1,25 @@ -# Copyright (C) 2022 Intel Corporation +"""Stage for Semi-SL training with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from mmcv import ConfigDict - -from otx.mpa.det.stage import DetectionStage -from otx.mpa.utils.config_utils import update_or_add_custom_hook -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.tasks.stage import DetectionStage logger = get_logger() class SemiSLDetectionStage(DetectionStage): - """Patch config to support semi supervised learning for object detection""" + """Patch config to support semi-supervised learning for object detection.""" def __init__(self, **kwargs): super().__init__(**kwargs) + self.task_adapt_type = None + self.task_adapt_op = "REPLACE" - def configure_data(self, cfg, training, data_cfg, **kwargs): + def configure_data(self, cfg, training, data_cfg): """Patch cfg.data.""" - super().configure_data(cfg, training,
data_cfg, **kwargs) + super().configure_data(cfg, training, data_cfg) # Set unlabeled data hook if training: if cfg.data.get("unlabeled", False) and cfg.data.unlabeled.get("otx_dataset", False): @@ -27,7 +27,7 @@ def configure_data(self, cfg, training, data_cfg, **kwargs): cfg.data.unlabeled.pipeline = cfg.data.train.pipeline.copy() self.configure_unlabeled_dataloader(cfg, self.distributed) - def configure_task(self, cfg, training, **kwargs): + def configure_task(self, cfg, training): """Patch config to support training algorithm.""" logger.info(f"Semi-SL task config!!!!: training={training}") if "task_adapt" in cfg: diff --git a/otx/algorithms/detection/adapters/mmdet/tasks/semisl/trainer.py b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/trainer.py new file mode 100644 index 00000000000..522e88ce16c --- /dev/null +++ b/otx/algorithms/detection/adapters/mmdet/tasks/semisl/trainer.py @@ -0,0 +1,21 @@ +"""Semi-SL Object detection Task with MMDET.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.detection.adapters.mmdet.tasks.trainer import DetectionTrainer + +from .stage import SemiSLDetectionStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class SemiSLDetectionTrainer(SemiSLDetectionStage, DetectionTrainer): + """Train class for semi-sl object detection.""" + + def __init__(self, **kwargs): + SemiSLDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/det/stage.py b/otx/algorithms/detection/adapters/mmdet/tasks/stage.py similarity index 85% rename from otx/mpa/det/stage.py rename to otx/algorithms/detection/adapters/mmdet/tasks/stage.py index c0ad939f9e9..41c8914f91a 100644 --- a/otx/mpa/det/stage.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/stage.py @@ -1,13 +1,17 @@ -# Copyright (C) 2022 Intel Corporation +"""Base stage for OTX Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from mmcv.utils import ConfigDict +from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + recursively_update_cfg, + update_or_add_custom_hook, +) +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.utils.builder import build_detector -from otx.mpa.stage import Stage -from otx.mpa.utils.config_utils import recursively_update_cfg, update_or_add_custom_hook -from otx.mpa.utils.logger import get_logger logger = get_logger() @@ -18,28 +22,29 @@ class DetectionStage(Stage): MODEL_BUILDER = build_detector def configure(self, model_cfg, model_ckpt, data_cfg, training=True, **kwargs): - """Create MMCV-consumable config from given inputs""" + """Create MMCV-consumable config from given inputs.""" logger.info(f"configure!: training={training}") cfg = self.cfg - self.configure_model(cfg, model_cfg, training, **kwargs) + self.configure_model(cfg, model_cfg, **kwargs) self.configure_ckpt(cfg, model_ckpt, kwargs.get("pretrained", None)) - self.configure_data(cfg, training, data_cfg, **kwargs) + self.configure_data(cfg, training, data_cfg) self.configure_regularization(cfg, training) - self.configure_hyperparams(cfg, training, **kwargs) - self.configure_task(cfg, training, **kwargs) + self.configure_hyperparams(cfg, **kwargs) + self.configure_task(cfg, training) 
self.configure_hook(cfg) return cfg - def configure_model(self, cfg, model_cfg, training, **kwargs): # noqa: C901 + def configure_model(self, cfg, model_cfg, **kwargs): # noqa: C901 """Patch config's model. + Replace cfg.model to model_cfg Change model type to super type Patch for OMZ backbones """ if model_cfg: if hasattr(model_cfg, "model"): - cfg.merge_from_dict(model_cfg._cfg_dict) + cfg.merge_from_dict(model_cfg) else: raise ValueError( "Unexpected config was passed through 'model_cfg'. " @@ -58,8 +63,8 @@ def configure_model(self, cfg, model_cfg, training, **kwargs): # noqa: C901 ir_model_path = kwargs.get("ir_model_path") if ir_model_path: - def is_mmov_model(k, v): - if k == "type" and v.startswith("MMOV"): + def is_mmov_model(key, value): + if key == "type" and value.startswith("MMOV"): return True return False @@ -71,8 +76,9 @@ def is_mmov_model(k, v): {"model_path": ir_model_path, "weight_path": ir_weight_path, "init_weight": ir_weight_init}, ) - def configure_data(self, cfg, training, data_cfg, **kwargs): # noqa: C901 + def configure_data(self, cfg, training, data_cfg): # noqa: C901 """Patch cfg.data. + Merge cfg and data_cfg Match cfg.data.train.type to super_type Patch for unlabeled data path ==> This may be moved to SemiDetectionStage """ if data_cfg: cfg.merge_from_dict(data_cfg) - super().configure_data(cfg, training, **kwargs) + super().configure_data(cfg, training) super_type = cfg.data.train.pop("super_type", None) if super_type: cfg.data.train.org_type = cfg.data.train.type @@ -105,20 +111,20 @@ def configure_regularization(self, cfg, training): # noqa: C901 if "weight_decay" in cfg.optimizer: cfg.optimizer.weight_decay = 0.0 - def configure_hyperparams(self, cfg, training, **kwargs): + def configure_hyperparams(self, cfg, **kwargs): """Patch optimization hyperparameters such as batch size and learning rate.""" if "hyperparams" in cfg: hyperparams = kwargs.get("hyperparams", None) if hyperparams is not None: - bs = hyperparams.get("bs", None) - if bs is not None: - cfg.data.samples_per_gpu = bs + batch_size = hyperparams.get("batch_size", None) + if batch_size is not None: + cfg.data.samples_per_gpu = batch_size - lr = hyperparams.get("lr", None) - if lr is not None: - cfg.optimizer.lr = lr + learning_rate = hyperparams.get("lr", None) + if learning_rate is not None: + cfg.optimizer.lr = learning_rate - def configure_task(self, cfg, training, **kwargs): + def configure_task(self, cfg, training): """Patch config to support training algorithm.""" if "task_adapt" in cfg: logger.info(f"task config!!!!: training={training}") @@ -138,6 +144,7 @@ def configure_task(self, cfg, training, **kwargs): src_data_cfg = self.get_data_cfg(cfg, "train") src_data_cfg.pop("old_new_indices", None) + # pylint: disable=too-many-branches def configure_classes(self, cfg): """Patch classes for model and dataset.""" org_model_classes = self.get_model_classes(cfg) @@ -198,12 +205,12 @@ def configure_classes(self, cfg): self.data_classes = data_classes def configure_task_data_pipeline(self, cfg): - # Trying to alter class indices of training data according to model class order + """Alter class indices of training data according to the model class order.""" tr_data_cfg = self.get_data_cfg(cfg, "train") class_adapt_cfg = dict(type="AdaptClassLabels", src_classes=self.data_classes, dst_classes=self.model_classes) pipeline_cfg = tr_data_cfg.pipeline - for i, op in enumerate(pipeline_cfg): - if op["type"] ==
"LoadAnnotations": # insert just after this op + for i, operation in enumerate(pipeline_cfg): + if operation["type"] == "LoadAnnotations": # insert just after this operation op_next_ann = pipeline_cfg[i + 1] if i + 1 < len(pipeline_cfg) else {} if op_next_ann.get("type", "") == class_adapt_cfg["type"]: op_next_ann.update(class_adapt_cfg) @@ -211,7 +218,8 @@ def configure_task_data_pipeline(self, cfg): pipeline_cfg.insert(i + 1, class_adapt_cfg) break - def configure_anchor(self, cfg, proposal_ratio=None): + def configure_anchor(self, cfg): + """Patch anchor which are generated from dataset.""" if cfg.model.type in ["SingleStageDetector", "CustomSingleStageDetector"]: anchor_cfg = cfg.model.bbox_head.anchor_generator if anchor_cfg.type == "SSDAnchorGeneratorClustered": @@ -219,6 +227,7 @@ def configure_anchor(self, cfg, proposal_ratio=None): def configure_bbox_head(self, cfg): """Patch bbox head in detector for class incremental learning. + Most of patching are related with hyper-params in focal loss """ if cfg.get("task", "detection") == "detection": @@ -279,6 +288,7 @@ def configure_ema(cfg): @staticmethod def add_yolox_hooks(cfg): + """Add YOLOX related hooks.""" update_or_add_custom_hook( cfg, ConfigDict( diff --git a/otx/mpa/det/trainer.py b/otx/algorithms/detection/adapters/mmdet/tasks/trainer.py similarity index 88% rename from otx/mpa/det/trainer.py rename to otx/algorithms/detection/adapters/mmdet/tasks/trainer.py index 60abf0c4c36..825aa898164 100644 --- a/otx/mpa/det/trainer.py +++ b/otx/algorithms/detection/adapters/mmdet/tasks/trainer.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Base trainer for OTX Object Detection with MMDET.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -13,9 +14,9 @@ from mmdet.utils import collect_env from torch import nn -from otx.mpa.modules.utils.task_adapt import extract_anchor_ratio -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.common.utils.task_adapt import extract_anchor_ratio from .stage import DetectionStage @@ -24,11 +25,14 @@ @STAGES.register_module() class DetectionTrainer(DetectionStage): + """Trainer class for MMDET.""" + def __init__(self, **kwargs): super().__init__(**kwargs) + # pylint: disable=too-many-locals, too-many-function-args def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run training stage for detection + """Run training stage for detection. 
- Configuration - Environment setup @@ -50,7 +54,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): env_info_dict = collect_env() env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) dash_line = "-" * 60 + "\n" - logger.info("Environment info:\n" + dash_line + env_info + "\n" + dash_line) + logger.info(f"Environment info:\n{dash_line}{env_info}\n{dash_line}") # Data datasets = [build_dataset(cfg.data.train)] @@ -103,7 +107,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # cfg.dump(osp.join(cfg.work_dir, 'config.py')) # logger.info(f'Config:\n{cfg.pretty_text}') - validate = True if cfg.data.get("val", None) else False + validate = "val" in cfg.data train_detector( model, datasets, diff --git a/otx/algorithms/detection/adapters/mmdet/utils/builder.py b/otx/algorithms/detection/adapters/mmdet/utils/builder.py index 593388b5eaf..61a50ab80db 100644 --- a/otx/algorithms/detection/adapters/mmdet/utils/builder.py +++ b/otx/algorithms/detection/adapters/mmdet/utils/builder.py @@ -10,7 +10,7 @@ from mmcv.runner import load_checkpoint from mmcv.utils import Config, ConfigDict, get_logger -from otx.mpa.utils.logger import LEVEL +from otx.algorithms.common.utils.logger import LEVEL logger = get_logger("mmdet") diff --git a/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py b/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py index d85d222053b..545017b1ad3 100644 --- a/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py +++ b/otx/algorithms/detection/adapters/mmdet/utils/config_utils.py @@ -30,6 +30,7 @@ remove_from_config, update_config, ) +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.configs.base import DetectionConfig from otx.algorithms.detection.utils.data import ( format_list_to_str, @@ -43,7 +44,6 @@ DirectoryPathCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger try: from sklearn.cluster import KMeans diff --git a/otx/algorithms/detection/configs/detection/configuration.yaml b/otx/algorithms/detection/configs/detection/configuration.yaml index cd8ba0eadff..e53fe639e0f 100644 --- a/otx/algorithms/detection/configs/detection/configuration.yaml +++ b/otx/algorithms/detection/configs/detection/configuration.yaml @@ -245,21 +245,21 @@ algo_backend: header: Algo backend parameters train_type: affects_outcome_of: TRAINING - default_value: INCREMENTAL + default_value: Incremental description: Training scheme option that determines how to train the model editable: True enum_name: TrainType header: Train type options: - INCREMENTAL: "INCREMENTAL" - SEMISUPERVISED: "SEMISUPERVISED" + Incremental: "Incremental" + Semisupervised: "Semisupervised" type: SELECTABLE ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - value: INCREMENTAL + value: Incremental visible_in_ui: True warning: null mem_cache_size: diff --git a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/semisl/hparam.yaml b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/semisl/hparam.yaml index 55395b0d84c..580462daa1e 100644 --- a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/semisl/hparam.yaml +++ b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/semisl/hparam.yaml @@ -3,4 +3,4 @@ hyper_parameters: parameter_overrides: algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/template.yaml 
b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/template.yaml index cf1546e24a1..6360cba3b74 100644 --- a/otx/algorithms/detection/configs/detection/cspdarknet_yolox/template.yaml +++ b/otx/algorithms/detection/configs/detection/cspdarknet_yolox/template.yaml @@ -46,7 +46,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/hparam.yaml b/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/hparam.yaml index 55395b0d84c..580462daa1e 100644 --- a/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/hparam.yaml +++ b/otx/algorithms/detection/configs/detection/mobilenetv2_atss/semisl/hparam.yaml @@ -3,4 +3,4 @@ hyper_parameters: parameter_overrides: algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/detection/configs/detection/mobilenetv2_atss/template.yaml b/otx/algorithms/detection/configs/detection/mobilenetv2_atss/template.yaml index 49b91e29014..cc4051f4611 100644 --- a/otx/algorithms/detection/configs/detection/mobilenetv2_atss/template.yaml +++ b/otx/algorithms/detection/configs/detection/mobilenetv2_atss/template.yaml @@ -46,7 +46,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/semisl/hparam.yaml b/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/semisl/hparam.yaml index 55395b0d84c..580462daa1e 100644 --- a/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/semisl/hparam.yaml +++ b/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/semisl/hparam.yaml @@ -3,4 +3,4 @@ hyper_parameters: parameter_overrides: algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/template.yaml b/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/template.yaml index bdb817d21cf..a90ac70b124 100644 --- a/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/template.yaml +++ b/otx/algorithms/detection/configs/detection/mobilenetv2_ssd/template.yaml @@ -46,7 +46,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/detection/configs/detection/resnet50_vfnet/template_experimental.yaml b/otx/algorithms/detection/configs/detection/resnet50_vfnet/template_experimental.yaml index 624b6331ae4..6605ee5bed6 100644 --- a/otx/algorithms/detection/configs/detection/resnet50_vfnet/template_experimental.yaml +++ b/otx/algorithms/detection/configs/detection/resnet50_vfnet/template_experimental.yaml @@ -34,7 +34,7 @@ hyper_parameters: default_value: 100 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. 
max_nodes: 1 diff --git a/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml b/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml index 57693128302..bd8b078dd3f 100644 --- a/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml +++ b/otx/algorithms/detection/configs/instance_segmentation/configuration.yaml @@ -245,21 +245,21 @@ algo_backend: header: Algo backend parameters train_type: affects_outcome_of: TRAINING - default_value: INCREMENTAL + default_value: Incremental description: Training scheme option that determines how to train the model editable: True enum_name: TrainType header: Train type options: - INCREMENTAL: "INCREMENTAL" - SEMISUPEVISED: "SEMISUPERVISED" + Incremental: "Incremental" + Semisupervised: "Semisupervised" type: SELECTABLE ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - value: INCREMENTAL + value: Incremental visible_in_ui: True warning: null mem_cache_size: diff --git a/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml b/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml index 8b08cff74c9..70ff835290f 100644 --- a/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml +++ b/otx/algorithms/detection/configs/instance_segmentation/efficientnetb2b_maskrcnn/template.yaml @@ -49,7 +49,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/template.yaml b/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/template.yaml index 9f62ac6fea5..c8ddb1ab624 100644 --- a/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/template.yaml +++ b/otx/algorithms/detection/configs/instance_segmentation/resnet50_maskrcnn/template.yaml @@ -49,7 +49,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources.
max_nodes: 1 diff --git a/otx/algorithms/detection/configs/rotated_detection/configuration.yaml b/otx/algorithms/detection/configs/rotated_detection/configuration.yaml index 4f6c2420a4b..9091a47232f 100644 --- a/otx/algorithms/detection/configs/rotated_detection/configuration.yaml +++ b/otx/algorithms/detection/configs/rotated_detection/configuration.yaml @@ -245,21 +245,21 @@ algo_backend: header: Algo backend parameters train_type: affects_outcome_of: TRAINING - default_value: INCREMENTAL + default_value: Incremental description: Training scheme option that determines how to train the model editable: True enum_name: TrainType header: Train type options: - INCREMENTAL: "INCREMENTAL" - SEMISUPEVISED: "SEMISUPERVISED" + Incremental: "Incremental" + Semisupervised: "Semisupervised" type: SELECTABLE ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - value: INCREMENTAL + value: Incremental visible_in_ui: True warning: null type: PARAMETER_GROUP diff --git a/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/template.yaml b/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/template.yaml index c863f176cc6..c652c833a0a 100644 --- a/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/template.yaml +++ b/otx/algorithms/detection/configs/rotated_detection/efficientnetb2b_maskrcnn/template.yaml @@ -49,7 +49,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/template.yaml b/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/template.yaml index 3593f5e6a09..ff7992857e0 100644 --- a/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/template.yaml +++ b/otx/algorithms/detection/configs/rotated_detection/resnet50_maskrcnn/template.yaml @@ -49,7 +49,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/detection/tasks/__init__.py b/otx/algorithms/detection/tasks/__init__.py index d1697d53209..83ccf7fc91a 100644 --- a/otx/algorithms/detection/tasks/__init__.py +++ b/otx/algorithms/detection/tasks/__init__.py @@ -15,7 +15,7 @@ # and limitations under the License.
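
The `INCREMENTAL`/`SEMISUPERVISED` values above become case-sensitive `Incremental`/`Semisupervised` enum members. A minimal sketch of how the renamed values drive recipe and stage selection, mirroring the `RECIPE_TRAIN_TYPE` map and `_update_stage_module` dispatch in the inference task diff that follows; the standalone `TrainType` enum here is an illustrative stand-in for the one in `otx.algorithms.common.configs.training_base`:

```python
from enum import Enum


class TrainType(Enum):
    # Illustrative stand-in; the real enum lives in
    # otx.algorithms.common.configs.training_base.
    Incremental = "Incremental"
    Semisupervised = "Semisupervised"


# Recipe selection, as in the inference task diff below.
RECIPE_TRAIN_TYPE = {
    TrainType.Semisupervised: "semisl.py",
    TrainType.Incremental: "incremental.py",
}


def update_stage_module(train_type: TrainType, stage_module: str) -> str:
    """Mirror of _update_stage_module: prefix the stage class by train type."""
    module_prefix = {TrainType.Incremental: "Incr", TrainType.Semisupervised: "SemiSL"}
    if train_type == TrainType.Semisupervised and stage_module == "DetectionExporter":
        return "SemiSLDetectionExporter"
    if stage_module in ("DetectionTrainer", "DetectionInferrer"):
        return module_prefix[train_type] + stage_module
    return stage_module


assert update_stage_module(TrainType.Semisupervised, "DetectionTrainer") == "SemiSLDetectionTrainer"
```
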
import otx.algorithms.common.adapters.mmcv.models as OTXBackbones -import otx.mpa.det as MPADetection +import otx.algorithms.detection.adapters.mmdet.tasks as MPADetection from .inference import DetectionInferenceTask from .nncf import DetectionNNCFTask diff --git a/otx/algorithms/detection/tasks/inference.py b/otx/algorithms/detection/tasks/inference.py index 89434ae1e60..f403a52eadf 100644 --- a/otx/algorithms/detection/tasks/inference.py +++ b/otx/algorithms/detection/tasks/inference.py @@ -28,10 +28,12 @@ patch_default_config, patch_runner, ) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig from otx.algorithms.common.configs.training_base import TrainType from otx.algorithms.common.tasks.training_base import BaseTask from otx.algorithms.common.utils.callback import InferenceProgressCallback from otx.algorithms.common.utils.ir import embed_ir_model_data +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.utils import ( patch_datasets, patch_evaluation, @@ -75,14 +77,12 @@ check_input_parameters_type, ) from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item -from otx.mpa.utils.config_utils import MPAConfig -from otx.mpa.utils.logger import get_logger logger = get_logger() RECIPE_TRAIN_TYPE = { - TrainType.SEMISUPERVISED: "semisl.py", - TrainType.INCREMENTAL: "incremental.py", + TrainType.Semisupervised: "semisl.py", + TrainType.Incremental: "incremental.py", } @@ -356,8 +356,8 @@ def _init_test_data_cfg(self, dataset: DatasetEntity): return data_cfg def _update_stage_module(self, stage_module): - module_prefix = {TrainType.INCREMENTAL: "Incr", TrainType.SEMISUPERVISED: "SemiSL"} - if self._train_type == TrainType.SEMISUPERVISED and stage_module == "DetectionExporter": + module_prefix = {TrainType.Incremental: "Incr", TrainType.Semisupervised: "SemiSL"} + if self._train_type == TrainType.Semisupervised and stage_module == "DetectionExporter": stage_module = "SemiSLDetectionExporter" elif self._train_type in module_prefix and stage_module in [ "DetectionTrainer", diff --git a/otx/algorithms/detection/tasks/nncf.py b/otx/algorithms/detection/tasks/nncf.py index cda701d9917..ce92eda406c 100644 --- a/otx/algorithms/detection/tasks/nncf.py +++ b/otx/algorithms/detection/tasks/nncf.py @@ -21,6 +21,7 @@ import otx.algorithms.detection.adapters.mmdet.nncf.patches # noqa: F401 # pylint: disable=unused-import from otx.algorithms.common.adapters.mmcv.utils import remove_from_config from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.nncf import build_nncf_detector from otx.algorithms.detection.adapters.mmdet.utils.config_utils import ( should_cluster_anchors, @@ -32,7 +33,6 @@ from otx.api.entities.resultset import ResultSetEntity from otx.api.entities.subset import Subset from otx.api.usecases.evaluation.metrics_helper import MetricsHelper -from otx.mpa.utils.logger import get_logger from .inference import DetectionInferenceTask from .train import DetectionTrainTask diff --git a/otx/algorithms/detection/tasks/openvino.py b/otx/algorithms/detection/tasks/openvino.py index 354c48ad848..694d947b1b4 100644 --- a/otx/algorithms/detection/tasks/openvino.py +++ b/otx/algorithms/detection/tasks/openvino.py @@ -35,6 +35,7 @@ from openvino.model_zoo.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_zoo.model_api.models import Model +from otx.algorithms.common.utils.logger 
import get_logger from otx.algorithms.detection.adapters.openvino import model_wrappers from otx.algorithms.detection.configs.base import DetectionConfig from otx.api.configuration.helper.utils import config_to_bytes @@ -83,7 +84,6 @@ ) from otx.api.utils.dataset_utils import add_saliency_maps_to_dataset_item from otx.api.utils.detection_utils import detection2array -from otx.mpa.utils.logger import get_logger logger = get_logger() diff --git a/otx/algorithms/detection/tasks/train.py b/otx/algorithms/detection/tasks/train.py index b74d6a086e1..2267120770f 100644 --- a/otx/algorithms/detection/tasks/train.py +++ b/otx/algorithms/detection/tasks/train.py @@ -23,6 +23,7 @@ from otx.algorithms.common.utils.callback import TrainingProgressCallback from otx.algorithms.common.utils.data import get_dataset +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.detection.adapters.mmdet.utils.config_utils import ( should_cluster_anchors, ) @@ -52,7 +53,6 @@ DatasetParamTypeCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger from .inference import DetectionInferenceTask @@ -217,9 +217,6 @@ def _init_train_data_cfg(self, dataset: DatasetEntity): labels=self._labels, ) - # Temparory remedy for cfg.pretty_text error - for label in self._labels: - label.hotkey = "a" return data_cfg @staticmethod diff --git a/otx/algorithms/segmentation/adapters/__init__.py b/otx/algorithms/segmentation/adapters/__init__.py index 8830b8e5239..53d34a44210 100644 --- a/otx/algorithms/segmentation/adapters/__init__.py +++ b/otx/algorithms/segmentation/adapters/__init__.py @@ -1,4 +1,16 @@ """Adapters for Segmentation.""" + # Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. diff --git a/otx/algorithms/segmentation/adapters/mmseg/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/__init__.py index d8754290882..4651aac0f2b 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/__init__.py @@ -1,10 +1,38 @@ """OTX Adapters - mmseg.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from .data import MPASegDataset -from .models import DetConB, DetConLoss, SelfSLMLP, SupConDetConB +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. 
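
All of these task files swap `otx.mpa.utils.logger` for the relocated common logger. A minimal sketch of the new import, assuming `get_logger()` keeps its previous behavior of returning a shared, preconfigured logger:

```python
# New canonical import path after the MPA merge; the old
# otx.mpa.utils.logger path is removed by this PR.
from otx.algorithms.common.utils.logger import get_logger

logger = get_logger()
logger.info("logger resolved from otx.algorithms.common.utils.logger")
```
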
+ + +from .datasets import MPASegDataset +from .models import ( + ClassIncrEncoderDecoder, + ConstantScalarScheduler, + CrossEntropyLossWithIgnore, + CustomFCNHead, + DetConB, + DetConLoss, + LiteHRNet, + MeanTeacherSegmentor, + MMOVBackbone, + MMOVDecodeHead, + PolyScalarScheduler, + SelfSLMLP, + StepScalarScheduler, + SupConDetConB, +) # fmt: off # isort: off @@ -16,4 +44,20 @@ # fmt: off # isort: on -__all__ = ["MPASegDataset", "DetConLoss", "SelfSLMLP", "DetConB", "SupConDetConB"] +__all__ = [ + "MPASegDataset", + "LiteHRNet", + "MMOVBackbone", + "CustomFCNHead", + "MMOVDecodeHead", + "DetConLoss", + "SelfSLMLP", + "ConstantScalarScheduler", + "PolyScalarScheduler", + "StepScalarScheduler", + "DetConB", + "CrossEntropyLossWithIgnore", + "SupConDetConB", + "ClassIncrEncoderDecoder", + "MeanTeacherSegmentor", +] diff --git a/otx/algorithms/segmentation/adapters/mmseg/data/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/datasets/__init__.py similarity index 89% rename from otx/algorithms/segmentation/adapters/mmseg/data/__init__.py rename to otx/algorithms/segmentation/adapters/mmseg/datasets/__init__.py index f62eeed6289..6072fc61b45 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/data/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/datasets/__init__.py @@ -1,6 +1,6 @@ """OTX Algorithms - Segmentation Dataset.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,13 +18,17 @@ from .pipelines import ( LoadAnnotationFromOTXDataset, LoadImageFromOTXDataset, + MaskCompose, + ProbCompose, TwoCropTransform, ) __all__ = [ - "get_annotation_mmseg_format", - "LoadImageFromOTXDataset", "LoadAnnotationFromOTXDataset", - "MPASegDataset", + "LoadImageFromOTXDataset", + "MaskCompose", + "ProbCompose", "TwoCropTransform", + "get_annotation_mmseg_format", + "MPASegDataset", ] diff --git a/otx/algorithms/segmentation/adapters/mmseg/data/dataset.py b/otx/algorithms/segmentation/adapters/mmseg/datasets/dataset.py similarity index 99% rename from otx/algorithms/segmentation/adapters/mmseg/data/dataset.py rename to otx/algorithms/segmentation/adapters/mmseg/datasets/dataset.py index 9e506a04cc2..eb267d4fe12 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/data/dataset.py +++ b/otx/algorithms/segmentation/adapters/mmseg/datasets/dataset.py @@ -1,6 +1,6 @@ """Base MMDataset for Segmentation Task.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/__init__.py new file mode 100644 index 00000000000..ec2878f7abc --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/__init__.py @@ -0,0 +1,21 @@ +"""OTX Algorithms - Segmentation pipelines.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from .compose import MaskCompose, ProbCompose +from .loads import LoadAnnotationFromOTXDataset, LoadImageFromOTXDataset +from .transforms import TwoCropTransform + +__all__ = ["MaskCompose", "ProbCompose", "LoadImageFromOTXDataset", "LoadAnnotationFromOTXDataset", "TwoCropTransform"] diff --git a/otx/mpa/modules/datasets/pipelines/compose.py b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/compose.py similarity index 87% rename from otx/mpa/modules/datasets/pipelines/compose.py rename to otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/compose.py index f8ea7953a62..99ecfc4bf36 100644 --- a/otx/mpa/modules/datasets/pipelines/compose.py +++ b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/compose.py @@ -1,4 +1,6 @@ -# Copyright (C) 2022 Intel Corporation +"""Collection of compose pipelines for segmentation task.""" + +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -11,8 +13,11 @@ from scipy.ndimage import gaussian_filter +# pylint: disable=consider-using-f-string @PIPELINES.register_module() -class ProbCompose(object): +class ProbCompose: + """Compose pipelines in a list and enable or disable each with a given probability.""" + def __init__(self, transforms, probs): assert isinstance(transforms, Sequence) assert isinstance(probs, Sequence) @@ -35,6 +40,7 @@ def __init__(self, transforms, probs): raise TypeError(f"transform must be callable or a dict, but got {type(transform)}") def __call__(self, data): + """Callback function of ProbCompose.""" rand_value = np.random.rand() transform_id = np.max(np.where(rand_value > self.limits)[0]) @@ -44,17 +50,20 @@ def __call__(self, data): return data def __repr__(self): + """Repr.""" format_string = self.__class__.__name__ + "(" for t in self.transforms: format_string += "\n" - format_string += " {0}".format(t) + format_string += f" {t}" format_string += "\n)" return format_string @PIPELINES.register_module() -class MaskCompose(object): +class MaskCompose: + """Compose mask-related pipelines in a list and enable or disable them with a given probability.""" + def __init__(self, transforms, prob, lambda_limits=(4, 16), keep_original=False): self.keep_original = keep_original self.prob = prob @@ -102,6 +111,7 @@ def _mix_img(main_img, aux_img, mask): return np.where(np.expand_dims(mask, axis=2), main_img, aux_img) def __call__(self, data): + """Callback function of MaskCompose.""" main_data = self._apply_transforms(deepcopy(data), self.transforms) assert main_data is not None if not self.keep_original and np.random.rand() > self.prob: @@ -123,10 +133,11 @@ def __call__(self, data): return main_data def __repr__(self): + """Repr.""" format_string = self.__class__.__name__ + "(" for t in self.transforms: format_string += "\n" - format_string += " {0}".format(t) + format_string += f" {t}" format_string += "\n)" return format_string diff --git a/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/loads.py b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/loads.py new file mode 100644 index 00000000000..e35c589f492 --- /dev/null +++
b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/loads.py @@ -0,0 +1,57 @@ +"""Collection of load pipelines for segmentation task.""" + +# Copyright (C) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. +from typing import Any, Dict + +from mmseg.datasets.builder import PIPELINES + +import otx.core.data.pipelines.load_image_from_otx_dataset as load_image_base +from otx.algorithms.segmentation.adapters.mmseg.datasets.dataset import ( + get_annotation_mmseg_format, +) +from otx.api.utils.argument_checks import check_input_parameters_type + + +# pylint: disable=too-many-instance-attributes, too-many-arguments +@PIPELINES.register_module() +class LoadImageFromOTXDataset(load_image_base.LoadImageFromOTXDataset): + """Pipeline element that loads an image from an OTX Dataset on the fly.""" + + +@PIPELINES.register_module() +class LoadAnnotationFromOTXDataset: + """Pipeline element that loads an annotation from an OTX Dataset on the fly. + + Expected entries in the 'results' dict that should be passed to this pipeline element are: + results['dataset_item']: dataset_item from which to load the annotation + results['ann_info']['labels']: list of all labels in the project + + """ + + def __init__(self): + pass + + @check_input_parameters_type() + def __call__(self, results: Dict[str, Any]): + """Callback function of LoadAnnotationFromOTXDataset.""" + dataset_item = results["dataset_item"] + labels = results["ann_info"]["labels"] + + ann_info = get_annotation_mmseg_format(dataset_item, labels) + + results["gt_semantic_seg"] = ann_info["gt_semantic_seg"] + results["seg_fields"].append("gt_semantic_seg") + + return results diff --git a/otx/algorithms/segmentation/adapters/mmseg/data/pipelines.py b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/transforms.py similarity index 65% rename from otx/algorithms/segmentation/adapters/mmseg/data/pipelines.py rename to otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/transforms.py index 9d0f0278954..2ba3ada9e27 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/data/pipelines.py +++ b/otx/algorithms/segmentation/adapters/mmseg/datasets/pipelines/transforms.py @@ -1,66 +1,156 @@ -"""Collection Pipeline for segmentation task.""" -# Copyright (C) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 +"""Collection of transform pipelines for segmentation task.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 # -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License.
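
A hypothetical call sequence for the load pipeline above, showing the `results` keys it consumes and produces; `dataset_item` and `labels` are placeholders for real OTX entities, so this sketch is not executable as-is:

```python
from otx.algorithms.segmentation.adapters.mmseg.datasets.pipelines.loads import (
    LoadAnnotationFromOTXDataset,
)

# Placeholders: in real code these come from an OTX DatasetEntity.
dataset_item = ...  # a dataset item carrying the annotation
labels = []         # the project's label list

load_ann = LoadAnnotationFromOTXDataset()
results = {
    "dataset_item": dataset_item,
    "ann_info": {"labels": labels},
    "seg_fields": [],
}
results = load_ann(results)
# "gt_semantic_seg" is now populated and registered in results["seg_fields"].
```
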
+ from copy import deepcopy from typing import Any, Dict, List +import mmcv import numpy as np +from mmcv.parallel import DataContainer as DC from mmcv.utils import build_from_cfg from mmseg.datasets.builder import PIPELINES from mmseg.datasets.pipelines import Compose +from mmseg.datasets.pipelines.formatting import to_tensor from PIL import Image from torchvision import transforms as T from torchvision.transforms import functional as F -import otx.core.data.pipelines.load_image_from_otx_dataset as load_image_base from otx.api.utils.argument_checks import check_input_parameters_type -from .dataset import get_annotation_mmseg_format +@PIPELINES.register_module(force=True) +class Normalize: + """Normalize the image. -# pylint: disable=too-many-instance-attributes, too-many-arguments -@PIPELINES.register_module() -class LoadImageFromOTXDataset(load_image_base.LoadImageFromOTXDataset): - """Pipeline element that loads an image from a OTX Dataset on the fly.""" + Added key is "img_norm_cfg". + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb -@PIPELINES.register_module() -class LoadAnnotationFromOTXDataset: - """Pipeline element that loads an annotation from a OTX Dataset on the fly. + def __call__(self, results): + """Call function to normalize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Normalized results, 'img_norm_cfg' key is added into + result dict. + """ - Expected entries in the 'results' dict that should be passed to this pipeline element are: - results['dataset_item']: dataset_item from which to load the annotation - results['ann_info']['label_list']: list of all labels in the project + for target in ["img", "ul_w_img", "aux_img"]: + if target in results: + results[target] = mmcv.imnormalize(results[target], self.mean, self.std, self.to_rgb) + results["img_norm_cfg"] = dict(mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + """Repr.""" + repr_str = self.__class__.__name__ + repr_str += f"(mean={self.mean}, std={self.std}, to_rgb=" f"{self.to_rgb})" + return repr_str + + +@PIPELINES.register_module(force=True) +class DefaultFormatBundle: + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields, including "img" + and "gt_semantic_seg". These fields are formatted as follows. + + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, + (3)to DataContainer (stack=True) """ - def __init__(self): - pass + def __call__(self, results): + """Call function to transform and format common fields in results. - @check_input_parameters_type() - def __call__(self, results: Dict[str, Any]): - """Callback function of LoadAnnotationFromOTXDataset.""" - dataset_item = results["dataset_item"] - labels = results["ann_info"]["labels"] + Args: + results (dict): Result dict contains the data to convert. - ann_info = get_annotation_mmseg_format(dataset_item, labels) + Returns: + dict: The result dict contains the data that is formatted with + default bundle. 
+ """ + for target in ["img", "ul_w_img", "aux_img"]: + if target not in results: + continue + + img = results[target] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + + if len(img.shape) == 3: + img = np.ascontiguousarray(img.transpose(2, 0, 1)).astype(np.float32) + elif len(img.shape) == 4: + # for selfsl or supcon + img = np.ascontiguousarray(img.transpose(0, 3, 1, 2)).astype(np.float32) + else: + raise ValueError(f"img.shape={img.shape} is not supported.") + + results[target] = DC(to_tensor(img), stack=True) + + for trg_name in ["gt_semantic_seg", "gt_class_borders", "pixel_weights"]: + if trg_name not in results: + continue + + out_type = np.float32 if trg_name == "pixel_weights" else np.int64 + results[trg_name] = DC(to_tensor(results[trg_name][None, ...].astype(out_type)), stack=True) + + return results + + def __repr__(self): + """Repr.""" + return self.__class__.__name__ + + +@PIPELINES.register_module() +class BranchImage: + """Branch images by copying with name of key. - results["gt_semantic_seg"] = ann_info["gt_semantic_seg"] - results["seg_fields"].append("gt_semantic_seg") + Args: + key_map (dict): keys to name each image. + """ + + def __init__(self, key_map): + self.key_map = key_map + + def __call__(self, results): + """Call function to branch images in img_fields in results. + Args: + results (dict): Result dict contains the image data to branch. + + Returns: + dict: The result dict contains the original image data and copied image data. + """ + for key1, key2 in self.key_map.items(): + if key1 in results: + results[key2] = results[key1] + if key1 in results["img_fields"]: + results["img_fields"].append(key2) return results + def __repr__(self): + """Repr.""" + + repr_str = self.__class__.__name__ + return repr_str + @PIPELINES.register_module() class TwoCropTransform: diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/__init__.py index d4ef2e9c4ef..fa66af700d4 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/models/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/__init__.py @@ -1,6 +1,6 @@ """Adapters for OTX Common Algorithm. - mmseg.model.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,9 +14,35 @@ # See the License for the specific language governing permissions # and limitations under the License. 
- -from .losses import DetConLoss +from .backbones import LiteHRNet, MMOVBackbone +from .heads import CustomFCNHead, MMOVDecodeHead +from .losses import CrossEntropyLossWithIgnore, DetConLoss from .necks import SelfSLMLP -from .segmentors import DetConB, SupConDetConB +from .schedulers import ( + ConstantScalarScheduler, + PolyScalarScheduler, + StepScalarScheduler, +) +from .segmentors import ( + ClassIncrEncoderDecoder, + DetConB, + MeanTeacherSegmentor, + SupConDetConB, +) -__all__ = ["DetConLoss", "SelfSLMLP", "DetConB", "SupConDetConB"] +__all__ = [ + "LiteHRNet", + "MMOVBackbone", + "CustomFCNHead", + "MMOVDecodeHead", + "DetConLoss", + "SelfSLMLP", + "ConstantScalarScheduler", + "PolyScalarScheduler", + "StepScalarScheduler", + "DetConB", + "CrossEntropyLossWithIgnore", + "SupConDetConB", + "ClassIncrEncoderDecoder", + "MeanTeacherSegmentor", +] diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/backbones/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/backbones/__init__.py new file mode 100644 index 00000000000..a241bbb48f8 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/models/backbones/__init__.py @@ -0,0 +1,24 @@ +"""Backbones for semantic segmentation.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + + +from .litehrnet import LiteHRNet +from .mmov_backbone import MMOVBackbone + +__all__ = [ + "LiteHRNet", + "MMOVBackbone", +] diff --git a/otx/mpa/modules/models/backbones/litehrnet.py b/otx/algorithms/segmentation/adapters/mmseg/models/backbones/litehrnet.py similarity index 96% rename from otx/mpa/modules/models/backbones/litehrnet.py rename to otx/algorithms/segmentation/adapters/mmseg/models/backbones/litehrnet.py index 85f7df0ac31..737c5581de3 100644 --- a/otx/mpa/modules/models/backbones/litehrnet.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/backbones/litehrnet.py @@ -1,19 +1,22 @@ +"""HRNet network modules for base backbone. + +Modified from: +- https://github.com/HRNet/Lite-HRNet +""" + # Copyright (c) 2018-2020 Open-MMLab. 
# SPDX-License-Identifier: Apache-2.0 # # Copyright (c) 2021 DeLightCMU # SPDX-License-Identifier: Apache-2.0 # -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -"""Modified from: https://github.com/HRNet/Lite-HRNet""" - import mmcv import torch -import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import ( @@ -28,8 +31,9 @@ from mmseg.models.backbones.resnet import BasicBlock, Bottleneck from mmseg.models.builder import BACKBONES from mmseg.utils import get_root_logger +from torch import nn -from ..utils import ( +from otx.algorithms.segmentation.adapters.mmseg.models.utils import ( AsymmetricPositionAttentionModule, IterativeAggregator, LocalAttentionModule, @@ -37,7 +41,11 @@ ) +# pylint: disable=invalid-name, too-many-lines, too-many-instance-attributes, too-many-locals, too-many-arguments +# pylint: disable=unused-argument, consider-using-enumerate class NeighbourSupport(nn.Module): + """Neighbour support module.""" + def __init__(self, channels, kernel_size=3, key_ratio=8, value_ratio=8, conv_cfg=None, norm_cfg=None): super().__init__() @@ -100,6 +108,7 @@ def __init__(self, channels, kernel_size=3, key_ratio=8, value_ratio=8, conv_cfg ) def forward(self, x): + """Forward.""" h, w = [int(_) for _ in x.size()[-2:]] key = self.key(x).view(-1, 1, self.kernel_size**2, h, w) @@ -115,6 +124,8 @@ def forward(self, x): class CrossResolutionWeighting(nn.Module): + """Cross resolution weighting.""" + def __init__( self, channels, ratio=16, conv_cfg=None, norm_cfg=None, act_cfg=(dict(type="ReLU"), dict(type="Sigmoid")) ): @@ -148,6 +159,7 @@ def __init__( ) def forward(self, x): + """Forward.""" min_size = [int(_) for _ in x[-1].size()[-2:]] out = [F.adaptive_avg_pool2d(s, min_size) for s in x[:-1]] + [x[-1]] @@ -161,6 +173,8 @@ def forward(self, x): class SpatialWeighting(nn.Module): + """Spatial weighting.""" + def __init__(self, channels, ratio=16, conv_cfg=None, act_cfg=(dict(type="ReLU"), dict(type="Sigmoid")), **kwargs): super().__init__() @@ -188,6 +202,7 @@ def __init__(self, channels, ratio=16, conv_cfg=None, act_cfg=(dict(type="ReLU") ) def forward(self, x): + """Forward.""" out = self.global_avgpool(x) out = self.conv1(out) out = self.conv2(out) @@ -196,7 +211,7 @@ def forward(self, x): class SpatialWeightingV2(nn.Module): - """The original repo: https://github.com/DeLightCMU/PSA""" + """The original repo: https://github.com/DeLightCMU/PSA.""" def __init__(self, channels, ratio=16, conv_cfg=None, norm_cfg=None, enable_norm=False, **kwargs): super().__init__() @@ -294,6 +309,7 @@ def _spatial_weighting(self, x): return out def forward(self, x): + """Forward.""" y_channel = self._channel_weighting(x) y_spatial = self._spatial_weighting(x) out = y_channel + y_spatial @@ -302,13 +318,15 @@ def forward(self, x): class ConditionalChannelWeighting(nn.Module): + """Conditional channel weighting module.""" + def __init__( self, in_channels, stride, reduce_ratio, conv_cfg=None, - norm_cfg=dict(type="BN"), + norm_cfg=None, with_cp=False, dropout=None, weighting_module_version="v1", @@ -317,6 +335,9 @@ def __init__( ): super().__init__() + if norm_cfg is None: + norm_cfg = dict(type="BN") + self.with_cp = with_cp self.stride = stride assert stride in [1, 2] @@ -389,6 +410,7 @@ def _inner_forward(self, x): return out def forward(self, x): + """Forward.""" if self.with_cp and x.requires_grad: out = cp.checkpoint(self._inner_forward, x) else: @@ -398,6 +420,8 @@ def 
forward(self, x): class Stem(nn.Module): + """Stem.""" + def __init__( self, in_channels, @@ -405,7 +429,7 @@ def __init__( out_channels, expand_ratio, conv_cfg=None, - norm_cfg=dict(type="BN"), + norm_cfg=None, with_cp=False, strides=(2, 2), extra_stride=False, @@ -413,6 +437,9 @@ def __init__( ): super().__init__() + if norm_cfg is None: + norm_cfg = dict(type="BN") + assert isinstance(strides, (tuple, list)) assert len(strides) == 2 @@ -535,6 +562,7 @@ def _inner_forward(self, x): return out def forward(self, x): + """Forward.""" if self.with_cp and x.requires_grad: out = cp.checkpoint(self._inner_forward, x) else: @@ -544,6 +572,8 @@ def forward(self, x): class StemV2(nn.Module): + """StemV2.""" + def __init__( self, in_channels, @@ -551,7 +581,7 @@ def __init__( out_channels, expand_ratio, conv_cfg=None, - norm_cfg=dict(type="BN"), + norm_cfg=None, with_cp=False, num_stages=1, strides=(2, 2), @@ -560,6 +590,9 @@ def __init__( ): super().__init__() + if norm_cfg is None: + norm_cfg = dict(type="BN") + assert num_stages > 0 assert isinstance(strides, (tuple, list)) assert len(strides) == 1 + num_stages @@ -689,6 +722,7 @@ def _inner_forward(self, x): return out_list def forward(self, x): + """Forward.""" if self.with_cp and x.requires_grad: out = cp.checkpoint(self._inner_forward, x) else: @@ -720,11 +754,17 @@ def __init__( out_channels, stride=1, conv_cfg=None, - norm_cfg=dict(type="BN"), - act_cfg=dict(type="ReLU"), + norm_cfg=None, + act_cfg=None, with_cp=False, ): super().__init__() + + if norm_cfg is None: + norm_cfg = dict(type="BN") + if act_cfg is None: + act_cfg = dict(type="ReLU") + self.stride = stride self.with_cp = with_cp @@ -812,6 +852,7 @@ def _inner_forward(self, x): return out def forward(self, x): + """Forward.""" if self.with_cp and x.requires_grad: out = cp.checkpoint(self._inner_forward, x) else: @@ -821,6 +862,8 @@ def forward(self, x): class LiteHRModule(nn.Module): + """LiteHR module.""" + def __init__( self, num_branches, @@ -831,13 +874,16 @@ def __init__( multiscale_output=False, with_fuse=True, conv_cfg=None, - norm_cfg=dict(type="BN"), + norm_cfg=None, with_cp=False, dropout=None, weighting_module_version="v1", neighbour_weighting=False, ): super().__init__() + + if norm_cfg is None: + norm_cfg = dict(type="BN") self._check_branches(num_branches, in_channels) self.in_channels = in_channels @@ -871,7 +917,7 @@ def _check_branches(num_branches, in_channels): def _make_weighting_blocks(self, num_blocks, reduce_ratio, stride=1, dropout=None): layers = [] - for i in range(num_blocks): + for _ in range(num_blocks): layers.append( ConditionalChannelWeighting( self.in_channels, @@ -902,7 +948,7 @@ def _make_one_branch(self, branch_index, num_blocks, stride=1): with_cp=self.with_cp, ) ] - for i in range(1, num_blocks): + for _ in range(1, num_blocks): layers.append( ShuffleUnit( self.in_channels[branch_index], @@ -1081,7 +1127,7 @@ def __init__( extra, in_channels=3, conv_cfg=None, - norm_cfg=dict(type="BN"), + norm_cfg=None, norm_eval=False, with_cp=False, zero_init_residual=False, @@ -1090,6 +1136,9 @@ def __init__( ): super().__init__(init_cfg=init_cfg) + if norm_cfg is None: + norm_cfg = dict(type="BN") + self.extra = extra self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg @@ -1123,12 +1172,12 @@ def __init__( num_channels = self.stages_spec["num_channels"][i] num_channels = [num_channels[i] for i in range(len(num_channels))] - setattr(self, "transition{}".format(i), self._make_transition_layer(num_channels_last, num_channels)) + setattr(self, f"transition{i}", 
self._make_transition_layer(num_channels_last, num_channels)) stage, num_channels_last = self._make_stage( self.stages_spec, i, num_channels, multiscale_output=True, dropout=dropout ) - setattr(self, "stage{}".format(i), stage) + setattr(self, f"stage{i}", stage) self.out_modules = None if self.extra.get("out_modules") is not None: @@ -1356,7 +1405,7 @@ def forward(self, x): y_list = [y] for i in range(self.num_stages): - transition_modules = getattr(self, "transition{}".format(i)) + transition_modules = getattr(self, f"transition{i}") stage_inputs = [] for j in range(self.stages_spec["num_branches"][i]): @@ -1368,7 +1417,7 @@ def forward(self, x): else: stage_inputs.append(y_list[j]) - stage_module = getattr(self, "stage{}".format(i)) + stage_module = getattr(self, f"stage{i}") y_list = stage_module(stage_inputs) if self.out_modules is not None: diff --git a/otx/mpa/modules/ov/models/mmseg/backbones/mmov_backbone.py b/otx/algorithms/segmentation/adapters/mmseg/models/backbones/mmov_backbone.py similarity index 65% rename from otx/mpa/modules/ov/models/mmseg/backbones/mmov_backbone.py rename to otx/algorithms/segmentation/adapters/mmseg/models/backbones/mmov_backbone.py index 31e4aaaf218..9ab524dcfb7 100644 --- a/otx/mpa/modules/ov/models/mmseg/backbones/mmov_backbone.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/backbones/mmov_backbone.py @@ -1,18 +1,25 @@ -# Copyright (C) 2022 Intel Corporation +"""Backbone used for OpenVINO export.""" + +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from mmseg.models.builder import BACKBONES -from ...mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel + +# pylint: disable=unused-argument @BACKBONES.register_module() class MMOVBackbone(MMOVModel): + """MMOVBackbone.""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, *args, **kwargs): + """Forward.""" outputs = super().forward(*args, **kwargs) if not isinstance(outputs, tuple): outputs = (outputs,) @@ -20,5 +27,6 @@ def forward(self, *args, **kwargs): return outputs def init_weights(self, pretrained=None): + """Initialize the weights.""" # TODO - pass + return diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/heads/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/heads/__init__.py new file mode 100644 index 00000000000..8ea771a0803 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/models/heads/__init__.py @@ -0,0 +1,21 @@ +"""Semantic segmentation heads.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License.
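
A recurring change in the LiteHRNet diff above replaces mutable `dict` defaults such as `norm_cfg=dict(type="BN")` with `None` plus an in-body fallback, since a dict default is created once and shared across every call. The pattern in isolation, on a hypothetical module that is not part of this PR:

```python
from torch import nn


class ExampleBlock(nn.Module):
    """Hypothetical block showing the None-default pattern used above."""

    def __init__(self, norm_cfg=None, act_cfg=None):
        super().__init__()
        # Build fresh per-instance defaults instead of sharing one dict
        # across all instances via the function signature.
        if norm_cfg is None:
            norm_cfg = dict(type="BN")
        if act_cfg is None:
            act_cfg = dict(type="ReLU")
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
```
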
+ + +from .custom_fcn_head import CustomFCNHead +from .mmov_decode_head import MMOVDecodeHead + +__all__ = ["MMOVDecodeHead", "CustomFCNHead"] diff --git a/otx/mpa/modules/models/heads/custom_fcn_head.py b/otx/algorithms/segmentation/adapters/mmseg/models/heads/custom_fcn_head.py similarity index 62% rename from otx/mpa/modules/models/heads/custom_fcn_head.py rename to otx/algorithms/segmentation/adapters/mmseg/models/heads/custom_fcn_head.py index 00efd56cec0..20b3fb2039b 100644 --- a/otx/mpa/modules/models/heads/custom_fcn_head.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/heads/custom_fcn_head.py @@ -1,18 +1,24 @@ -# Copyright (C) 2022 Intel Corporation +"""Custom FCN head.""" + +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from mmseg.models.builder import HEADS from mmseg.models.decode_heads.fcn_head import FCNHead -from .aggregator_mixin import AggregatorMixin -from .mix_loss_mixin import MixLossMixin -from .pixel_weights_mixin import PixelWeightsMixin2 -from .segment_out_norm_mixin import SegmentOutNormMixin +from .mixin import ( + AggregatorMixin, + MixLossMixin, + PixelWeightsMixin2, + SegmentOutNormMixin, +) @HEADS.register_module() -class CustomFCNHead(SegmentOutNormMixin, AggregatorMixin, MixLossMixin, PixelWeightsMixin2, FCNHead): +class CustomFCNHead( + SegmentOutNormMixin, AggregatorMixin, MixLossMixin, PixelWeightsMixin2, FCNHead +): # pylint: disable=too-many-ancestors """Custom Fully Convolution Networks for Semantic Segmentation.""" def __init__(self, *args, **kwargs): diff --git a/otx/mpa/modules/models/heads/pixel_weights_mixin.py b/otx/algorithms/segmentation/adapters/mmseg/models/heads/mixin.py similarity index 59% rename from otx/mpa/modules/models/heads/pixel_weights_mixin.py rename to otx/algorithms/segmentation/adapters/mmseg/models/heads/mixin.py index 6ca8821b3ac..befa1309f4d 100644 --- a/otx/mpa/modules/models/heads/pixel_weights_mixin.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/heads/mixin.py @@ -1,26 +1,148 @@ -# Copyright (C) 2022 Intel Corporation +"""Modules for aggregator and loss mix.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # - -import torch.nn as nn +import torch +import torch.nn.functional as F from mmcv.runner import force_fp32 from mmseg.core import add_prefix from mmseg.models.losses import accuracy from mmseg.ops import resize +from torch import nn -from otx.mpa.modules.utils.seg_utils import get_valid_label_mask_per_batch +from otx.algorithms.segmentation.adapters.mmseg.models.utils import ( + AngularPWConv, + IterativeAggregator, + LossEqualizer, + normalize, +) +from otx.algorithms.segmentation.adapters.mmseg.utils import ( + get_valid_label_mask_per_batch, +) -from ..losses.utils import LossEqualizer +# pylint: disable=abstract-method, unused-argument, keyword-arg-before-vararg -class PixelWeightsMixin(nn.Module): +class SegmentOutNormMixin(nn.Module): + """SegmentOutNormMixin.""" + + def __init__(self, *args, enable_out_seg=True, enable_out_norm=False, **kwargs): + super().__init__(*args, **kwargs) + + self.enable_out_seg = enable_out_seg + self.enable_out_norm = enable_out_norm + + if enable_out_seg: + if enable_out_norm: + self.conv_seg = AngularPWConv(self.channels, self.out_channels, clip_output=True) + else: + self.conv_seg = None + + def cls_seg(self, feat): + """Classify each pixel.""" + if self.dropout is not None: + feat = self.dropout(feat) + if self.enable_out_norm: + feat = normalize(feat, dim=1, p=2) + if self.conv_seg is not 
None: + return self.conv_seg(feat) + return feat + + +class AggregatorMixin(nn.Module): + """A class for creating an aggregator.""" + def __init__( self, - enable_loss_equalizer=False, - loss_target="gt_semantic_seg", *args, + enable_aggregator=False, + aggregator_min_channels=None, + aggregator_merge_norm=None, + aggregator_use_concat=False, **kwargs, ): + + in_channels = kwargs.get("in_channels") + in_index = kwargs.get("in_index") + norm_cfg = kwargs.get("norm_cfg") + conv_cfg = kwargs.get("conv_cfg") + input_transform = kwargs.get("input_transform") + + aggregator = None + if enable_aggregator: + assert isinstance(in_channels, (tuple, list)) + assert len(in_channels) > 1 + + aggregator = IterativeAggregator( + in_channels=in_channels, + min_channels=aggregator_min_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + merge_norm=aggregator_merge_norm, + use_concat=aggregator_use_concat, + ) + + aggregator_min_channels = aggregator_min_channels if aggregator_min_channels is not None else 0 + # change arguments temporarily + kwargs["in_channels"] = max(in_channels[0], aggregator_min_channels) + kwargs["input_transform"] = None + if in_index is not None: + kwargs["in_index"] = in_index[0] + + super().__init__(*args, **kwargs) + + self.aggregator = aggregator + # re-define variables + self.in_channels = in_channels + self.input_transform = input_transform + self.in_index = in_index + + def _transform_inputs(self, inputs): + inputs = super()._transform_inputs(inputs) + if self.aggregator is not None: + inputs = self.aggregator(inputs)[0] + return inputs + + +class MixLossMixin(nn.Module): + """Loss mixing module.""" + + @staticmethod + def _mix_loss(logits, target, ignore_index=255): + num_samples = logits.size(0) + assert num_samples % 2 == 0 + + with torch.no_grad(): + probs = F.softmax(logits, dim=1) + probs_a, probs_b = torch.split(probs, num_samples // 2) + mean_probs = 0.5 * (probs_a + probs_b) + trg_probs = torch.cat([mean_probs, mean_probs], dim=0) + + log_probs = torch.log_softmax(logits, dim=1) + losses = torch.sum(trg_probs * log_probs, dim=1).neg() + + valid_mask = target != ignore_index + valid_losses = torch.where(valid_mask, losses, torch.zeros_like(losses)) + + return valid_losses.mean() + + @force_fp32(apply_to=("seg_logit",)) + def losses(self, seg_logit, seg_label, train_cfg, *args, **kwargs): + """Loss computing.""" + loss = super().losses(seg_logit, seg_label, train_cfg, *args, **kwargs) + if train_cfg.get("mix_loss", None) and train_cfg.mix_loss.get("enable", False): + mix_loss = self._mix_loss(seg_logit, seg_label, ignore_index=self.ignore_index) + + mix_loss_weight = train_cfg.mix_loss.get("weight", 1.0) + loss["loss_mix"] = mix_loss_weight * mix_loss + + return loss + + +class PixelWeightsMixin(nn.Module): + """PixelWeightsMixin.""" + + def __init__(self, enable_loss_equalizer=False, loss_target="gt_semantic_seg", *args, **kwargs): super().__init__(*args, **kwargs) self.enable_loss_equalizer = enable_loss_equalizer @@ -34,10 +156,12 @@ def __init__( @property def loss_target_name(self): + """Return loss target name.""" return self.loss_target @property def last_scale(self): + """Return the last scale.""" if not isinstance(self.loss_decode, nn.ModuleList): losses_decode = [self.loss_decode] else: @@ -54,6 +178,7 @@ def last_scale(self): return loss_module.last_scale def set_step_params(self, init_iter, epoch_size): + """Set step parameters.""" if not isinstance(self.loss_decode, nn.ModuleList): losses_decode = [self.loss_decode] else: @@ -73,6 +198,7 @@ def 
forward_train( return_logits=False, ): """Forward function for training. + Args: inputs (list[Tensor]): List of multi-level img features. img_metas (list[dict]): List of image info dict where each dict @@ -138,6 +264,8 @@ def losses(self, seg_logit, seg_label, train_cfg, pixel_weights=None): class PixelWeightsMixin2(PixelWeightsMixin): + """Pixel weight mixin class.""" + def forward_train( self, inputs, @@ -148,6 +276,7 @@ def forward_train( return_logits=False, ): """Forward function for training. + Args: inputs (list[Tensor]): List of multi-level img features. img_metas (list[dict]): List of image info dict where each dict @@ -176,7 +305,9 @@ def forward_train( return losses @force_fp32(apply_to=("seg_logit",)) - def losses(self, seg_logit, seg_label, train_cfg, valid_label_mask, pixel_weights=None): + def losses( + self, seg_logit, seg_label, train_cfg, valid_label_mask, pixel_weights=None + ): # pylint: disable=arguments-renamed """Compute segmentation loss.""" loss = dict() diff --git a/otx/mpa/modules/ov/models/mmseg/decode_heads/mmov_decode_head.py b/otx/algorithms/segmentation/adapters/mmseg/models/heads/mmov_decode_head.py similarity index 79% rename from otx/mpa/modules/ov/models/mmseg/decode_heads/mmov_decode_head.py rename to otx/algorithms/segmentation/adapters/mmseg/models/heads/mmov_decode_head.py index 375fb51b17b..f34c72b9840 100644 --- a/otx/mpa/modules/ov/models/mmseg/decode_heads/mmov_decode_head.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/heads/mmov_decode_head.py @@ -1,4 +1,6 @@ -# Copyright (C) 2022 Intel Corporation +"""Decode-head used for openvino export.""" + +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,25 +8,31 @@ from typing import Dict, List, Optional, Union import openvino.runtime as ov -from mmseg.models.builder import HEADS from mmseg.models.decode_heads.decode_head import BaseDecodeHead -from ...mmov_model import MMOVModel +from otx.core.ov.models.mmov_model import MMOVModel + +# pylint: disable=too-many-instance-attributes, keyword-arg-before-vararg -@HEADS.register_module() class MMOVDecodeHead(BaseDecodeHead): + """MMOVDecodeHead.""" + def __init__( self, model_path_or_model: Union[str, ov.Model] = None, weight_path: Optional[str] = None, - inputs: Dict[str, Union[str, List[str]]] = {}, - outputs: Dict[str, Union[str, List[str]]] = {}, + inputs: Optional[Dict[str, Union[str, List[str]]]] = None, + outputs: Optional[Dict[str, Union[str, List[str]]]] = None, init_weight: bool = False, verify_shape: bool = True, *args, - **kwargs, + **kwargs ): + if inputs is None: + inputs = {} + if outputs is None: + outputs = {} self._model_path_or_model = model_path_or_model self._weight_path = weight_path self._inputs = deepcopy(inputs) @@ -68,10 +76,12 @@ def __init__( ) def init_weights(self): + """Init weights.""" # TODO - pass + return def forward(self, inputs): + """Forward.""" outputs = self._transform_inputs(inputs) if getattr(self, "extractor"): outputs = self.extractor(outputs) diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/losses/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/losses/__init__.py index 7455c2529c3..f6ba9b2e32b 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/models/losses/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/losses/__init__.py @@ -1,6 +1,6 @@ """Segmentation losses.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # 
you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions # and limitations under the License. +from .cross_entropy_loss_with_ignore import CrossEntropyLossWithIgnore from .detcon_loss import DetConLoss -__all__ = ["DetConLoss"] +__all__ = ["DetConLoss", "CrossEntropyLossWithIgnore"] diff --git a/otx/mpa/modules/models/losses/base_pixel_loss.py b/otx/algorithms/segmentation/adapters/mmseg/models/losses/base_pixel_loss.py similarity index 89% rename from otx/mpa/modules/models/losses/base_pixel_loss.py rename to otx/algorithms/segmentation/adapters/mmseg/models/losses/base_pixel_loss.py index 423cf744fea..1d0d69e27c8 100644 --- a/otx/mpa/modules/models/losses/base_pixel_loss.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/losses/base_pixel_loss.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Base pixel loss.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,18 +9,23 @@ import torch.nn.functional as F from mmseg.models.losses.utils import weight_reduce_loss -from otx.mpa.modules.models.builder import build_scalar_scheduler +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import ( + build_scalar_scheduler, +) from .base_weighted_loss import BaseWeightedLoss def entropy(p, dim=1, keepdim=False): + """Calculates the entropy.""" return -torch.where(p > 0.0, p * p.log(), torch.zeros_like(p)).sum(dim=dim, keepdim=keepdim) class BasePixelLoss(BaseWeightedLoss): + """Base pixel loss.""" + def __init__(self, scale_cfg=None, pr_product=False, conf_penalty_weight=None, border_reweighting=False, **kwargs): - super(BasePixelLoss, self).__init__(**kwargs) + super().__init__(**kwargs) self._enable_pr_product = pr_product self._border_reweighting = border_reweighting @@ -32,22 +38,27 @@ def __init__(self, scale_cfg=None, pr_product=False, conf_penalty_weight=None, b @property def last_scale(self): + """Return last_scale.""" return self._last_scale @property def last_reg_weight(self): + """Return last_reg_weight.""" return self._last_reg_weight @property def with_regularization(self): + """Check regularization use.""" return self._reg_weight_scheduler is not None @property def with_pr_product(self): + """Check pr_product.""" return self._enable_pr_product @property def with_border_reweighting(self): + """Check border reweighting.""" return self._border_reweighting @staticmethod @@ -99,7 +110,9 @@ def _pred_stat(output, labels, valid_mask, window_size=5, min_group_ratio=0.6): return out_ratio.item() - def _forward(self, output, labels, avg_factor=None, pixel_weights=None, reduction_override=None): + def _forward( + self, output, labels, avg_factor=None, pixel_weights=None, reduction_override=None + ): # pylint: disable=too-many-locals assert reduction_override in (None, "none", "mean", "sum") reduction = reduction_override if reduction_override else self.reduction diff --git a/otx/mpa/modules/models/losses/base_weighted_loss.py b/otx/algorithms/segmentation/adapters/mmseg/models/losses/base_weighted_loss.py similarity index 83% rename from otx/mpa/modules/models/losses/base_weighted_loss.py rename to otx/algorithms/segmentation/adapters/mmseg/models/losses/base_weighted_loss.py index e74b5233e97..2487eed40fe 100644 --- a/otx/mpa/modules/models/losses/base_weighted_loss.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/losses/base_weighted_loss.py @@ -1,17 +1,21 @@ -# Copyright (C) 2022 Intel Corporation +"""Base weighted loss 
function for semantic segmentation.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from abc import ABCMeta, abstractmethod import torch -import torch.nn as nn from mmseg.core import build_pixel_sampler -from scipy.special import erfinv +from scipy.special import erfinv # pylint: disable=no-name-in-module +from torch import nn -from otx.mpa.modules.models.builder import build_scalar_scheduler +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import ( + build_scalar_scheduler, +) +# pylint: disable=too-many-instance-attributes, unused-argument class BaseWeightedLoss(nn.Module, metaclass=ABCMeta): """Base class for loss. @@ -57,6 +61,7 @@ def __init__( self._epoch_size = 1 def set_step_params(self, init_iter, epoch_size): + """Set step parameters.""" assert init_iter >= 0 assert epoch_size > 0 @@ -65,18 +70,22 @@ def set_step_params(self, init_iter, epoch_size): @property def with_loss_jitter(self): + """Check loss jitter.""" return self._jitter_sigma_factor is not None @property def iter(self): + """Return iteration.""" return self._iter @property def epoch_size(self): + """Return epoch size.""" return self._epoch_size @property def last_loss_weight(self): + """Return last loss weight.""" return self._last_loss_weight @abstractmethod @@ -99,8 +108,8 @@ def forward(self, *args, **kwargs): loss, meta = self._forward(*args, **kwargs) # make sure meta data are tensors as well for aggregation # when parsing loss in the segmentor - for k, v in meta.items(): - meta[k] = torch.tensor(v, dtype=loss.dtype, device=loss.device) + for key, val in meta.items(): + meta[key] = torch.tensor(val, dtype=loss.dtype, device=loss.device) if self.with_loss_jitter and loss.numel() == 1: if self._smooth_loss is None: diff --git a/otx/mpa/modules/models/losses/cross_entropy_loss_with_ignore.py b/otx/algorithms/segmentation/adapters/mmseg/models/losses/cross_entropy_loss_with_ignore.py similarity index 88% rename from otx/mpa/modules/models/losses/cross_entropy_loss_with_ignore.py rename to otx/algorithms/segmentation/adapters/mmseg/models/losses/cross_entropy_loss_with_ignore.py index cdabc189798..57e9c24c268 100644 --- a/otx/mpa/modules/models/losses/cross_entropy_loss_with_ignore.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/losses/cross_entropy_loss_with_ignore.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Cross entropy loss for ignored mode in class-incremental learning.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -7,11 +8,11 @@ from mmseg.models.builder import LOSSES from mmseg.models.losses.utils import get_class_weight -from .mpa_pixel_base import MPABasePixelLoss +from .otx_pixel_base import OTXBasePixelLoss @LOSSES.register_module() -class CrossEntropyLossWithIgnore(MPABasePixelLoss): +class CrossEntropyLossWithIgnore(OTXBasePixelLoss): """CrossEntropyLossWithIgnore with Ignore Mode Support for Class Incremental Learning.
Args: @@ -24,13 +25,14 @@ class CrossEntropyLossWithIgnore(MPABasePixelLoss): """ def __init__(self, reduction="mean", loss_weight=None, **kwargs): - super(CrossEntropyLossWithIgnore, self).__init__(**kwargs) + super().__init__(**kwargs) self.reduction = reduction self.class_weight = get_class_weight(loss_weight) @property def name(self): + """name.""" return "ce_with_ignore" def _calculate(self, cls_score, label, valid_label_mask, scale): diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/losses/detcon_loss.py b/otx/algorithms/segmentation/adapters/mmseg/models/losses/detcon_loss.py index 8d93688cbc1..19140effac1 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/models/losses/detcon_loss.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/losses/detcon_loss.py @@ -1,6 +1,6 @@ """DetCon loss.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # pylint: disable=no-name-in-module, not-callable diff --git a/otx/mpa/modules/models/losses/mpa_pixel_base.py b/otx/algorithms/segmentation/adapters/mmseg/models/losses/otx_pixel_base.py similarity index 88% rename from otx/mpa/modules/models/losses/mpa_pixel_base.py rename to otx/algorithms/segmentation/adapters/mmseg/models/losses/otx_pixel_base.py index e7a7bacdd96..b9f68a5ffc4 100644 --- a/otx/mpa/modules/models/losses/mpa_pixel_base.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/losses/otx_pixel_base.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""OTX pixel loss.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -7,10 +8,14 @@ from .base_pixel_loss import BasePixelLoss +# pylint: disable=too-many-function-args, too-many-locals + + +class OTXBasePixelLoss(BasePixelLoss): # pylint: disable=abstract-method + """OTXBasePixelLoss.""" -class MPABasePixelLoss(BasePixelLoss): def __init__(self, **kwargs): - super(MPABasePixelLoss, self).__init__(**kwargs) + super().__init__(**kwargs) def _forward( self, @@ -20,7 +25,7 @@ def _forward( avg_factor=None, pixel_weights=None, reduction_override=None, - ): + ): # pylint: disable=arguments-renamed assert reduction_override in (None, "none", "mean", "sum") reduction = reduction_override if reduction_override else self.reduction diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/necks/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/necks/__init__.py index 841d7bf50d4..cf76dc5c172 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/models/necks/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/necks/__init__.py @@ -1,6 +1,6 @@ """OTX Algorithms - Segmentation Necks.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/necks/selfsl_mlp.py b/otx/algorithms/segmentation/adapters/mmseg/models/necks/selfsl_mlp.py index 563c89b4df4..e7656efd150 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/models/necks/selfsl_mlp.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/necks/selfsl_mlp.py @@ -3,7 +3,7 @@ This MLP consists of fc (conv) - norm - relu - fc (conv). 
""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # pylint: disable=dangerous-default-value diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/__init__.py new file mode 100644 index 00000000000..47c10da0113 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/__init__.py @@ -0,0 +1,25 @@ +"""Scaler schedulers for semantic segmentation.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from .constant import ConstantScalarScheduler +from .poly import PolyScalarScheduler +from .step import StepScalarScheduler + +__all__ = [ + "ConstantScalarScheduler", + "PolyScalarScheduler", + "StepScalarScheduler", +] diff --git a/otx/mpa/modules/models/scalar_schedulers/base.py b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/base.py similarity index 70% rename from otx/mpa/modules/models/scalar_schedulers/base.py rename to otx/algorithms/segmentation/adapters/mmseg/models/schedulers/base.py index e3000f0a21b..600309d8f1d 100644 --- a/otx/mpa/modules/models/scalar_schedulers/base.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/base.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Base scalar scheduler.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,10 +7,10 @@ class BaseScalarScheduler(metaclass=ABCMeta): - def __init__(self): - super(BaseScalarScheduler, self).__init__() + """Base scalar scheduler.""" def __call__(self, step, epoch_size) -> float: + """Callback function of BaseScalarScheduler.""" return self._get_value(step, epoch_size) @abstractmethod diff --git a/otx/mpa/modules/models/scalar_schedulers/constant.py b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/constant.py similarity index 73% rename from otx/mpa/modules/models/scalar_schedulers/constant.py rename to otx/algorithms/segmentation/adapters/mmseg/models/schedulers/constant.py index 96536dd8994..f7819d18ce5 100644 --- a/otx/mpa/modules/models/scalar_schedulers/constant.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/constant.py @@ -1,8 +1,10 @@ -# Copyright (C) 2022 Intel Corporation +"""Constant scheduler.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from ..builder import SCALAR_SCHEDULERS +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import SCALAR_SCHEDULERS + from .base import BaseScalarScheduler @@ -11,12 +13,13 @@ class ConstantScalarScheduler(BaseScalarScheduler): """The learning rate remains constant over time. The learning rate equals the scale. + Args: scale (float): The learning rate scale. 
""" def __init__(self, scale: float = 30.0): - super(ConstantScalarScheduler, self).__init__() + super().__init__() self._end_s = scale assert self._end_s > 0.0 diff --git a/otx/mpa/modules/models/scalar_schedulers/poly.py b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/poly.py similarity index 90% rename from otx/mpa/modules/models/scalar_schedulers/poly.py rename to otx/algorithms/segmentation/adapters/mmseg/models/schedulers/poly.py index 6b70fe9423b..f173b62f374 100644 --- a/otx/mpa/modules/models/scalar_schedulers/poly.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/poly.py @@ -1,10 +1,13 @@ -# Copyright (C) 2022 Intel Corporation +"""Polynomial scheduler.""" + +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import numpy as np -from ..builder import SCALAR_SCHEDULERS +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import SCALAR_SCHEDULERS + from .base import BaseScalarScheduler @@ -23,7 +26,7 @@ class PolyScalarScheduler(BaseScalarScheduler): def __init__( self, start_scale: float, end_scale: float, num_iters: int, power: float = 1.2, by_epoch: bool = False ): - super(PolyScalarScheduler, self).__init__() + super().__init__() self._start_s = start_scale assert self._start_s >= 0.0 diff --git a/otx/mpa/modules/models/scalar_schedulers/step.py b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/step.py similarity index 89% rename from otx/mpa/modules/models/scalar_schedulers/step.py rename to otx/algorithms/segmentation/adapters/mmseg/models/schedulers/step.py index 3646f148960..19c4f81563d 100644 --- a/otx/mpa/modules/models/scalar_schedulers/step.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/schedulers/step.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""Step scheduler.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,7 +7,8 @@ import numpy as np -from ..builder import SCALAR_SCHEDULERS +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import SCALAR_SCHEDULERS + from .base import BaseScalarScheduler @@ -26,7 +28,7 @@ class StepScalarScheduler(BaseScalarScheduler): """ def __init__(self, scales: List[float], num_iters: List[int], by_epoch: bool = False): - super(StepScalarScheduler, self).__init__() + super().__init__() self.by_epoch = by_epoch diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/__init__.py index cf76332e0d2..d953b628f81 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/__init__.py @@ -1,6 +1,6 @@ """OTX Algorithms - Segmentation Segmentors.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions # and limitations under the License. 
+from .class_incr_encoder_decoder import ClassIncrEncoderDecoder from .detcon import DetConB, SupConDetConB +from .mean_teacher_segmentor import MeanTeacherSegmentor -__all__ = ["DetConB", "SupConDetConB"] +__all__ = ["DetConB", "SupConDetConB", "ClassIncrEncoderDecoder", "MeanTeacherSegmentor"] diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/class_incr_encoder_decoder.py b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/class_incr_encoder_decoder.py new file mode 100644 index 00000000000..596bcccae9d --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/class_incr_encoder_decoder.py @@ -0,0 +1,109 @@ +"""Encoder-decoder for incremental learning.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import functools + +import torch +from mmseg.models import SEGMENTORS +from mmseg.utils import get_root_logger + +from otx.algorithms.common.utils.task_adapt import map_class_names + +from .mixin import PixelWeightsMixin +from .otx_encoder_decoder import OTXEncoderDecoder + + +@SEGMENTORS.register_module() +class ClassIncrEncoderDecoder(PixelWeightsMixin, OTXEncoderDecoder): + """Encoder-decoder for incremental learning.""" + + def __init__(self, *args, task_adapt=None, **kwargs): + super().__init__(*args, **kwargs) + + # Hook for class-sensitive weight loading + assert task_adapt is not None, "When using task_adapt, task_adapt must be set." + + self._register_load_state_dict_pre_hook( + functools.partial( + self.load_state_dict_pre_hook, + self, # model + task_adapt["dst_classes"], # model_classes + task_adapt["src_classes"], # chkpt_classes + ) + ) + + def forward_train( + self, + img, + img_metas, + gt_semantic_seg, + aux_img=None, + **kwargs, + ): # pylint: disable=arguments-renamed + """Forward function for training. + + Args: + img (Tensor): Input images. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + aux_img (Tensor): Auxiliary images. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + if aux_img is not None: + mix_loss_enabled = False + mix_loss_cfg = self.train_cfg.get("mix_loss", None) + if mix_loss_cfg is not None: + mix_loss_enabled = mix_loss_cfg.get("enable", False) + if mix_loss_enabled: + self.train_cfg.mix_loss.enable = mix_loss_enabled + + if self.train_cfg.mix_loss.enable: + img = torch.cat([img, aux_img], dim=0) + gt_semantic_seg = torch.cat([gt_semantic_seg, gt_semantic_seg], dim=0) + + return super().forward_train(img, img_metas, gt_semantic_seg, **kwargs) + + @staticmethod + def load_state_dict_pre_hook( + model, model_classes, chkpt_classes, chkpt_dict, prefix, *args, **kwargs + ): # pylint: disable=too-many-locals, unused-argument + """Modify input state_dict according to class name matching before weight loading.""" + logger = get_root_logger("INFO") + logger.info(f"----------------- ClassIncrEncoderDecoder.load_state_dict_pre_hook() called w/ prefix: {prefix}") + + # Dst to src mapping index + model_classes = list(model_classes) + chkpt_classes = list(chkpt_classes) + model2chkpt = map_class_names(model_classes, chkpt_classes) + logger.info(f"{chkpt_classes} -> {model_classes} ({model2chkpt})") + + model_dict = model.state_dict() + param_names = [ + "decode_head.conv_seg.weight", + "decode_head.conv_seg.bias", + ] + for model_name in param_names: + chkpt_name = prefix + model_name + if model_name not in model_dict or chkpt_name not in chkpt_dict: + logger.info(f"Skipping weight copy: {chkpt_name}") + continue + + # Mix weights + model_param = model_dict[model_name].clone() + chkpt_param = chkpt_dict[chkpt_name] + for model_key, c in enumerate(model2chkpt): + if c >= 0: + model_param[model_key].copy_(chkpt_param[c]) + + # Replace checkpoint weight by mixed weights + chkpt_dict[chkpt_name] = model_param diff --git a/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py index 399b2384290..4569a1096b8 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/detcon.py @@ -4,7 +4,7 @@ - 'Efficient Visual Pretraining with Contrastive Detection', https://arxiv.org/abs/2103.10957 """ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # pylint: disable=unused-argument, invalid-name, unnecessary-pass, not-callable @@ -24,10 +24,9 @@ from mmseg.ops import resize from torch import nn -from otx.mpa.modules.models.segmentors.class_incr_encoder_decoder import ( - ClassIncrEncoderDecoder, -) -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger + +from .class_incr_encoder_decoder import ClassIncrEncoderDecoder logger = get_logger() @@ -517,10 +516,10 @@ def __init__( # pylint: disable=arguments-renamed def forward_train( self, - img: torch.Tensor, - img_metas: List[Dict], - gt_semantic_seg: torch.Tensor, - pixel_weights: Optional[torch.Tensor] = None, + img, + img_metas, + gt_semantic_seg, + pixel_weights=None, **kwargs, ): """Forward function for training. 
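Editor's note: the `load_state_dict_pre_hook` above mixes checkpoint and model weights row by row, so a checkpoint trained on an old class set can initialize a model whose head now has extra classes. A self-contained sketch of the idea; `map_class_names` below is a simplified stand-in for the OTX helper, and the class lists are hypothetical:

```python
# Sketch of class-incremental weight remapping for the conv_seg head.
import torch


def map_class_names(model_classes, chkpt_classes):
    # For each current class, index into the checkpoint classes, or -1 if new.
    return [chkpt_classes.index(c) if c in chkpt_classes else -1 for c in model_classes]


model_classes = ["background", "car", "person"]  # current model: 3 outputs
chkpt_classes = ["background", "person"]         # checkpoint: 2 outputs
model2chkpt = map_class_names(model_classes, chkpt_classes)  # [0, -1, 1]

model_w = torch.zeros(3, 8)   # e.g. decode_head.conv_seg.weight, one row per class
chkpt_w = torch.randn(2, 8)

mixed = model_w.clone()
for dst, src in enumerate(model2chkpt):
    if src >= 0:  # reuse old-class rows; brand-new classes keep their init
        mixed[dst].copy_(chkpt_w[src])

# The hook then writes `mixed` back into the checkpoint dict, so that
# load_state_dict sees shapes matching the current model.
assert torch.equal(mixed[0], chkpt_w[0]) and torch.equal(mixed[2], chkpt_w[1])
```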
diff --git a/otx/mpa/modules/models/segmentors/mean_teacher_segmentor.py b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mean_teacher_segmentor.py similarity index 65% rename from otx/mpa/modules/models/segmentors/mean_teacher_segmentor.py rename to otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mean_teacher_segmentor.py index cf3eb65c8d3..35687748cc5 100644 --- a/otx/mpa/modules/models/segmentors/mean_teacher_segmentor.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mean_teacher_segmentor.py @@ -1,20 +1,28 @@ +"""Mean teacher segmentor for semi-supervised learning.""" + import functools -from collections import OrderedDict import torch from mmseg.models import SEGMENTORS, build_segmentor from mmseg.models.segmentors.base import BaseSegmentor from mmseg.ops import resize -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() +# pylint: disable=too-many-locals, protected-access + @SEGMENTORS.register_module() class MeanTeacherSegmentor(BaseSegmentor): + """Mean teacher segmentor for semi-supervised learning. + + It creates two models and applies EMA from one to the other for the consistency loss. + """ + def __init__(self, orig_type=None, unsup_weight=0.1, semisl_start_iter=30, **kwargs): - super(MeanTeacherSegmentor, self).__init__() + super().__init__() self.test_cfg = kwargs["test_cfg"] self.semisl_start_iter = semisl_start_iter self.count_iter = 0 @@ -31,21 +39,27 @@ def __init__(self, orig_type=None, unsup_weight=0.1, semisl_start_iter=30, **kwa self._register_load_state_dict_pre_hook(functools.partial(self.load_state_dict_pre_hook, self)) def encode_decode(self, img, img_metas): + """Encode and decode images.""" return self.model_s.encode_decode(img, img_metas) def extract_feat(self, imgs): + """Extract feature.""" return self.model_s.extract_feat(imgs) - def simple_test(self, img, img_metas, **kwargs): - return self.model_s.simple_test(img, img_metas, **kwargs) + def simple_test(self, img, img_meta, **kwargs): + """Simple test.""" + return self.model_s.simple_test(img, img_meta, **kwargs) def aug_test(self, imgs, img_metas, **kwargs): + """Aug test.""" return self.model_s.aug_test(imgs, img_metas, **kwargs) def forward_dummy(self, img, **kwargs): + """Forward dummy.""" return self.model_s.forward_dummy(img, **kwargs) def forward_train(self, img, img_metas, gt_semantic_seg, **kwargs): + """Forward train.""" self.count_iter += 1 if self.semisl_start_iter > self.count_iter or "extra_0" not in kwargs: x = self.model_s.extract_feat(img) @@ -63,7 +77,7 @@ def forward_train(self, img, img_metas, gt_semantic_seg, **kwargs): teacher_logit = resize( input=teacher_logit, size=ul_w_img.shape[2:], mode="bilinear", align_corners=self.align_corners ) - conf_from_teacher, pl_from_teacher = torch.max(torch.softmax(teacher_logit, axis=1), axis=1, keepdim=True) + _, pl_from_teacher = torch.max(torch.softmax(teacher_logit, axis=1), axis=1, keepdim=True) losses = dict() @@ -72,34 +86,34 @@ def forward_train(self, img, img_metas, gt_semantic_seg, **kwargs): loss_decode = self.model_s._decode_head_forward_train(x, img_metas, gt_semantic_seg=gt_semantic_seg) loss_decode_u = self.model_s._decode_head_forward_train(x_u, ul_img_metas, gt_semantic_seg=pl_from_teacher) - for (k, v) in loss_decode_u.items(): - if v is None: + for (key, value) in loss_decode_u.items(): + if value is None: continue - losses[k] = loss_decode[k] + loss_decode_u[k] * self.unsup_weight + losses[key] = loss_decode[key] +
loss_decode_u[key] * self.unsup_weight return losses @staticmethod - def state_dict_hook(module, state_dict, prefix, *args, **kwargs): - """Redirect student model as output state_dict (teacher as auxilliary)""" + def state_dict_hook(module, state_dict, prefix, *args, **kwargs): # pylint: disable=unused-argument + """Redirect student model as output state_dict (teacher as auxiliary).""" logger.info("----------------- MeanTeacherSegmentor.state_dict_hook() called") - for k in list(state_dict.keys()): - v = state_dict.pop(k) - if not prefix or k.startswith(prefix): - k = k.replace(prefix, "", 1) - if k.startswith("model_s."): - k = k.replace("model_s.", "", 1) - elif k.startswith("model_t."): + for key in list(state_dict.keys()): + value = state_dict.pop(key) + if not prefix or key.startswith(prefix): + key = key.replace(prefix, "", 1) + if key.startswith("model_s."): + key = key.replace("model_s.", "", 1) + elif key.startswith("model_t."): continue - k = prefix + k - state_dict[k] = v + key = prefix + key + state_dict[key] = value return state_dict @staticmethod - def load_state_dict_pre_hook(module, state_dict, *args, **kwargs): - """Redirect input state_dict to teacher model""" + def load_state_dict_pre_hook(module, state_dict, *args, **kwargs): # pylint: disable=unused-argument + """Redirect input state_dict to teacher model.""" logger.info("----------------- MeanTeacherSegmentor.load_state_dict_pre_hook() called") - for k in list(state_dict.keys()): - v = state_dict.pop(k) - state_dict["model_s." + k] = v - state_dict["model_t." + k] = v + for key in list(state_dict.keys()): + value = state_dict.pop(key) + state_dict["model_s." + key] = value + state_dict["model_t." + key] = value diff --git a/otx/mpa/modules/models/segmentors/pixel_weights_mixin.py b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mixin.py similarity index 94% rename from otx/mpa/modules/models/segmentors/pixel_weights_mixin.py rename to otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mixin.py index b5ed556856f..e24a3d6ec57 100644 --- a/otx/mpa/modules/models/segmentors/pixel_weights_mixin.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/mixin.py @@ -1,16 +1,20 @@ -# Copyright (C) 2022 Intel Corporation +"""Modules for decode and loss reweighting/mix.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # - -import torch.nn as nn from mmseg.core import add_prefix from mmseg.models.builder import build_loss from mmseg.ops import resize +from torch import nn + +from otx.algorithms.segmentation.adapters.mmseg.models.utils import LossEqualizer -from ..losses.utils import LossEqualizer # pylint: disable=too-many-locals -class PixelWeightsMixin(object): +class PixelWeightsMixin: + """PixelWeightsMixin.""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._init_train_components(self.train_cfg) @@ -45,6 +49,7 @@ def _get_argument_by_name(trg_name, arguments): return arguments[trg_name] def set_step_params(self, init_iter, epoch_size): + """Sets the step params for the current object's decode head.""" self.decode_head.set_step_params(init_iter, epoch_size) if getattr(self, "auxiliary_head", None) is not None: @@ -55,7 +60,7 @@ def set_step_params(self, init_iter, epoch_size): self.auxiliary_head.set_step_params(init_iter, epoch_size) def _decode_head_forward_train(self, x, img_metas, pixel_weights=None, **kwargs): - + """Run forward train in decode head.""" trg_map =
self._get_argument_by_name(self.decode_head.loss_target_name, kwargs) loss_decode, logits_decode = self.decode_head.forward_train( x, diff --git a/otx/mpa/modules/models/segmentors/otx_encoder_decoder.py b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/otx_encoder_decoder.py similarity index 68% rename from otx/mpa/modules/models/segmentors/otx_encoder_decoder.py rename to otx/algorithms/segmentation/adapters/mmseg/models/segmentors/otx_encoder_decoder.py index 4b6085340f8..33cbf38bf78 100644 --- a/otx/mpa/modules/models/segmentors/otx_encoder_decoder.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/segmentors/otx_encoder_decoder.py @@ -1,4 +1,5 @@ -# Copyright (C) 2022 Intel Corporation +"""OTX encoder decoder for semantic segmentation.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,11 +7,14 @@ from mmseg.models import SEGMENTORS from mmseg.models.segmentors.encoder_decoder import EncoderDecoder -from otx.mpa.deploy.utils import is_mmdeploy_enabled +from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled +# pylint: disable=unused-argument, line-too-long @SEGMENTORS.register_module() class OTXEncoderDecoder(EncoderDecoder): + """OTX encoder decoder.""" + def simple_test(self, img, img_meta, rescale=True, output_logits=False): """Simple test with single image.""" seg_logit = self.inference(img, img_meta, rescale) @@ -34,26 +38,27 @@ def simple_test(self, img, img_meta, rescale=True, output_logits=False): if is_mmdeploy_enabled(): from mmdeploy.core import FUNCTION_REWRITER - from otx.mpa.modules.hooks.recording_forward_hooks import FeatureVectorHook - - @FUNCTION_REWRITER.register_rewriter( - "otx.mpa.modules.models.segmentors.otx_encoder_decoder.OTXEncoderDecoder.extract_feat" + from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( # pylint: disable=ungrouped-imports + FeatureVectorHook, ) + + BASE_CLASS = "otx.algorithms.segmentation.adapters.mmseg.models.segmentors.otx_encoder_decoder.OTXEncoderDecoder" + + @FUNCTION_REWRITER.register_rewriter(f"{BASE_CLASS}.extract_feat") def single_stage_detector__extract_feat(ctx, self, img): + """Extract feature.""" feat = self.backbone(img) self.feature_map = feat if self.with_neck: feat = self.neck(feat) return feat - @FUNCTION_REWRITER.register_rewriter( - "otx.mpa.modules.models.segmentors.otx_encoder_decoder.OTXEncoderDecoder.simple_test" - ) + @FUNCTION_REWRITER.register_rewriter(f"{BASE_CLASS}.simple_test") def single_stage_detector__simple_test(ctx, self, img, img_metas, **kwargs): + """Test.""" # with output activation seg_logit = self.inference(img, img_metas, True) if ctx.cfg["dump_features"]: feature_vector = FeatureVectorHook.func(self.feature_map) return seg_logit, feature_vector - else: - return seg_logit + return seg_logit diff --git a/otx/mpa/modules/models/utils/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/__init__.py similarity index 83% rename from otx/mpa/modules/models/utils/__init__.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/__init__.py index 97fc2539a3a..733932ab89c 100644 --- a/otx/mpa/modules/models/utils/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/__init__.py @@ -1,16 +1,17 @@ -# Copyright (c) 2020-2021 The MMSegmentation Authors +"""Utils used for mmseg model.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -# Copyright (C) 2022 Intel Corporation +# Copyright (c) 2020-2021 The MMSegmentation 
Authors # SPDX-License-Identifier: Apache-2.0 # - from .aggregator import IterativeAggregator, IterativeConcatAggregator from .angular_pw_conv import AngularPWConv from .asymmetric_position_attention import AsymmetricPositionAttentionModule from .channel_shuffle import channel_shuffle from .local_attention import LocalAttentionModule +from .loss_equalizer import LossEqualizer from .normalize import normalize from .psp_layer import PSPModule @@ -19,6 +20,7 @@ "IterativeConcatAggregator", "channel_shuffle", "LocalAttentionModule", + "LossEqualizer", "PSPModule", "AsymmetricPositionAttentionModule", "AngularPWConv", diff --git a/otx/mpa/modules/models/utils/aggregator.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/aggregator.py similarity index 90% rename from otx/mpa/modules/models/utils/aggregator.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/aggregator.py index 2fc565ecd06..7006606a62d 100644 --- a/otx/mpa/modules/models/utils/aggregator.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/aggregator.py @@ -1,21 +1,26 @@ -# Copyright (C) 2022 Intel Corporation +"""Aggregators.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from torch import nn -from ..utils import normalize +from otx.algorithms.segmentation.adapters.mmseg.models.utils import normalize +# pylint: disable=invalid-name class IterativeAggregator(nn.Module): - """Based on: https://github.com/HRNet/Lite-HRNet""" + """IterativeAggregator. - def __init__( - self, in_channels, min_channels=None, conv_cfg=None, norm_cfg=dict(type="BN"), merge_norm=None, use_concat=False - ): + Based on: https://github.com/HRNet/Lite-HRNet. 
+ """ + + def __init__(self, in_channels, min_channels=None, conv_cfg=None, norm_cfg=None, merge_norm=None, use_concat=False): + if norm_cfg is None: + norm_cfg = dict(type="BN") super().__init__() self.use_concat = use_concat @@ -101,6 +106,7 @@ def _norm(x, mode=None): return out def forward(self, x): + """Forward.""" x = x[::-1] y_list = [] @@ -130,7 +136,12 @@ def forward(self, x): class IterativeConcatAggregator(nn.Module): - def __init__(self, in_channels, min_channels=None, conv_cfg=None, norm_cfg=dict(type="BN"), merge_norm=None): + """IterativeConcatAggregator.""" + + def __init__(self, in_channels, min_channels=None, conv_cfg=None, norm_cfg=None, merge_norm=None): + if norm_cfg is None: + norm_cfg = dict(type="BN") + super().__init__() num_branches = len(in_channels) @@ -180,6 +191,7 @@ def _norm(x, mode=None): return out def forward(self, x): + """Forward.""" x = x[::-1] y_list = [] diff --git a/otx/mpa/modules/models/utils/angular_pw_conv.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/angular_pw_conv.py similarity index 77% rename from otx/mpa/modules/models/utils/angular_pw_conv.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/angular_pw_conv.py index d6f73635557..50246412580 100644 --- a/otx/mpa/modules/models/utils/angular_pw_conv.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/angular_pw_conv.py @@ -1,17 +1,20 @@ -# Copyright (C) 2022 Intel Corporation +"""Angular pw conv.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn import torch.nn.functional as F +from torch import nn -from .normalize import normalize +from otx.algorithms.segmentation.adapters.mmseg.models.utils import normalize class AngularPWConv(nn.Module): + """AngularPWConv.""" + def __init__(self, in_features, out_features, clip_output=False): - super(AngularPWConv, self).__init__() + super().__init__() self.in_features = in_features assert in_features > 0 @@ -23,6 +26,7 @@ def __init__(self, in_features, out_features, clip_output=False): self.weight.data.normal_().renorm_(2, 1, 1e-5).mul_(1e5) def forward(self, x): + """Forward.""" weight = normalize(self.weight, dim=1, p=2).view(self.out_features, self.in_features, 1, 1) out = F.conv2d(x, weight) diff --git a/otx/mpa/modules/models/utils/asymmetric_position_attention.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/asymmetric_position_attention.py similarity index 82% rename from otx/mpa/modules/models/utils/asymmetric_position_attention.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/asymmetric_position_attention.py index 9796c7f84dd..f1dfea2e16e 100644 --- a/otx/mpa/modules/models/utils/asymmetric_position_attention.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/asymmetric_position_attention.py @@ -1,32 +1,42 @@ +"""Asymmetric position attention module.""" # Copyright (c) 2019 MendelXu # SPDX-License-Identifier: Apache-2.0 # -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule +from torch import nn from .psp_layer import PSPModule +# pylint: disable=too-many-instance-attributes class AsymmetricPositionAttentionModule(nn.Module): - """Reference: https://github.com/MendelXu/ANN""" + """AsymmetricPositionAttentionModule. + + Reference: https://github.com/MendelXu/ANN. 
+ """ def __init__( self, in_channels, key_channels, value_channels=None, - psp_size=(1, 3, 6, 8), + psp_size=None, conv_cfg=None, - norm_cfg=dict(type="BN"), + norm_cfg=None, ): super().__init__() + if psp_size is None: + psp_size = (1, 3, 6, 8) + if norm_cfg is None: + norm_cfg = dict(type="BN") + self.in_channels = in_channels self.key_channels = key_channels self.value_channels = value_channels if value_channels is not None else in_channels @@ -69,7 +79,8 @@ def __init__( ) def forward(self, x): - batch_size, h, w = x.size(0), x.size(2), x.size(3) + """Forward.""" + batch_size, _, _ = x.size(0), x.size(2), x.size(3) query_key = self.query_key(x) diff --git a/otx/mpa/modules/models/utils/channel_shuffle.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/channel_shuffle.py similarity index 93% rename from otx/mpa/modules/models/utils/channel_shuffle.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/channel_shuffle.py index d63714fbd88..a86218532e1 100644 --- a/otx/mpa/modules/models/utils/channel_shuffle.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/channel_shuffle.py @@ -1,7 +1,8 @@ +"""Channel shuffle method.""" # Copyright (c) 2018-2020 Open-MMLab. # SPDX-License-Identifier: Apache-2.0 # -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/mpa/modules/models/utils/local_attention.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/local_attention.py similarity index 83% rename from otx/mpa/modules/models/utils/local_attention.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/local_attention.py index 5a943d3fce7..2e743fac2cf 100644 --- a/otx/mpa/modules/models/utils/local_attention.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/local_attention.py @@ -1,19 +1,25 @@ +"""Local attention module.""" # Copyright (C) 2019-2021 Xiangtai Lee # SPDX-License-Identifier: MIT # -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule +from torch import nn class LocalAttentionModule(nn.Module): - """Reference: https://github.com/lxtGH/GALD-DGCNet""" + """LocalAttentionModule. - def __init__(self, num_channels, conv_cfg=None, norm_cfg=dict(type="BN")): + Reference: https://github.com/lxtGH/GALD-DGCNet. 
+ """ + + def __init__(self, num_channels, conv_cfg=None, norm_cfg=None): + if norm_cfg is None: + norm_cfg = dict(type="BN") super().__init__() self.num_channels = num_channels @@ -56,6 +62,7 @@ def __init__(self, num_channels, conv_cfg=None, norm_cfg=dict(type="BN")): self.sigmoid_spatial = nn.Sigmoid() def forward(self, x): + """Forward.""" _, _, h, w = x.size() y = self.dwconv1(x) diff --git a/otx/mpa/modules/models/losses/utils.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/loss_equalizer.py similarity index 94% rename from otx/mpa/modules/models/losses/utils.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/loss_equalizer.py index bbe8e592d75..1491b15e89f 100644 --- a/otx/mpa/modules/models/losses/utils.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/loss_equalizer.py @@ -1,9 +1,12 @@ -# Copyright (C) 2022 Intel Corporation +"""Loss equalizer.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # class LossEqualizer: + """Loss equalizer.""" + def __init__(self, weights=None, momentum=0.1): self.momentum = momentum @@ -23,6 +26,7 @@ def __init__(self, weights=None, momentum=0.1): self._smoothed_values = dict() def reweight(self, losses): + """Reweight.""" assert isinstance(losses, dict) if len(losses) == 0: diff --git a/otx/mpa/modules/models/utils/normalize.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/normalize.py similarity index 54% rename from otx/mpa/modules/models/utils/normalize.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/normalize.py index c7bbdb79e3d..db72f6884b2 100644 --- a/otx/mpa/modules/models/utils/normalize.py +++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/normalize.py @@ -1,31 +1,38 @@ -# Copyright (C) 2022 Intel Corporation +"""Normalization.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn import torch.nn.functional as F +from torch import nn -class OnnxLpNormalization(torch.autograd.Function): +class OnnxLpNormalization(torch.autograd.Function): # pylint: disable=abstract-method + """OnnxLpNormalization.""" + @staticmethod - def forward(ctx, x, axis=0, p=2, eps=1e-12): + def forward(ctx, x, axis=0, p=2, eps=1e-12): # pylint: disable=unused-argument + """Forward.""" denom = x.norm(2, axis, True).clamp_min(eps).expand_as(x) return x / denom @staticmethod - def symbolic(g, x, axis=0, p=2, eps=1e-12): + def symbolic(g, x, axis=0, p=2, eps=1e-12): # pylint: disable=invalid-name, unused-argument + """Symbolic onnxLpNormalization.""" return g.op("LpNormalization", x, axis_i=int(axis), p_i=int(p)) def normalize(x, dim, p=2, eps=1e-12): + """Normalize method.""" if torch.onnx.is_in_onnx_export(): return OnnxLpNormalization.apply(x, dim, p, eps) - else: - return F.normalize(x, dim=dim, p=p, eps=eps) + return F.normalize(x, dim=dim, p=p, eps=eps) class Normalize(nn.Module): + """Normalize.""" + def __init__(self, dim=1, p=2, eps=1e-12): super().__init__() @@ -34,4 +41,5 @@ def __init__(self, dim=1, p=2, eps=1e-12): self.eps = eps def forward(self, x): + """Forward.""" return normalize(x, self.dim, self.p, self.eps) diff --git a/otx/mpa/modules/models/utils/psp_layer.py b/otx/algorithms/segmentation/adapters/mmseg/models/utils/psp_layer.py similarity index 65% rename from otx/mpa/modules/models/utils/psp_layer.py rename to otx/algorithms/segmentation/adapters/mmseg/models/utils/psp_layer.py index 93ad835d9f4..bd3ad2d4357 100644 --- a/otx/mpa/modules/models/utils/psp_layer.py 
+++ b/otx/algorithms/segmentation/adapters/mmseg/models/utils/psp_layer.py @@ -1,16 +1,20 @@ +"""PSP module.""" # Copyright (c) 2019 MendelXu # SPDX-License-Identifier: Apache-2.0 # -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import torch -import torch.nn as nn +from torch import nn class PSPModule(nn.Module): - """Reference: https://github.com/MendelXu/ANN""" + """PSP module. + + Reference: https://github.com/MendelXu/ANN. + """ methods = {"max": nn.AdaptiveMaxPool2d, "avg": nn.AdaptiveAvgPool2d} @@ -23,9 +27,10 @@ def __init__(self, sizes=(1, 3, 6, 8), method="max"): self.stages = nn.ModuleList([pool_block(output_size=(size, size)) for size in sizes]) def forward(self, feats): - n, c, _, _ = feats.size() + """Forward.""" + batch_size, c, _, _ = feats.size() - priors = [stage(feats).view(n, c, -1) for stage in self.stages] + priors = [stage(feats).view(batch_size, c, -1) for stage in self.stages] out = torch.cat(priors, -1) return out diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py index 23b312c33a5..1463ba35711 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py @@ -1,9 +1,18 @@ """NNCF utils for mmseg.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# -# flake8: noqa +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. 
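Editor's note on the `normalize` helper relocated above: outside ONNX export it defers to `F.normalize`, while during export the custom autograd `Function` emits an `LpNormalization` op via its `symbolic` method. A quick sanity sketch that the eager arithmetic matches `F.normalize`; this is an illustration, not repo code:

```python
# Check that the hand-written L2 normalization used by OnnxLpNormalization
# agrees with torch.nn.functional.normalize on the eager path.
import torch
import torch.nn.functional as F


def eager_lp_normalize(x, dim, eps=1e-12):
    # Same arithmetic as OnnxLpNormalization.forward above (p is fixed to 2).
    denom = x.norm(2, dim, True).clamp_min(eps).expand_as(x)
    return x / denom


x = torch.randn(4, 16)
assert torch.allclose(eager_lp_normalize(x, dim=1), F.normalize(x, dim=1, p=2))
```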
from .builder import build_nncf_segmentor from .hooks import CustomstepLrUpdaterHook diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py index cb8f9c54cb4..d09e1c8b157 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/builder.py @@ -1,5 +1,5 @@ """NNCF wrapped mmcls models builder.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py index 7c572870719..b7270e60354 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py @@ -1,5 +1,5 @@ """NNCF task related hooks.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/patches.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/patches.py index 4acf96f333a..6955d15c02f 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/patches.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/patches.py @@ -1,5 +1,5 @@ """Patch mmseg library.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/__init__.py new file mode 100644 index 00000000000..6197506e2f0 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/__init__.py @@ -0,0 +1,21 @@ +"""Initialize OTX Segmentation with MMSEG.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import otx.algorithms.common.adapters.mmcv.hooks +import otx.algorithms.segmentation.adapters.mmseg +import otx.algorithms.segmentation.adapters.mmseg.models +import otx.algorithms.segmentation.adapters.mmseg.models.schedulers +from otx.algorithms.segmentation.adapters.mmseg.tasks.incremental import ( + IncrSegInferrer, + IncrSegTrainer, +) +from otx.algorithms.segmentation.adapters.mmseg.tasks.semisl import ( + SemiSLSegExporter, + SemiSLSegInferrer, + SemiSLSegTrainer, +) + +# flake8: noqa +from . 
import exporter, inferrer, stage, trainer diff --git a/otx/mpa/seg/exporter.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/exporter.py similarity index 77% rename from otx/mpa/seg/exporter.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/exporter.py index de42b7f870b..b9104ee7297 100644 --- a/otx/mpa/seg/exporter.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/exporter.py @@ -1,3 +1,4 @@ +"""Export task for OTX Segmentation with MMSEG.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -5,10 +6,10 @@ import numpy as np from mmcv.runner import wrap_fp16_model -from otx.mpa.deploy.utils import sync_batchnorm_2_batchnorm -from otx.mpa.exporter_mixin import ExporterMixin -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.adapters.mmdeploy.utils import sync_batchnorm_2_batchnorm +from otx.algorithms.common.utils.logger import get_logger from .stage import SegStage @@ -17,8 +18,10 @@ @STAGES.register_module() class SegExporter(ExporterMixin, SegStage): + """Class for segmentation model export.""" + def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 - """Run exporter stage""" + """Run exporter stage.""" precision = kwargs.get("precision", "FP32") model_builder = kwargs.get("model_builder", self.MODEL_BUILDER) @@ -43,10 +46,11 @@ def model_builder_helper(*args, **kwargs): @staticmethod def naive_export(output_dir, model_builder, precision, cfg, model_name="model"): + """Export using pytorch backend.""" from mmseg.apis.inference import LoadImage from mmseg.datasets.pipelines import Compose - from ..deploy.apis import NaiveExporter + from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter def get_fake_data(cfg, orig_img_shape=(128, 128, 3)): pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] diff --git a/otx/mpa/seg/incremental/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py similarity index 61% rename from otx/mpa/seg/incremental/__init__.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py index a6ee3e980b5..3c2102526da 100644 --- a/otx/mpa/seg/incremental/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py @@ -1,6 +1,9 @@ +"""Initialize OTX Segmentation with MMSEG.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from .inferrer import IncrSegInferrer from .trainer import IncrSegTrainer + +__all__ = ["IncrSegInferrer", "IncrSegTrainer"] diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/inferrer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/inferrer.py new file mode 100644 index 00000000000..df95c6df7cb --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/inferrer.py @@ -0,0 +1,18 @@ +"""Inference for OTX segmentation model with Incremental learning.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer import SegInferrer + +from .stage import IncrSegStage + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class IncrSegInferrer(IncrSegStage, SegInferrer): + """Inference class for 
incremental learning.""" + + def __init__(self, **kwargs): + IncrSegStage.__init__(self, **kwargs) diff --git a/otx/mpa/seg/incremental/stage.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/stage.py similarity index 56% rename from otx/mpa/seg/incremental/stage.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/stage.py index a16b5a60a0a..0f833bf1ff4 100644 --- a/otx/mpa/seg/incremental/stage.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/stage.py @@ -1,32 +1,34 @@ -# Copyright (C) 2022 Intel Corporation +"""Stage for Incremental learning OTX segmentation with MMSEG.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import numpy as np from mmcv import ConfigDict -from otx.mpa.seg.stage import SegStage -from otx.mpa.utils.config_utils import update_or_add_custom_hook -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + update_or_add_custom_hook, +) +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage logger = get_logger() class IncrSegStage(SegStage): + """Class for incremental learning for segmentation.""" + def __init__(self, **kwargs): super().__init__(**kwargs) - def configure_task(self, cfg, training, **kwargs): - """Adjust settings for task adaptation""" - super().configure_task(cfg, training, **kwargs) + def configure_task(self, cfg, training): - """Adjust settings for task adaptation.""" wait + """Adjust settings for task adaptation.""" + super().configure_task(cfg, training) new_classes = np.setdiff1d(self.model_classes, self.org_model_classes).tolist() - # FIXME : can be naive supervised learning (from-scratch ver.) # Check if new classes are added - has_new_class = True if len(new_classes) > 0 else False - if has_new_class is False: - ValueError("Incremental learning should have at least one new class!") + has_new_class = len(new_classes) > 0 # Update TaskAdaptHook (use incremental sampler) task_adapt_hook = ConfigDict( diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/trainer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/trainer.py new file mode 100644 index 00000000000..6a95ca28b23 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/trainer.py @@ -0,0 +1,21 @@ +"""Trainer for Incremental OTX Segmentation with MMSEG.""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.mmseg.tasks.trainer import SegTrainer + +from .stage import IncrSegStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class IncrSegTrainer(IncrSegStage, SegTrainer): + """Trainer for incremental segmentation.""" + + def __init__(self, **kwargs): + IncrSegStage.__init__(self, **kwargs) diff --git a/otx/mpa/seg/inferrer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/inferrer.py similarity index 90% rename from otx/mpa/seg/inferrer.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/inferrer.py index c6bb695b895..3a5fc12d378 100644 --- a/otx/mpa/seg/inferrer.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/inferrer.py @@ -1,24 +1,28 @@ -# Copyright (C) 2022 Intel Corporation +"""OTX segmentation inference with MMSEG.""" +# Copyright (C) 2023 Intel Corporation #
SPDX-License-Identifier: Apache-2.0 # +import copy # noqa: E402 +import warnings # noqa: E402 from contextlib import nullcontext import torch -from mmcv.runner import wrap_fp16_model from mmcv.utils import Config, ConfigDict from mmseg.datasets import build_dataloader as mmseg_build_dataloader from mmseg.datasets import build_dataset as mmseg_build_dataset +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + FeatureVectorHook, +) +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage from otx.algorithms.common.adapters.mmcv.utils import ( build_data_parallel, build_dataloader, build_dataset, ) -from otx.mpa.modules.hooks.recording_forward_hooks import FeatureVectorHook -from otx.mpa.registry import STAGES -from otx.mpa.stage import Stage -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from .stage import SegStage @@ -27,12 +31,14 @@ @STAGES.register_module() class SegInferrer(SegStage): + """Inference class with MMSEG.""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.dataset = None def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run inference stage for segmentation + """Run inference stage for segmentation. - Configuration - Environment setup @@ -57,7 +63,9 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): return dict(outputs=outputs) + # pylint: disable=too-many-locals, too-many-branches def infer(self, cfg, model_builder=None, dump_features=False): + """Main inference function.""" # TODO: distributed inference data_cfg = cfg.data.test.copy() @@ -159,11 +167,9 @@ def infer(self, cfg, model_builder=None, dump_features=False): return outputs -import copy # noqa: E402 -import warnings # noqa: E402 - - +# pylint: disable=invalid-name def replace_ImageToTensor(pipelines): + """Change ImageToTensor pipeline to DefaultFormatBundle.""" pipelines = copy.deepcopy(pipelines) for i, pipeline in enumerate(pipelines): if pipeline["type"] == "MultiScaleFlipAug": diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py new file mode 100644 index 00000000000..cb937a180d0 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py @@ -0,0 +1,10 @@ +"""Initialize Semi-SL tasks for OTX segmentation.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from .exporter import SemiSLSegExporter +from .inferrer import SemiSLSegInferrer +from .trainer import SemiSLSegTrainer + +__all__ = ["SemiSLSegExporter", "SemiSLSegInferrer", "SemiSLSegTrainer"] diff --git a/otx/mpa/seg/semisl/exporter.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/exporter.py similarity index 61% rename from otx/mpa/seg/semisl/exporter.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/exporter.py index 34657710038..42eb2ff0c09 100644 --- a/otx/mpa/seg/semisl/exporter.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/exporter.py @@ -1,10 +1,11 @@ -# Copyright (C) 2022 Intel Corporation +"""Export task for Semi-SL OTX Segmentation with MMSEG.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from otx.mpa.registry import STAGES -from otx.mpa.seg.exporter import SegExporter -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES 
+from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.mmseg.tasks.exporter import SegExporter from .stage import SemiSLSegStage @@ -13,10 +14,13 @@ @STAGES.register_module() class SemiSLSegExporter(SemiSLSegStage, SegExporter): + """Exporter for semi-sl segmentation.""" + def __init__(self, **kwargs): SemiSLSegStage.__init__(self, **kwargs) def configure(self, model_cfg, model_ckpt, data_cfg, training=False, **kwargs): + """Patch config for semi-sl segmentation.""" cfg = SemiSLSegStage.configure(self, model_cfg, model_ckpt, data_cfg, training=training, **kwargs) cfg.model.type = cfg.model.orig_type diff --git a/otx/mpa/seg/semisl/inferrer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/inferrer.py similarity index 66% rename from otx/mpa/seg/semisl/inferrer.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/inferrer.py index d87e99c1a28..73d38ced93c 100644 --- a/otx/mpa/seg/semisl/inferrer.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/inferrer.py @@ -1,19 +1,24 @@ +"""Inference for Semi-SL OTX segmentation with MMSEG.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from otx.mpa.registry import STAGES -from otx.mpa.seg.inferrer import SegInferrer +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer import SegInferrer from .stage import SemiSLSegStage +# pylint: disable=super-init-not-called @STAGES.register_module() class SemiSLSegInferrer(SemiSLSegStage, SegInferrer): + """Inference class for Semi-SL.""" + def __init__(self, **kwargs): SemiSLSegStage.__init__(self, **kwargs) def configure(self, model_cfg, model_ckpt, data_cfg, training=False, **kwargs): + """Patch config for semi-sl segmentation.""" cfg = SemiSLSegStage.configure(self, model_cfg, model_ckpt, data_cfg, training=training, **kwargs) cfg.model.type = cfg.model.orig_type diff --git a/otx/mpa/seg/semisl/stage.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/stage.py similarity index 65% rename from otx/mpa/seg/semisl/stage.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/stage.py index 26bb60c7dd3..f77feb68277 100644 --- a/otx/mpa/seg/semisl/stage.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/stage.py @@ -1,28 +1,31 @@ +"""Stage for Semi-SL OTX Segmentation with MMSEG.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from otx.mpa.seg.stage import SegStage -from otx.mpa.utils.config_utils import remove_custom_hook -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.utils.config_utils import remove_custom_hook +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage logger = get_logger() class SemiSLSegStage(SegStage): + """Semi-SL stage for segmentation.""" + def __init__(self, **kwargs): super().__init__(**kwargs) - def configure_data(self, cfg, training, data_cfg, **kwargs): + def configure_data(self, cfg, training, data_cfg): """Patch cfg.data.""" - super().configure_data(cfg, training, data_cfg, **kwargs) + super().configure_data(cfg, training, data_cfg) # Set unlabeled data hook if training: if cfg.data.get("unlabeled", False) and cfg.data.unlabeled.get("otx_dataset", False): self.configure_unlabeled_dataloader(cfg, self.distributed) def configure_task(self, cfg, training, **kwargs): - """Adjust settings for task 
adaptation""" + """Adjust settings for task adaptation.""" super().configure_task(cfg, training, **kwargs) # Don't pass task_adapt arg to semi-segmentor diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/trainer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/trainer.py new file mode 100644 index 00000000000..57931944990 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/trainer.py @@ -0,0 +1,21 @@ +"""Train Semi-SL OTX Segmentation model with MMSEG.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.mmseg.tasks.trainer import SegTrainer + +from .stage import SemiSLSegStage + +logger = get_logger() + + +# pylint: disable=super-init-not-called +@STAGES.register_module() +class SemiSLSegTrainer(SemiSLSegStage, SegTrainer): + """Class for semi-sl segmentation model train.""" + + def __init__(self, **kwargs): + SemiSLSegStage.__init__(self, **kwargs) diff --git a/otx/mpa/seg/stage.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/stage.py similarity index 80% rename from otx/mpa/seg/stage.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/stage.py index d590b95c4f6..f6e6aeedde5 100644 --- a/otx/mpa/seg/stage.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/stage.py @@ -1,38 +1,43 @@ -# Copyright (C) 2022 Intel Corporation +"""Base stage for OTX Segmentation.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from mmcv import ConfigDict +from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + recursively_update_cfg, +) +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor -from otx.mpa.stage import Stage -from otx.mpa.utils.config_utils import recursively_update_cfg -from otx.mpa.utils.logger import get_logger logger = get_logger() class SegStage(Stage): + """Class for configuration of segmentation recipe.""" + MODEL_BUILDER = build_segmentor def configure(self, model_cfg, model_ckpt, data_cfg, training=True, **kwargs): - """Create MMCV-consumable config from given inputs""" + """Create MMCV-consumable config from given inputs.""" logger.info(f"configure!: training={training}") cfg = self.cfg - self.configure_model(cfg, model_cfg, training, **kwargs) + self.configure_model(cfg, model_cfg, **kwargs) self.configure_ckpt(cfg, model_ckpt, kwargs.get("pretrained", None)) self.configure_data(cfg, training, data_cfg) - self.configure_task(cfg, training, **kwargs) + self.configure_task(cfg, training) self.configure_hook(cfg) return cfg - def configure_model(self, cfg, model_cfg, training, **kwargs): - + def configure_model(self, cfg, model_cfg, **kwargs): + """Patch model_cfg.""" if model_cfg: if hasattr(model_cfg, "model"): - cfg.merge_from_dict(model_cfg._cfg_dict) + cfg.merge_from_dict(model_cfg) else: raise ValueError( "Unexpected config was passed through 'model_cfg'. 
" @@ -46,8 +51,8 @@ def configure_model(self, cfg, model_cfg, training, **kwargs): ir_model_path = kwargs.get("ir_model_path") if ir_model_path: - def is_mmov_model(k, v): - if k == "type" and v.startswith("MMOV"): + def is_mmov_model(key, value): + if key == "type" and value.startswith("MMOV"): return True return False @@ -59,13 +64,14 @@ def is_mmov_model(k, v): {"model_path": ir_model_path, "weight_path": ir_weight_path, "init_weight": ir_weight_init}, ) - def configure_data(self, cfg, training, data_cfg, **kwargs): # noqa: C901 + def configure_data(self, cfg, training, data_cfg): # noqa: C901 + """Patch data_cfg.""" # Data if data_cfg: cfg.merge_from_dict(data_cfg) # Dataset - super().configure_data(cfg, training, **kwargs) + super().configure_data(cfg, training) src_data_cfg = Stage.get_data_cfg(cfg, "train") for mode in ["train", "val", "test"]: if src_data_cfg.type == "MPASegDataset" and cfg.data.get(mode, False): @@ -75,8 +81,8 @@ def configure_data(self, cfg, training, data_cfg, **kwargs): # noqa: C901 cfg.data[mode]["type"] = "MPASegDataset" cfg.data[mode]["org_type"] = org_type - def configure_task(self, cfg, training, **kwargs): - """Adjust settings for task adaptation""" + def configure_task(self, cfg, training): + """Adjust settings for task adaptation.""" if cfg.get("task_adapt", None): logger.info(f"task config!!!!: training={training}") task_adapt_op = cfg["task_adapt"].get("op", "REPLACE") @@ -87,6 +93,7 @@ def configure_task(self, cfg, training, **kwargs): self.configure_ignore(cfg) def configure_classes(self, cfg, task_adapt_op): + """Patch model_classes and data_classes.""" # Task classes org_model_classes = self.get_model_classes(cfg) data_classes = self.get_data_classes(cfg) @@ -130,7 +137,7 @@ def configure_classes(self, cfg, task_adapt_op): self.model_classes = model_classes def configure_ignore(self, cfg): - # Change to incremental loss (ignore mode) + """Change to incremental loss (ignore mode).""" if cfg.get("ignore", False): cfg_loss_decode = ConfigDict( type="CrossEntropyLossWithIgnore", diff --git a/otx/mpa/seg/trainer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/trainer.py similarity index 89% rename from otx/mpa/seg/trainer.py rename to otx/algorithms/segmentation/adapters/mmseg/tasks/trainer.py index 2653ba0a369..fa05d8277e9 100644 --- a/otx/mpa/seg/trainer.py +++ b/otx/algorithms/segmentation/adapters/mmseg/tasks/trainer.py @@ -1,10 +1,10 @@ -# Copyright (C) 2022 Intel Corporation +"""Base Trainer for OTX segmentation with MMSEG.""" +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import glob import os -import os.path as osp import time from mmcv import get_git_hash @@ -14,8 +14,8 @@ from mmseg.utils import collect_env from torch import nn -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES +from otx.algorithms.common.utils.logger import get_logger from .stage import SegStage @@ -24,8 +24,11 @@ @STAGES.register_module() class SegTrainer(SegStage): + """Class for OTX segmentation train.""" + + # pylint: disable=too-many-locals def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run training stage for segmentation + """Run training stage for segmentation. 
- Configuration - Environment setup @@ -51,7 +54,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): env_info_dict = collect_env() env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) dash_line = "-" * 60 + "\n" - logger.info("Environment info:\n" + dash_line + env_info + "\n" + dash_line) + logger.info(f"Environment info:\n{dash_line}{env_info}\n{dash_line}") # Data datasets = [build_dataset(cfg.data.train)] @@ -96,7 +99,7 @@ def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # cfg.dump(osp.join(cfg.work_dir, 'config.py')) # logger.info(f'Config:\n{cfg.pretty_text}') - validate = True if cfg.data.get("val", None) else False + validate = "val" in cfg.data train_segmentor( model, datasets, diff --git a/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py index 8d8bd75c07e..690936587bd 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py @@ -1,9 +1,20 @@ """OTX Adapters - mmseg.utils.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. -from .builder import build_segmentor +from .builder import build_scalar_scheduler, build_segmentor from .config_utils import ( patch_config, patch_datasets, @@ -11,7 +22,7 @@ prepare_for_training, set_hyperparams, ) -from .data_utils import load_dataset_items +from .data_utils import get_valid_label_mask_per_batch, load_dataset_items __all__ = [ "patch_config", @@ -20,5 +31,7 @@ "prepare_for_training", "set_hyperparams", "load_dataset_items", + "build_scalar_scheduler", "build_segmentor", + "get_valid_label_mask_per_batch", ] diff --git a/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py b/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py index df0c8777b80..6c8d5512851 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/builder.py @@ -1,5 +1,5 @@ """MMseg model builder.""" -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -9,6 +9,23 @@ import torch from mmcv.runner import load_checkpoint from mmcv.utils import Config, ConfigDict +from mmseg.models.builder import MODELS + +SCALAR_SCHEDULERS = MODELS + + +def build_scalar_scheduler(cfg, default_value=None): + """Build scalar scheduler.""" + if cfg is None: + if default_value is not None: + assert isinstance(default_value, (int, float)) + cfg = dict(type="ConstantScalarScheduler", scale=float(default_value)) + else: + return None + elif isinstance(cfg, (int, float)): + cfg = dict(type="ConstantScalarScheduler", scale=float(cfg)) + + return SCALAR_SCHEDULERS.build(cfg) def build_segmentor( diff --git a/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py b/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py index 
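The new `build_scalar_scheduler` helper above accepts three input shapes; the sketch below simply restates its branches as calls. It assumes an environment with this patch and mmseg installed, with `ConstantScalarScheduler` registered in the shared `MODELS` registry:

```python
# Usage sketch for build_scalar_scheduler (assumes this patch plus mmseg are installed).
from otx.algorithms.segmentation.adapters.mmseg.utils import build_scalar_scheduler

build_scalar_scheduler(None)                     # no cfg, no default -> None
build_scalar_scheduler(None, default_value=0.5)  # falls back to ConstantScalarScheduler(scale=0.5)
build_scalar_scheduler(2)                        # bare number -> ConstantScalarScheduler(scale=2.0)
build_scalar_scheduler(dict(type="ConstantScalarScheduler", scale=1.0))  # explicit registry cfg
```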
bfcddf65e77..4bfb6e0d05f 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/data_utils.py @@ -20,10 +20,12 @@ import cv2 import numpy as np +import torch import tqdm from mmseg.datasets.custom import CustomDataset from skimage.segmentation import felzenszwalb +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.annotation import ( Annotation, AnnotationSceneEntity, @@ -42,7 +44,6 @@ OptionalDirectoryPathCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger logger = get_logger() @@ -156,6 +157,17 @@ def get_extended_label_names(labels: List[LabelEntity]): return all_labels +def get_valid_label_mask_per_batch(img_metas, num_classes): + """Get valid label masks for a batch, zeroing out classes marked as ignored.""" + valid_label_mask_per_batch = [] + for _, meta in enumerate(img_metas): + valid_label_mask = torch.Tensor([1 for _ in range(num_classes)]) + if "ignored_labels" in meta and meta["ignored_labels"]: + valid_label_mask[meta["ignored_labels"]] = 0 + valid_label_mask_per_batch.append(valid_label_mask) + return valid_label_mask_per_batch + + @check_input_parameters_type() def create_pseudo_masks(ann_file_path: str, data_root_dir: str, mode="FH"): """Create pseudo masks for Self-SL using DetCon.""" diff --git a/otx/algorithms/segmentation/configs/configuration.yaml b/otx/algorithms/segmentation/configs/configuration.yaml index 0da91d335ba..132ab25a4d6 100644 --- a/otx/algorithms/segmentation/configs/configuration.yaml +++ b/otx/algorithms/segmentation/configs/configuration.yaml @@ -274,22 +274,22 @@ algo_backend: header: Algo backend parameters train_type: affects_outcome_of: TRAINING - default_value: INCREMENTAL + default_value: Incremental description: Training scheme option that determines how to train the model editable: True enum_name: TrainType header: Train type options: - INCREMENTAL: "INCREMENTAL" - SEMISUPERVISED: "SEMISUPERVISED" - SELFSUPERVISED: "SELFSUPERVISED" + Incremental: "Incremental" + Semisupervised: "Semisupervised" + Selfsupervised: "Selfsupervised" type: SELECTABLE ui_rules: action: DISABLE_EDITING operator: AND rules: [] type: UI_RULES - value: INCREMENTAL + value: Incremental visible_in_ui: True warning: null mem_cache_size: diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/selfsl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/selfsl/hparam.yaml index c2259b834a8..c5ea68bbd4f 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/selfsl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/selfsl/hparam.yaml @@ -16,4 +16,4 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/semisl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/semisl/hparam.yaml index 55395b0d84c..580462daa1e 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/semisl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/semisl/hparam.yaml @@ -3,4 +3,4 @@ hyper_parameters: parameter_overrides: algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml index 548e2676bbc..ef3acd94560 100644 --- 
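`get_valid_label_mask_per_batch` from `data_utils.py` above is small enough to check by hand. A self-contained restatement (slightly simplified: `torch.ones` replaces the list comprehension), with a hypothetical two-image batch of mmseg-style meta dicts:

```python
# Restatement of get_valid_label_mask_per_batch for illustration; the img_metas
# below are hypothetical, mimicking mmseg meta dicts carrying "ignored_labels".
import torch

def get_valid_label_mask_per_batch(img_metas, num_classes):
    masks = []
    for meta in img_metas:
        mask = torch.ones(num_classes)
        if "ignored_labels" in meta and meta["ignored_labels"]:
            mask[meta["ignored_labels"]] = 0  # zero out classes ignored for this image
        masks.append(mask)
    return masks

print(get_valid_label_mask_per_batch([{"ignored_labels": [1]}, {}], num_classes=3))
# [tensor([1., 0., 1.]), tensor([1., 1., 1.])]
```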
a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml @@ -38,7 +38,7 @@ hyper_parameters: default_value: 300 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/selfsl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/selfsl/hparam.yaml index c2259b834a8..c5ea68bbd4f 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/selfsl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/selfsl/hparam.yaml @@ -16,4 +16,4 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/semisl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/semisl/hparam.yaml index 55395b0d84c..580462daa1e 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/semisl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/semisl/hparam.yaml @@ -3,4 +3,4 @@ hyper_parameters: parameter_overrides: algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml index fc1f1dfa889..92ba2428e52 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml @@ -47,7 +47,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/selfsl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/selfsl/hparam.yaml index c2259b834a8..c5ea68bbd4f 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/selfsl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/selfsl/hparam.yaml @@ -16,4 +16,4 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/semisl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/semisl/hparam.yaml index 55395b0d84c..580462daa1e 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/semisl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/semisl/hparam.yaml @@ -3,4 +3,4 @@ hyper_parameters: parameter_overrides: algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml index 78b229b2e1d..c079a116738 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml @@ -48,7 +48,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. 
max_nodes: 1 diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/selfsl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/selfsl/hparam.yaml index c2259b834a8..c5ea68bbd4f 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/selfsl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/selfsl/hparam.yaml @@ -16,4 +16,4 @@ hyper_parameters: default_value: false algo_backend: train_type: - default_value: SELFSUPERVISED + default_value: Selfsupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/semisl/hparam.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/semisl/hparam.yaml index 55395b0d84c..580462daa1e 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/semisl/hparam.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/semisl/hparam.yaml @@ -3,4 +3,4 @@ hyper_parameters: parameter_overrides: algo_backend: train_type: - default_value: SEMISUPERVISED + default_value: Semisupervised diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml index af1383f58b7..2e95673f035 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml @@ -48,7 +48,7 @@ hyper_parameters: default_value: 1.0 algo_backend: train_type: - default_value: INCREMENTAL + default_value: Incremental # Training resources. max_nodes: 1 diff --git a/otx/algorithms/segmentation/tasks/__init__.py b/otx/algorithms/segmentation/tasks/__init__.py index 583d0c1f948..366dbb7cb2e 100644 --- a/otx/algorithms/segmentation/tasks/__init__.py +++ b/otx/algorithms/segmentation/tasks/__init__.py @@ -15,7 +15,7 @@ # and limitations under the License. 
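These template updates complete the switch of every `train_type` value from upper-case constants to the new mixed-case spellings. Since the downstream lookups are plain case-sensitive enum and dict matches, the old spellings no longer resolve. A minimal sketch, assuming the `TrainType` members mirror the new values (the real enum is imported elsewhere in this patch from `otx.algorithms.common.configs`):

```python
# Assumed shape of the TrainType enum, for illustration only.
from enum import Enum

class TrainType(Enum):
    Incremental = "Incremental"
    Semisupervised = "Semisupervised"
    Selfsupervised = "Selfsupervised"

# Mapping as updated in otx/cli/manager/config_manager.py by this patch.
TASK_TYPE_TO_SUB_DIR_NAME = {"Incremental": "", "Semisupervised": "semisl", "Selfsupervised": "selfsl"}

print(TASK_TYPE_TO_SUB_DIR_NAME[TrainType("Selfsupervised").value])  # selfsl
print("SELFSUPERVISED" in TASK_TYPE_TO_SUB_DIR_NAME)                 # False: old casing no longer matches
```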
import otx.algorithms.common.adapters.mmcv.models as OTXBackbones -import otx.mpa.seg as MPASegmentation +import otx.algorithms.segmentation.adapters.mmseg.tasks as MPASegmentation from .inference import SegmentationInferenceTask from .nncf import SegmentationNNCFTask diff --git a/otx/algorithms/segmentation/tasks/inference.py b/otx/algorithms/segmentation/tasks/inference.py index 408696e3530..0df51c26cce 100644 --- a/otx/algorithms/segmentation/tasks/inference.py +++ b/otx/algorithms/segmentation/tasks/inference.py @@ -26,9 +26,11 @@ patch_runner, remove_from_configs_by_type, ) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig from otx.algorithms.common.configs import TrainType from otx.algorithms.common.tasks import BaseTask from otx.algorithms.common.utils.callback import InferenceProgressCallback +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor from otx.algorithms.segmentation.adapters.mmseg.utils.config_utils import ( patch_datasets, @@ -67,16 +69,14 @@ create_annotation_from_segmentation_map, create_hard_prediction_from_soft_prediction, ) -from otx.mpa.utils.config_utils import MPAConfig -from otx.mpa.utils.logger import get_logger logger = get_logger() RECIPE_TRAIN_TYPE = { - TrainType.SEMISUPERVISED: "semisl.py", - TrainType.INCREMENTAL: "incremental.py", - TrainType.SELFSUPERVISED: "selfsl.py", + TrainType.Semisupervised: "semisl.py", + TrainType.Incremental: "incremental.py", + TrainType.Selfsupervised: "selfsl.py", } @@ -191,7 +191,7 @@ def _init_recipe(self): # TODO: Need to remove the hard coding for supcon only. if ( self._train_type in RECIPE_TRAIN_TYPE - and self._train_type == TrainType.INCREMENTAL + and self._train_type == TrainType.Incremental and self._hyperparams.learning_parameters.enable_supcon and not self._model_dir.endswith("supcon") ): @@ -218,8 +218,8 @@ def _init_recipe(self): remove_from_configs_by_type(self._recipe_cfg.custom_hooks, "FreezeLayers") def _update_stage_module(self, stage_module: str): - module_prefix = {TrainType.SEMISUPERVISED: "SemiSL", TrainType.INCREMENTAL: "Incr"} - if self._train_type == TrainType.SEMISUPERVISED and stage_module == "SegExporter": + module_prefix = {TrainType.Semisupervised: "SemiSL", TrainType.Incremental: "Incr"} + if self._train_type == TrainType.Semisupervised and stage_module == "SegExporter": stage_module = "SemiSLSegExporter" elif self._train_type in module_prefix and stage_module in ["SegTrainer", "SegInferrer"]: stage_module = module_prefix[self._train_type] + stage_module diff --git a/otx/algorithms/segmentation/tasks/nncf.py b/otx/algorithms/segmentation/tasks/nncf.py index a6013bfb002..28693e19688 100644 --- a/otx/algorithms/segmentation/tasks/nncf.py +++ b/otx/algorithms/segmentation/tasks/nncf.py @@ -19,6 +19,7 @@ import otx.algorithms.segmentation.adapters.mmseg.nncf.patches # noqa: F401 # pylint: disable=unused-import from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.nncf import build_nncf_segmentor from otx.api.entities.datasets import DatasetEntity from otx.api.entities.metrics import ( @@ -33,7 +34,6 @@ ) from otx.api.entities.model import ModelEntity from otx.api.entities.optimization_parameters import OptimizationParameters -from otx.mpa.utils.logger import get_logger from .inference import SegmentationInferenceTask diff --git 
a/otx/algorithms/segmentation/tasks/openvino.py b/otx/algorithms/segmentation/tasks/openvino.py index 2fe31d90143..fc60aff1656 100644 --- a/otx/algorithms/segmentation/tasks/openvino.py +++ b/otx/algorithms/segmentation/tasks/openvino.py @@ -32,6 +32,7 @@ from openvino.model_zoo.model_api.adapters import OpenvinoAdapter, create_core from openvino.model_zoo.model_api.models import Model +from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.openvino import model_wrappers from otx.algorithms.segmentation.adapters.openvino.model_wrappers.blur import ( get_activation_map, @@ -75,7 +76,6 @@ DatasetParamTypeCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger logger = get_logger() diff --git a/otx/algorithms/segmentation/tasks/train.py b/otx/algorithms/segmentation/tasks/train.py index 1fd144467d8..6f8ac4d183d 100644 --- a/otx/algorithms/segmentation/tasks/train.py +++ b/otx/algorithms/segmentation/tasks/train.py @@ -22,6 +22,7 @@ from otx.algorithms.common.utils.callback import TrainingProgressCallback from otx.algorithms.common.utils.data import get_dataset +from otx.algorithms.common.utils.logger import get_logger from otx.api.configuration import cfg_helper from otx.api.configuration.helper.utils import ids_to_strings from otx.api.entities.datasets import DatasetEntity @@ -44,7 +45,6 @@ DatasetParamTypeCheck, check_input_parameters_type, ) -from otx.mpa.utils.logger import get_logger from .inference import SegmentationInferenceTask @@ -169,9 +169,6 @@ def _init_train_data_cfg(self, dataset: DatasetEntity): labels=self._labels, ) - # Temparory remedy for cfg.pretty_text error - for label in self._labels: - label.hotkey = "a" return data_cfg def _generate_training_metrics_group(self, learning_curves): diff --git a/otx/api/entities/media.py b/otx/api/entities/media.py index ac69461ff4a..b32deb8f831 100644 --- a/otx/api/entities/media.py +++ b/otx/api/entities/media.py @@ -54,4 +54,4 @@ def width(self) -> int: @property def path(self) -> Optional[str]: """Returns the path of the 2D Media object.""" - raise NotImplementedError + return None diff --git a/otx/api/usecases/exportable_code/demo/requirements.txt b/otx/api/usecases/exportable_code/demo/requirements.txt index 2d13ad05327..4e07f2b27f7 100644 --- a/otx/api/usecases/exportable_code/demo/requirements.txt +++ b/otx/api/usecases/exportable_code/demo/requirements.txt @@ -1,3 +1,3 @@ openmodelzoo-modelapi==2022.3.0 -otx @ git+https://github.com/openvinotoolkit/training_extensions/@3faaa782718d8d02e6303fba004c9123ee37d76a#egg=otx +otx @ git+https://github.com/openvinotoolkit/training_extensions/@d977c98b0635fc83910049b81ed9491e405f3045#egg=otx numpy>=1.21.0,<=1.23.5 # np.bool was removed in 1.24.0 which was used in openvino runtime diff --git a/otx/cli/builder/builder.py b/otx/cli/builder/builder.py index 5adfe235a96..5a937108fbb 100644 --- a/otx/cli/builder/builder.py +++ b/otx/cli/builder/builder.py @@ -28,13 +28,14 @@ from mmcv.utils import Registry, build_from_cfg from torch import nn +from otx.algorithms import TRANSFORMER_BACKBONES +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig from otx.api.entities.model_template import TaskType from otx.cli.utils.importing import ( get_backbone_list, get_backbone_registry, get_module_args, ) -from otx.mpa.utils.config_utils import MPAConfig # pylint: disable=too-many-locals, too-many-statements, too-many-branches @@ -101,8 +102,8 @@ def update_backbone_args(backbone_config: dict, registry: Registry, 
backend: str def update_channels(model_config: MPAConfig, out_channels: Any): """Update in_channel of head or neck.""" - if hasattr(model_config.model, "neck"): - if model_config.model.neck.type == "GlobalAveragePooling": + if hasattr(model_config.model, "neck") and model_config.model.neck: + if model_config.model.neck.get("type", None) == "GlobalAveragePooling": model_config.model.neck.pop("in_channels", None) else: print(f"\tUpdate model.neck.in_channels: {out_channels}") @@ -212,6 +213,12 @@ def merge_backbone( out_channels = -1 if hasattr(model_config.model, "head"): model_config.model.head.in_channels = -1 + # TODO: This is a hard coded part of the Transformer backbone and needs to be refactored. + if backend == "mmcls" and backbone_class in TRANSFORMER_BACKBONES: + if hasattr(model_config.model, "neck"): + model_config.model.neck = None + if hasattr(model_config.model, "head"): + model_config.model.head["type"] = "VisionTransformerClsHead" else: # Need to update in/out channel configuration here out_channels = get_backbone_out_channels(backbone) diff --git a/otx/cli/builder/supported_backbone/mmcls.json b/otx/cli/builder/supported_backbone/mmcls.json index 6b5f1343a2e..71f10692aa5 100644 --- a/otx/cli/builder/supported_backbone/mmcls.json +++ b/otx/cli/builder/supported_backbone/mmcls.json @@ -11,7 +11,7 @@ "options": { "arch": ["tiny", "small", "base"] }, - "available": [] + "available": ["CLASSIFICATION"] }, "mmcls.ConvMixer": { "required": ["arch"], @@ -287,7 +287,7 @@ "mmcls.T2T_ViT": { "required": [], "options": {}, - "available": [] + "available": ["CLASSIFICATION"] }, "mmcls.TIMMBackbone": { "required": ["model_name"], @@ -341,7 +341,7 @@ "deit-base" ] }, - "available": [] + "available": ["CLASSIFICATION"] } } } diff --git a/otx/cli/manager/config_manager.py b/otx/cli/manager/config_manager.py index 906f1d77e20..d82e98f6bb5 100644 --- a/otx/cli/manager/config_manager.py +++ b/otx/cli/manager/config_manager.py @@ -16,6 +16,12 @@ from otx.api.entities.model_template import ModelTemplate, parse_model_template from otx.cli.registry import Registry as OTXRegistry from otx.cli.utils.config import configure_dataset, override_parameters +from otx.cli.utils.errors import ( + CliException, + ConfigValueError, + FileNotExistError, + NotSupportedError, +) from otx.cli.utils.importing import get_otx_root_path from otx.cli.utils.parser import gen_param_help, gen_params_dict_from_args from otx.core.data.manager.dataset_manager import DatasetManager @@ -26,7 +32,7 @@ "INSTANCE_SEGMENTATION": "Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50", "ROTATED_DETECTION": "Custom_Rotated_Detection_via_Instance_Segmentation_MaskRCNN_ResNet50", "SEGMENTATION": "Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR", - "ACTION_CLASSIFICATION": "Custom_Action_Classificaiton_X3D", + "ACTION_CLASSIFICATION": "Custom_Action_Classification_X3D", "ACTION_DETECTION": "Custom_Action_Detection_X3D_FAST_RCNN", "ANOMALY_CLASSIFICATION": "ote_anomaly_classification_padim", "ANOMALY_DETECTION": "ote_anomaly_detection_padim", @@ -54,9 +60,9 @@ } TASK_TYPE_TO_SUB_DIR_NAME = { - "INCREMENTAL": "", - "SEMISUPERVISED": "semisl", - "SELFSUPERVISED": "selfsl", + "Incremental": "", + "Semisupervised": "semisl", + "Selfsupervised": "selfsl", } @@ -112,7 +118,7 @@ def data_config_file_path(self) -> Path: if "data" in self.args and self.args.data: if Path(self.args.data).exists(): return Path(self.args.data) - raise FileNotFoundError(f"Not found: {self.args.data}") + raise FileNotExistError(f"Not found: {self.args.data}") 
return self.workspace_root / "data.yaml" def check_workspace(self) -> bool: @@ -140,6 +146,8 @@ def configure_template(self, model: str = None) -> None: else: task_type = self.task_type if not task_type and not model: + if not hasattr(self.args, "train_data_roots"): + raise ConfigValueError("Can't find the argument 'train_data_roots'") task_type = self.auto_task_detection(self.args.train_data_roots) self.template = self._get_template(task_type, model=model) self.task_type = self.template.task_type @@ -149,14 +157,14 @@ def configure_template(self, model: str = None) -> None: def _check_rebuild(self): """Checking for Rebuild status.""" if self.args.task and str(self.template.task_type) != self.args.task.upper(): - raise NotImplementedError("Task Update is not yet supported.") + raise NotSupportedError("Task Update is not yet supported.") result = False if self.args.model and self.template.name != self.args.model.upper(): print(f"[*] Rebuild model: {self.template.name} -> {self.args.model.upper()}") result = True template_train_type = self._get_train_type(ignore_args=True) - if self.args.train_type and template_train_type != self.args.train_type.upper(): - self.train_type = self.args.train_type.upper() + if self.args.train_type and template_train_type != self.args.train_type: + self.train_type = self.args.train_type print(f"[*] Rebuild train-type: {template_train_type} -> {self.train_type}") result = True return result @@ -184,25 +192,25 @@ def _get_train_type(self, ignore_args: bool = False) -> str: args_hyper_parameters = gen_params_dict_from_args(self.args) arg_algo_backend = args_hyper_parameters.get("algo_backend", False) if arg_algo_backend: - train_type = arg_algo_backend.get("train_type", {"value": "INCREMENTAL"}) # type: ignore - return train_type.get("value", "INCREMENTAL") + train_type = arg_algo_backend.get("train_type", {"value": "Incremental"}) # type: ignore + return train_type.get("value", "Incremental") if hasattr(self.args, "train_type") and self.mode in ("build", "train") and self.args.train_type: - self.train_type = self.args.train_type.upper() + self.train_type = self.args.train_type if self.train_type not in TASK_TYPE_TO_SUB_DIR_NAME: - raise ValueError(f"{self.train_type} is not currently supported by otx.") + raise NotSupportedError(f"{self.train_type} is not currently supported by otx.") if self.train_type in TASK_TYPE_TO_SUB_DIR_NAME: return self.train_type algo_backend = self.template.hyper_parameters.parameter_overrides.get("algo_backend", False) if algo_backend: - train_type = algo_backend.get("train_type", {"default_value": "INCREMENTAL"}) - return train_type.get("default_value", "INCREMENTAL") - return "INCREMENTAL" + train_type = algo_backend.get("train_type", {"default_value": "Incremental"}) + return train_type.get("default_value", "Incremental") + return "Incremental" def auto_task_detection(self, data_roots: str) -> str: """Detect task type automatically.""" if not data_roots: - raise ValueError("Workspace must already exist or one of {task or model or train-data-roots} must exist.") + raise CliException("Workspace must already exist or one of {task or model or train-data-roots} must exist.") self.data_format = self.dataset_manager.get_data_format(data_roots) return self._get_task_type_from_data_format(self.data_format) @@ -225,7 +233,7 @@ def _get_task_type_from_data_format(self, data_format: str) -> str: self.task_type = task_key print(f"[*] Detected task type: {self.task_type}") return task_key - raise ValueError(f"Can't find proper task. 
we are not support {data_format} format, yet.") + raise ConfigValueError(f"Can't find proper task. The {data_format} format is not supported yet.") def auto_split_data(self, data_roots: str, task: str): """Automatically Split train data --> train/val dataset.""" @@ -372,7 +380,7 @@ def _get_template(self, task_type: str, model: Optional[str] = None) -> ModelTem if model: template_lst = [temp for temp in otx_registry.templates if temp.name.lower() == model.lower()] if not template_lst: - raise ValueError( + raise NotSupportedError( f"[*] {model} is not a type supported by OTX {task_type}." f"\n[*] Please refer to 'otx find --template --task {task_type}'" ) @@ -426,7 +434,7 @@ def build_workspace(self, new_workspace_path: Optional[str] = None) -> None: model_dir = template_dir.absolute() / train_type_rel_path if not model_dir.exists(): - raise ValueError(f"[*] {self.train_type} is not a type supported by OTX {self.task_type}") + raise NotSupportedError(f"[*] {self.train_type} is not a type supported by OTX {self.task_type}") train_type_dir = self.workspace_root / train_type_rel_path train_type_dir.mkdir(exist_ok=True) @@ -474,12 +482,14 @@ def _copy_config_files(self, target_dir: Path, file_name: str, dest_dir: Path) - if (target_dir / file_name).exists(): if file_name.endswith(".py"): try: - from otx.mpa.utils.config_utils import MPAConfig + from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + MPAConfig, + ) config = MPAConfig.fromfile(str(target_dir / file_name)) config.dump(str(dest_dir / file_name)) except Exception as exc: - raise ImportError(f"{self.task_type} requires mmcv-full to be installed.") from exc + raise CliException(f"{self.task_type} requires mmcv-full to be installed.") from exc elif file_name.endswith((".yml", ".yaml")): config = OmegaConf.load(str(target_dir / file_name)) (dest_dir / file_name).write_text(OmegaConf.to_yaml(config)) diff --git a/otx/cli/tools/build.py b/otx/cli/tools/build.py index 0c5a4b3761f..246b2efb954 100644 --- a/otx/cli/tools/build.py +++ b/otx/cli/tools/build.py @@ -65,7 +65,7 @@ def get_args(): "--train-type", help=f"The currently supported options: {TASK_TYPE_TO_SUB_DIR_NAME.keys()}.", type=str, - default="incremental", + default="Incremental", ) parser.add_argument( "--work-dir", diff --git a/otx/cli/tools/train.py b/otx/cli/tools/train.py index 164210366ba..faffff85635 100644 --- a/otx/cli/tools/train.py +++ b/otx/cli/tools/train.py @@ -65,7 +65,7 @@ def get_args(): "--train-type", help=f"The currently supported options: {TASK_TYPE_TO_SUB_DIR_NAME.keys()}.", type=str, - default="incremental", + default="Incremental", ) parser.add_argument( "--load-weights", diff --git a/otx/cli/utils/errors.py b/otx/cli/utils/errors.py new file mode 100644 index 00000000000..3b5bc9657b7 --- /dev/null +++ b/otx/cli/utils/errors.py @@ -0,0 +1,30 @@ +"""Utils for CLI errors.""" + +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + + +class CliException(Exception): + """Custom exception class for CLI.""" + + +class ConfigValueError(CliException): + """Configuration value is not suitable for CLI.""" + + def __init__(self, message): + super().__init__(message) + + +class NotSupportedError(CliException): + """Not supported error.""" + + def __init__(self, message): + super().__init__(message) + + +class FileNotExistError(CliException): + """Raised when a given configuration file does not exist.""" + + def __init__(self, message): + super().__init__(message) diff --git a/otx/cli/utils/importing.py b/otx/cli/utils/importing.py index 
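The new `otx/cli/utils/errors.py` above gives the CLI a single exception base, so callers can trap every CLI-specific failure with one handler. A condensed sketch of that intent; the `try/except` caller below is hypothetical, and the no-op `__init__` overrides from the patch are omitted since `Exception` already accepts a message:

```python
# Condensed mirror of the hierarchy in otx/cli/utils/errors.py, for illustration.
class CliException(Exception):
    """Custom exception class for CLI."""

class FileNotExistError(CliException):
    """Raised when a given configuration file does not exist."""

try:
    raise FileNotExistError("Not found: data.yaml")
except CliException as err:  # one handler catches all CLI-specific errors
    print(f"[otx] {err}")
```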
a00deb2cedc..173cdd8bf89 100644 --- a/otx/cli/utils/importing.py +++ b/otx/cli/utils/importing.py @@ -29,7 +29,7 @@ "mmseg": "mmseg.models", "torchvision": "otx.algorithms.common.adapters.mmcv.models", "pytorchcv": "mmdet.models", - "omz.mmcls": "otx.mpa.modules.ov.models.mmcls.backbones.mmov_backbone", + "omz.mmcls": "otx.algorithms.classification.adapters.mmcls.models.backbones.mmov_backbone", } diff --git a/otx/core/data/adapter/__init__.py b/otx/core/data/adapter/__init__.py index 140d644728e..a51ef7c32f6 100644 --- a/otx/core/data/adapter/__init__.py +++ b/otx/core/data/adapter/__init__.py @@ -23,53 +23,53 @@ ADAPTERS = { TaskType.CLASSIFICATION: { - "INCREMENTAL": { + "Incremental": { "module_name": "classification_dataset_adapter", "class": "ClassificationDatasetAdapter", } }, TaskType.DETECTION: { - "INCREMENTAL": { + "Incremental": { "module_name": "detection_dataset_adapter", "class": "DetectionDatasetAdapter", } }, TaskType.ROTATED_DETECTION: { - "INCREMENTAL": { + "Incremental": { "module_name": "detection_dataset_adapter", "class": "DetectionDatasetAdapter", } }, TaskType.INSTANCE_SEGMENTATION: { - "INCREMENTAL": { + "Incremental": { "module_name": "detection_dataset_adapter", "class": "DetectionDatasetAdapter", } }, TaskType.SEGMENTATION: { - "INCREMENTAL": { + "Incremental": { "module_name": "segmentation_dataset_adapter", "class": "SegmentationDatasetAdapter", }, - "SELFSUPERVISED": { + "Selfsupervised": { "module_name": "segmentation_dataset_adapter", "class": "SelfSLSegmentationDatasetAdapter", }, }, TaskType.ANOMALY_CLASSIFICATION: { - "INCREMENTAL": { + "Incremental": { "module_name": "anomaly_dataset_adapter", "class": "AnomalyClassificationDatasetAdapter", } }, TaskType.ANOMALY_DETECTION: { - "INCREMENTAL": { + "Incremental": { "module_name": "anomaly_dataset_adapter", "class": "AnomalyDetectionDatasetAdapter", } }, TaskType.ANOMALY_SEGMENTATION: { - "INCREMENTAL": { + "Incremental": { "module_name": "anomaly_dataset_adapter", "class": "AnomalySegmentationDatasetAdapter", } @@ -79,13 +79,13 @@ ADAPTERS.update( { TaskType.ACTION_CLASSIFICATION: { - "INCREMENTAL": { + "Incremental": { "module_name": "action_dataset_adapter", "class": "ActionClassificationDatasetAdapter", } }, TaskType.ACTION_DETECTION: { - "INCREMENTAL": { + "Incremental": { "module_name": "action_dataset_adapter", "class": "ActionDetectionDatasetAdapter", } @@ -107,18 +107,18 @@ def get_dataset_adapter( Args: task_type: A task type such as ANOMALY_CLASSIFICATION, ANOMALY_DETECTION, ANOMALY_SEGMENTATION, CLASSIFICATION, INSTANCE_SEGMENTATION, DETECTION, CLASSIFICATION, ROTATED_DETECTION, SEGMENTATION. - train_type: train type such as INCREMENTAL and SELFSUPERVISED. - SELFSUPERVISED is only supported for SEGMENTATION. + train_type: train type such as Incremental and Selfsupervised. + Selfsupervised is only supported for SEGMENTATION. 
train_data_roots: the path of data root for training data val_data_roots: the path of data root for validation data test_data_roots: the path of data root for test data unlabeled_data_roots: the path of data root for unlabeled data """ - train_type_to_be_called = TrainType.INCREMENTAL.value + train_type_to_be_called = TrainType.Incremental.value # FIXME : Hardcoded solution for self-sl for seg - if task_type == TaskType.SEGMENTATION and train_type == TrainType.SELFSUPERVISED.value: - train_type_to_be_called = TrainType.SELFSUPERVISED.value + if task_type == TaskType.SEGMENTATION and train_type == TrainType.Selfsupervised.value: + train_type_to_be_called = TrainType.Selfsupervised.value module_root = "otx.core.data.adapter." module = importlib.import_module(module_root + ADAPTERS[task_type][train_type_to_be_called]["module_name"]) diff --git a/otx/core/data/adapter/segmentation_dataset_adapter.py b/otx/core/data/adapter/segmentation_dataset_adapter.py index ab02c01d3c6..2725c4fd613 100644 --- a/otx/core/data/adapter/segmentation_dataset_adapter.py +++ b/otx/core/data/adapter/segmentation_dataset_adapter.py @@ -22,6 +22,7 @@ from datumaro.util.meta_file_util import parse_meta_file from skimage.segmentation import felzenszwalb +from otx.algorithms.common.utils.logger import get_logger from otx.api.entities.annotation import Annotation from otx.api.entities.dataset_item import DatasetItemEntity from otx.api.entities.datasets import DatasetEntity @@ -30,7 +31,6 @@ from otx.api.entities.model_template import TaskType from otx.api.entities.subset import Subset from otx.core.data.adapter.base_dataset_adapter import BaseDatasetAdapter -from otx.mpa.utils.logger import get_logger class SegmentationDatasetAdapter(BaseDatasetAdapter): diff --git a/otx/core/data/caching/mem_cache_handler.py b/otx/core/data/caching/mem_cache_handler.py index 44cab53e051..06bd9bc4cdf 100644 --- a/otx/core/data/caching/mem_cache_handler.py +++ b/otx/core/data/caching/mem_cache_handler.py @@ -12,7 +12,7 @@ from mmcv.runner import get_dist_info from multiprocess.synchronize import Lock -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger logger = get_logger() diff --git a/otx/core/ov/__init__.py b/otx/core/ov/__init__.py new file mode 100644 index 00000000000..1ac90928687 --- /dev/null +++ b/otx/core/ov/__init__.py @@ -0,0 +1,9 @@ +"""Module for otx.core.ov.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +# flake8: noqa +from .graph import * +from .models import * +from .ops import * diff --git a/otx/core/ov/graph/__init__.py b/otx/core/ov/graph/__init__.py new file mode 100644 index 00000000000..ad121dad22d --- /dev/null +++ b/otx/core/ov/graph/__init__.py @@ -0,0 +1,9 @@ +"""Module for otx.core.ov.graph.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +# TODO: Need to remove comment with ignore mypy and fix mypy issues +from .graph import Graph # type: ignore[attr-defined] + +__all__ = ["Graph"] diff --git a/otx/mpa/modules/ov/graph/graph.py b/otx/core/ov/graph/graph.py similarity index 80% rename from otx/mpa/modules/ov/graph/graph.py rename to otx/core/ov/graph/graph.py index 5afa8d1aaab..242f6b0991b 100644 --- a/otx/mpa/modules/ov/graph/graph.py +++ b/otx/core/ov/graph/graph.py @@ -1,6 +1,9 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +# type: ignore +# TODO: Need to remove line 1 (ignore mypy) and fix mypy issues +"""Modules for otx.core.ov.graph.""" +# Copyright (C) 
2023 Intel Corporation # +# SPDX-License-Identifier: MIT import inspect from collections import OrderedDict @@ -10,53 +13,73 @@ import _collections_abc import networkx as nx -from openvino.pyopenvino import Model +from openvino.pyopenvino import Model # pylint: disable=no-name-in-module -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from ..ops.op import Operation -from ..utils import convert_op_to_torch, get_op_name +from ..ops.utils import convert_op_to_torch +from ..utils import get_op_name + +# pylint: disable=too-many-locals, too-many-nested-blocks, arguments-renamed, too-many-branches, too-many-statements logger = get_logger() class SortedDictKeysView(_collections_abc.KeysView): + """SortedDictKeysView class.""" + def __repr__(self): - return f"{self.__class__.__name__}({[i for i in self._mapping]})" + """Function repr of SortedDictKeysView.""" + return f"{self.__class__.__name__}({list(self._mapping)})" def __reversed__(self): + """Function reversed of SortedDictKeysView.""" yield from reversed(self._mapping) class SortedDictValuesView(_collections_abc.ValuesView): + """SortedDictValuesView class.""" + def __repr__(self): + """Sorteddictvaluesview's repr function.""" return f"{self.__class__.__name__}({[self._mapping[i] for i in self._mapping]})" def __reversed__(self): + """Sorteddictvaluesview's reversed function.""" for key in reversed(self._mapping): yield self._mapping[key] class SortedDictItemsView(_collections_abc.ItemsView): + """SortedDictItemsView class.""" + def __repr__(self): + """Sorteddictitemsview's repr function.""" return f"{self.__class__.__name__}({[(i, self._mapping[i]) for i in self._mapping]})" def __reversed__(self): + """Sorteddictitemsview's reversed function.""" for key in reversed(self._mapping): yield (key, self._mapping[key]) class NOOP: - pass + """NOOP class.""" + + pass # pylint: disable=unnecessary-pass class SortedDict(dict): + """SortedDict class.""" + def __init__(self, sort_key, *args, **kwargs): self._sort_key = sort_key self._sorted_keys = [] super().__init__(self, *args, **kwargs) def __setitem__(self, key, value): + """Sorteddict's setitem function.""" assert len(value) == 1 edge_key, edge_attr = next(iter(value.items())) sort_value = float("inf") if self._sort_key not in edge_attr else edge_attr[self._sort_key] @@ -69,43 +92,50 @@ def __setitem__(self, key, value): super().__setitem__(key, value) def __delitem__(self, key): + """Sorteddict's delitem function.""" super().__delitem__(key) for i, (_, key_in, _) in enumerate(self._sorted_keys): if key_in == key: break - self._sorted_keys.pop(i) + self._sorted_keys.pop(i) # pylint: disable=undefined-loop-variable def __iter__(self): + """Sorteddict's iter function.""" for _, key, _ in self._sorted_keys: yield key def __reversed__(self): + """Sorteddict's reversed function.""" for _, key, _ in self._sorted_keys[::-1]: yield key def __repr__(self): - if not len(self): + """Sorteddict's repr function.""" + if not len(self): # pylint: disable=use-implicit-booleaness-not-len return "{}" - repr = "{" + repr_ = "{" for _, key, _ in self._sorted_keys: - repr += f"{key}: {self[key]}, " - repr = repr[:-2] - repr += "}" - return repr + repr_ += f"{key}: {self[key]}, " + repr_ = repr_[:-2] + repr_ += "}" + return repr_ def __deepcopy__(self, memo): + """Sorteddict's deepcopy function.""" cls = self.__class__ result = cls(self._sort_key) memo[id(self)] = result - for k, v in self.items(): - result[k] = deepcopy(v, memo) + for key, value in self.items(): 
+ result[key] = deepcopy(value, memo) return result def clear(self): + """Sorteddict's clear function.""" super().clear() self._sorted_keys = [] def pop(self, key, default=NOOP()): + """Sorteddict's pop function.""" if isinstance(default, NOOP): value = super().pop(key) else: @@ -114,39 +144,49 @@ def pop(self, key, default=NOOP()): for i, (_, key_in, _) in enumerate(self._sorted_keys): if key_in == key: break - self._sorted_keys.pop(i) + self._sorted_keys.pop(i) # pylint: disable=undefined-loop-variable return value def popitem(self): + """Sorteddict's popitem function.""" raise NotImplementedError @staticmethod def fromkeys(iterable, value=None): + """Sorteddict's fromkeys function.""" raise NotImplementedError def keys(self): + """Sorteddict's keys function.""" return SortedDictKeysView(self) def values(self): + """Sorteddict's values function.""" return SortedDictValuesView(self) def items(self): + """Sorteddict's items function.""" return SortedDictItemsView(self) class SortedDictHelper(dict): - def __init__(self, sort_key=None, *args, **kwargs): + """SortedDictHelper class.""" + + def __init__(self, sort_key=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg self._sort_key = sort_key super().__init__(*args, **kwargs) def __setitem__(self, key, value): + """Sorteddicthelper's setitem function.""" super().__setitem__(key, SortedDict(self._sort_key)) for v_key, v_value in value.items(): self[key][v_key] = v_value class Graph(nx.MultiDiGraph): + """Graph class.""" + adjlist_outer_dict_factory = SortedDictHelper def __init__(self, *args, **kwargs): @@ -160,12 +200,13 @@ def __init__(self, *args, **kwargs): @staticmethod def from_ov(ov_model: Model) -> "Graph": + """Graph's from_ov function.""" graph = Graph() ov_ops = ov_model.get_ordered_ops() ops_dict = OrderedDict() - parents_dict = {} - children_dict = {} + parents_dict: Dict[str, List[Optional[List]]] = {} + children_dict: Dict[str, List[Optional[List]]] = {} for ov_op in ov_ops: op_name = get_op_name(ov_op) @@ -213,21 +254,26 @@ def from_ov(ov_model: Model) -> "Graph": ) # freeze normalization nodes - graph._freeze_normalize_nodes() + graph._freeze_normalize_nodes() # pylint: disable=protected-access return graph def get_edge_data(self, node_from: Operation, node_to: Operation, default=None) -> Optional[List[Dict[Any, Any]]]: + """Graph's get_edge_data function.""" edge_data = super().get_edge_data(node_from, node_to, None, default) if edge_data is not None: return list(edge_data.values()) - else: - return None + return None def remove_node(self, node: Operation, keep_connect: bool = False): + """Graph's remove_node function.""" edges_to_keep = [] if keep_connect: - predecessors = [predecessor for predecessor in self.predecessors(node) if predecessor.type != "Constant"] + predecessors = [ + predecessor + for predecessor in self.predecessors(node) + if hasattr(predecessor, "type") and predecessor.type != "Constant" + ] if predecessors: assert len(predecessors) == 1 predecessor = predecessors[0] @@ -246,6 +292,7 @@ def remove_node(self, node: Operation, keep_connect: bool = False): self.add_edge(node_from, node_to, **attrs) def replace_node(self, old_node: Operation, new_node: Operation): + """Graph's replace_node function.""" edges = [] for successor in self.successors(old_node): for edge_attrs in self.get_edge_data(old_node, successor): @@ -268,6 +315,7 @@ def add_edge( in_port: Optional[int] = None, **kwargs, ): + """Graph's add_edge function.""" if node_from not in self: self.add_node(node_from) @@ -320,6 +368,7 
@@ def predecessors( node: Operation, with_edge_data: bool = False, ) -> Generator[Union[Tuple[Operation, Optional[List]], Operation], None, None]: + """Graph's predecessors function.""" for predecessor in super().predecessors(node): if with_edge_data: yield (predecessor, self.get_edge_data(predecessor, node)) @@ -331,6 +380,7 @@ def successors( node: Operation, with_edge_data: bool = False, ) -> Generator[Union[Tuple[Operation, Optional[List]], Operation], None, None]: + """Graph's successors function.""" for successor in super().successors(node): if with_edge_data: yield (successor, self.get_edge_data(node, successor)) @@ -338,6 +388,7 @@ def successors( yield successor def get_nodes_by_types(self, types: List[str]) -> List[Operation]: + """Graph's get_nodes_by_types function.""" found = [] for node in self.topological_sort(): if node.type in types: @@ -347,9 +398,10 @@ def get_nodes_by_types(self, types: List[str]) -> List[Operation]: def bfs( self, node: Operation, reverse: bool = False, depth_limit: Optional[int] = None ) -> Generator[Union[Tuple[Operation, Operation], Tuple[Operation, Tuple[Operation]]], None, None]: + """Graph's bfs function.""" if reverse: - for s, t in nx.bfs_edges(self, node, reverse=True, depth_limit=depth_limit): - yield (t, s) + for s_value, t_value in nx.bfs_edges(self, node, reverse=True, depth_limit=depth_limit): + yield (t_value, s_value) else: parent = node children = [] @@ -370,6 +422,7 @@ def bfs( # return nx.dfs_predecessors(self, node, depth_limit) def get_nodes_by_type_pattern(self, pattern: List[str], start_node: Optional[Operation] = None, reverse=False): + """Graph's get_nodes_by_type_pattern function.""" if len(pattern) < 1: raise ValueError(f"pattern must be longer than 2 but {len(pattern)} is given") pattern_pairs = [pattern[i : i + 2] for i in range(len(pattern) - 1)] @@ -384,16 +437,16 @@ def get_nodes_by_type_pattern(self, pattern: List[str], start_node: Optional[Ope start_nodes = [start_node] for pattern_pair in pattern_pairs: found_ = {start_node: None for start_node in start_nodes} - for start_node in start_nodes: - for s, ts in self.bfs(start_node, reverse, 1): - if not isinstance(ts, tuple): - ts = (ts,) - for t in ts: - if [s.type, t.type] == pattern_pair: + for start_node_ in start_nodes: + for s_value, ts_ in self.bfs(start_node_, reverse, 1): + if not isinstance(ts_, tuple): + ts_ = (ts_,) + for t in ts_: + if [s_value.type, t.type] == pattern_pair: if reverse: - found_[t] = s + found_[t] = s_value else: - found_[s] = t + found_[s_value] = t if founds: pop_indices = [] for i, found in enumerate(founds): @@ -410,9 +463,11 @@ def get_nodes_by_type_pattern(self, pattern: List[str], start_node: Optional[Ope return founds def _freeze_normalize_nodes(self): # noqa: C901 + """Graph's _freeze_normalize_nodes function.""" invariant_types = ["Transpose", "Convert"] def test_constant(node): + """Graph's test_constant function.""" constant_nodes = [node_ for node_ in self.predecessors(node) if node_.type == "Constant"] if len(constant_nodes) != 1: return False @@ -423,11 +478,13 @@ def test_constant(node): def get_nodes_by_type_from_node( node, - type, - ignore_types=[], + types, + ignore_types=None, reverse=False, depth_limit=-1, ): + """Graph's get_nodes_by_type_from_node function.""" + ignore_types = ignore_types if ignore_types else [] func = self.successors if reverse: func = self.predecessors @@ -435,15 +492,16 @@ def get_nodes_by_type_from_node( candidates = [(i, 1) for i in func(node)] found = [] for candidate, cur_depth in candidates: 
- if depth_limit > -1 and cur_depth > depth_limit: + if cur_depth > depth_limit > -1: break - if candidate.type == type: + if candidate.type == types: found.append(candidate) elif candidate.type in ignore_types: candidates.extend([(i, cur_depth + 1) for i in func(candidate)]) return found def find_multiply_add(node): + """Graph's find_multiply_add function.""" scale_node = None mean_node = None @@ -463,6 +521,7 @@ def find_multiply_add(node): return (scale_node, mean_node) def find_subtract_divide(node): + """Graph's find_subtract_divide function.""" mean_node = None scale_node = None @@ -482,6 +541,7 @@ def find_subtract_divide(node): return (mean_node, scale_node) def find_subtract_multiply(node): + """Graph's find_subtract_multiply function.""" mean_node = None scale_node = None @@ -513,7 +573,7 @@ def find_subtract_multiply(node): if len([i for i in found if i is not None]) < len([i for i in found_ if i is not None]): found = found_ - if not all([i is not None for i in found]): + if not all(i is not None for i in found): continue self._normalize_nodes.append(found) @@ -533,6 +593,7 @@ def find_subtract_multiply(node): self.replace_node(constant_node, new_constant_node) def remove_normalize_nodes(self): + """Graph's remove_normalize_nodes function.""" for nodes in self._normalize_nodes: first_node, second_node = nodes @@ -545,21 +606,25 @@ def remove_normalize_nodes(self): try: self.remove_node(second_node, keep_connect=True) logger.info(f"Remove normalize node {second_node.name}") - except Exception: + except Exception: # pylint: disable=broad-exception-caught pass self._normalize_nodes = [] def topological_sort(self): + """Graph's topological_sort function.""" return nx.topological_sort(self) def has_path(self, node_from: Operation, node_to: Operation): + """Graph's has_path function.""" return nx.has_path(self, node_from, node_to) def clean_up( self, - nodes_to_keep: List[Operation] = [], + nodes_to_keep: List[Operation] = None, remove_sub_components: bool = True, ): + """Graph's clean_up function.""" + nodes_to_keep = nodes_to_keep if nodes_to_keep else [] if remove_sub_components: # clean up sub components components = list(nx.connected_components(self.to_undirected())) diff --git a/otx/core/ov/graph/parsers/__init__.py b/otx/core/ov/graph/parsers/__init__.py new file mode 100644 index 00000000000..7a158471ce2 --- /dev/null +++ b/otx/core/ov/graph/parsers/__init__.py @@ -0,0 +1,8 @@ +"""Module for otx.core.ov.graph.parser.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from .builder import PARSERS + +__all__ = ["PARSERS"] diff --git a/otx/core/ov/graph/parsers/builder.py b/otx/core/ov/graph/parsers/builder.py new file mode 100644 index 00000000000..b802f44365b --- /dev/null +++ b/otx/core/ov/graph/parsers/builder.py @@ -0,0 +1,8 @@ +"""Builder module for otx.core.ov.graph.parsers.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from otx.core.ov.registry import Registry + +PARSERS = Registry("ov graph parsers") diff --git a/otx/core/ov/graph/parsers/cls/__init__.py b/otx/core/ov/graph/parsers/cls/__init__.py new file mode 100644 index 00000000000..74b90157a52 --- /dev/null +++ b/otx/core/ov/graph/parsers/cls/__init__.py @@ -0,0 +1,8 @@ +"""Module for otx.core.ov.graph.parsers.cls.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from .cls_base_parser import cls_base_parser + +__all__ = ["cls_base_parser"] diff --git a/otx/mpa/modules/ov/graph/parsers/cls/cls_base_parser.py 
b/otx/core/ov/graph/parsers/cls/cls_base_parser.py similarity index 74% rename from otx/mpa/modules/ov/graph/parsers/cls/cls_base_parser.py rename to otx/core/ov/graph/parsers/cls/cls_base_parser.py index 2ca1c5d27d9..238d4e584bf 100644 --- a/otx/mpa/modules/ov/graph/parsers/cls/cls_base_parser.py +++ b/otx/core/ov/graph/parsers/cls/cls_base_parser.py @@ -1,16 +1,18 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Class base parser for otx.core.ov.graph.parsers.cls.cls_base_parser.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from typing import Dict, List, Optional -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from ..builder import PARSERS from ..parser import parameter_parser -logger = get_logger() +# pylint: disable=too-many-return-statements, too-many-branches +logger = get_logger() NECK_INPUT_TYPES = ["ReduceMean", "MaxPool", "AvgPool"] NECK_TYPES = [ @@ -27,6 +29,7 @@ @PARSERS.register() def cls_base_parser(graph, component: str = "backbone") -> Optional[Dict[str, List[str]]]: + """Class base parser for OMZ models.""" assert component in ["backbone", "neck", "head"] result_nodes = graph.get_nodes_by_types(["Result"]) @@ -43,7 +46,7 @@ def cls_base_parser(graph, component: str = "backbone") -> Optional[Dict[str, Li break if neck_input is None: - logger.debug("Can not determine the output of backbone.") + # logger.debug("Can not determine the output of backbone.") return None neck_output = neck_input @@ -73,32 +76,30 @@ def cls_base_parser(graph, component: str = "backbone") -> Optional[Dict[str, Li outputs=outputs, ) - elif component == "neck": + if component == "neck": return dict( inputs=[neck_input.name], outputs=[neck_output.name], ) - elif component == "head": - inputs = list(graph.successors(neck_output)) - # if len(inputs) != 1: - # logger.debug(f"neck_output {neck_output.name} has more than one successors.") - # return None + if component == "head": + head_inputs = list(graph.successors(neck_output)) outputs = graph.get_nodes_by_types(["Result"]) if len(outputs) != 1: - logger.debug("more than one network output are found.") + logger.debug("More than one network output is found.") return None for node_from, node_to in graph.bfs(outputs[0], True, 5): if node_to.type == "Softmax": outputs = [node_from] break - if not graph.has_path(inputs[0], outputs[0]): - logger.debug(f"input({inputs[0].name}) and output({outputs[0].name}) are reversed") + if not graph.has_path(head_inputs[0], outputs[0]): + logger.debug(f"input({head_inputs[0].name}) and output({outputs[0].name}) are reversed") return None return dict( - inputs=[input.name for input in inputs], + inputs=[input_.name for input_ in head_inputs], outputs=[output.name for output in outputs], ) + return None diff --git a/otx/mpa/modules/ov/graph/parsers/parser.py b/otx/core/ov/graph/parsers/parser.py similarity index 60% rename from otx/mpa/modules/ov/graph/parsers/parser.py rename to otx/core/ov/graph/parsers/parser.py index 96046046107..9cbf0583651 100644 --- a/otx/mpa/modules/ov/graph/parsers/parser.py +++ b/otx/core/ov/graph/parsers/parser.py @@ -1,11 +1,13 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Parser modules for otx.core.ov.graph.parsers.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from typing import List def type_parser(graph, types) -> List[str]: + """Type Parser from graph, types.""" found = [] for node in graph: if 
node.type in types: @@ -14,8 +16,10 @@ def type_parser(graph, types) -> List[str]: def result_parser(graph) -> List[str]: + """Result Parser from graph.""" return type_parser(graph, ["Result"]) def parameter_parser(graph) -> List[str]: + """Parameter Parser from graph.""" return type_parser(graph, ["Parameter"]) diff --git a/otx/mpa/modules/ov/graph/utils.py b/otx/core/ov/graph/utils.py similarity index 86% rename from otx/mpa/modules/ov/graph/utils.py rename to otx/core/ov/graph/utils.py index 990fbd0d5aa..959b246b20f 100644 --- a/otx/mpa/modules/ov/graph/utils.py +++ b/otx/core/ov/graph/utils.py @@ -1,30 +1,35 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Utils for otx.core.ov.graph.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT -from typing import List +from typing import Any, List import torch -from otx.mpa.utils.logger import get_logger - -from ..ops import OPS, Operation -from ..ops.infrastructures import ConstantV0 -from .graph import Graph +from otx.algorithms.common.utils.logger import get_logger +from otx.core.ov.graph import Graph +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.infrastructures import ConstantV0 +from otx.core.ov.ops.op import Operation +# pylint: disable=too-many-locals, protected-access, too-many-branches, too-many-statements logger = get_logger() def get_constant_input_nodes(graph: Graph, node: Operation) -> List[Operation]: + """Getter constant input nodes from graph, node.""" found = [] - for node in graph.predecessors(node): - if node.type == "Constant": - found.append(node) + for node_ in graph.predecessors(node): + if node_.type == "Constant": + found.append(node_) return found -def handle_merging_into_batchnorm(graph, type_patterns=[["Multiply", "Add"]], type_mappings=[{"gamma": 0, "beta": 1}]): - +def handle_merging_into_batchnorm(graph, type_patterns=None, type_mappings=None): # noqa: C901 + """Merge function graph into batchnorm.""" + type_patterns = type_patterns if type_patterns else [["Multiply", "Add"]] + type_mappings = type_mappings if type_mappings else [{"gamma": 0, "beta": 1}] assert len(type_patterns) == len(type_mappings) batchnorm_cls = OPS.get_by_type_version("BatchNormInference", 0) constant_cls = OPS.get_by_type_version("Constant", 0) @@ -152,7 +157,9 @@ def handle_merging_into_batchnorm(graph, type_patterns=[["Multiply", "Add"]], ty graph.add_edge(running_variance, batchnorm) -def handle_paired_batchnorm(graph, replace: bool = False, types: List[str] = ["Convolution", "GroupConvolution"]): +def handle_paired_batchnorm(graph, replace: bool = False, types: List[str] = None): + """Handle function paired batchnorm.""" + types = types if types else ["Convolution", "GroupConvolution"] batchnorm_cls = OPS.get_by_type_version("BatchNormInference", 0) constant_cls = OPS.get_by_type_version("Constant", 0) @@ -171,9 +178,9 @@ def handle_paired_batchnorm(graph, replace: bool = False, types: List[str] = ["C ) continue - bias_node = [n for n in graph.successors(node) if n.type == "Add"] - if len(bias_node) == 1: - bias_node = bias_node[0] + bias_node_list: List[Any] = [n for n in graph.successors(node) if n.type == "Add"] + if len(bias_node_list) == 1: + bias_node = bias_node_list[0] else: bias_node = None @@ -182,10 +189,9 @@ def handle_paired_batchnorm(graph, replace: bool = False, types: List[str] = ["C logger.info(f"Skip a paired batch normalization for {node.name} " "because it has no bias add node.") continue # if add node is not bias
add node - elif not isinstance(list(graph.predecessors(bias_node))[1], ConstantV0): + if not isinstance(list(graph.predecessors(bias_node))[1], ConstantV0): logger.info( - f"Skip a pared batch normalization for {node.name } " - f"because {bias_node.name} is not a bias add node." + f"Skip a paired batch normalization for {node.name} " f"because {bias_node.name} is not a bias add node." ) continue @@ -269,7 +275,7 @@ def handle_paired_batchnorm(graph, replace: bool = False, types: List[str] = ["C def handle_reshape(graph): - + """Reshape function.""" for result in graph.get_nodes_by_types(["Result"]): for node in graph.predecessors(result): # some models, for example, dla-34, have reshape node as its predecessor @@ -280,5 +286,5 @@ def handle_reshape(graph): for shape_ in input_node.shape[0][::-1]: if shape_ != 1: break - logger.info(f"Change reshape to [-1, {shape_}]") - shape.data = torch.tensor([-1, shape_]) + logger.info(f"Change reshape to [-1, {shape_}]") # pylint: disable=undefined-loop-variable + shape.data = torch.tensor([-1, shape_]) # pylint: disable=undefined-loop-variable diff --git a/otx/core/ov/models/__init__.py b/otx/core/ov/models/__init__.py new file mode 100644 index 00000000000..99fff8b1223 --- /dev/null +++ b/otx/core/ov/models/__init__.py @@ -0,0 +1,14 @@ +"""Module for otx.core.ov.models.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from .mmov_model import MMOVModel +from .ov_model import OVModel # type: ignore[attr-defined] +from .parser_mixin import ParserMixin # type: ignore[attr-defined] + +__all__ = [ + "MMOVModel", + "OVModel", + "ParserMixin", +] diff --git a/otx/mpa/modules/ov/models/mmov_model.py b/otx/core/ov/models/mmov_model.py similarity index 73% rename from otx/mpa/modules/ov/models/mmov_model.py rename to otx/core/ov/models/mmov_model.py index 389faa2e25a..3e3398125bc 100644 --- a/otx/mpa/modules/ov/models/mmov_model.py +++ b/otx/core/ov/models/mmov_model.py @@ -1,17 +1,24 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""MMOVModel for otx.core.ov.models.mmov_model.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from typing import Dict, List, Optional, Union import openvino.runtime as ov import torch -from .ov_model import OVModel -from .parser_mixin import ParserMixin +# TODO: Need to remove line 1 (ignore mypy) and fix mypy issues +from .ov_model import OVModel # type: ignore[attr-defined] +from .parser_mixin import ParserMixin # type: ignore[attr-defined] + +# TODO: Need to fix pylint issues +# pylint: disable=keyword-arg-before-vararg class MMOVModel(OVModel, ParserMixin): + """MMOVModel for OMZ model type.""" + def __init__( self, model_path_or_model: Union[str, ov.Model], @@ -42,12 +49,13 @@ def __init__( ) def forward(self, inputs, gt_label=None): + """Function forward.""" if isinstance(inputs, torch.Tensor): inputs = (inputs,) assert len(inputs) == len(self.inputs) feed_dict = dict() - for key, input in zip(self.inputs, inputs): - feed_dict[key] = input + for key, input_ in zip(self.inputs, inputs): + feed_dict[key] = input_ if gt_label is not None: assert "gt_label" not in self.features diff --git a/otx/mpa/modules/ov/models/ov_model.py b/otx/core/ov/models/ov_model.py similarity index 84% rename from otx/mpa/modules/ov/models/ov_model.py rename to otx/core/ov/models/ov_model.py index f6a681418d6..801900649e3 100644 --- a/otx/mpa/modules/ov/models/ov_model.py +++ b/otx/core/ov/models/ov_model.py @@ -1,6 +1,9 @@ -# Copyright (C) 2022 Intel
Corporation -# SPDX-License-Identifier: Apache-2.0 +# type: ignore +# TODO: Need to remove line 1 (ignore mypy) and fix mypy issues +"""Modules for otx.core.ov.models.ov_model.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT import math import os @@ -13,7 +16,7 @@ import torch from torch.nn import init -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from ..graph import Graph from ..graph.utils import ( @@ -21,22 +24,24 @@ handle_paired_batchnorm, handle_reshape, ) -from ..ops import OPS +from ..ops.builder import OPS from ..utils import load_ov_model, normalize_name -logger = get_logger() +CONNECTION_SEPARATOR = "||" +# pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements +logger = get_logger() -CONNECTION_SEPARATOR = "||" +class OVModel(torch.nn.Module): # pylint: disable=too-many-instance-attributes + """OVModel class.""" -class OVModel(torch.nn.Module): - def __init__( + def __init__( # noqa: C901 self, model_path_or_model: Union[str, ov.Model] = None, weight_path: Optional[str] = None, - inputs: Union[str, List[str]] = [], - outputs: Union[str, List[str]] = [], + inputs: Optional[Union[str, List[str]]] = None, + outputs: Optional[Union[str, List[str]]] = None, features_to_keep: Optional[List] = None, remove_normalize: bool = False, merge_bn: bool = True, @@ -54,8 +59,8 @@ def __init__( self._init_weight = init_weight self._verify_shape = verify_shape - self._inputs = [] - self._outputs = [] + self._inputs: List[str] = [] + self._outputs: List[str] = [] self._feature_dict = OrderedDict() # build graph @@ -101,40 +106,42 @@ def __init__( if not isinstance(init_weight, Callable): # internal init weight - def init_weight(m, graph): + def init_weight(module, graph): # pylint: disable=function-redefined from ..ops.op import Operation - if not isinstance(m, Operation): + if not isinstance(module, Operation): return - if m.TYPE == "BatchNormInference": - _, gamma, beta, mean, var = list(graph.predecessors(m)) + if module.TYPE == "BatchNormInference": + _, gamma, beta, mean, var = list(graph.predecessors(module)) init.ones_(gamma.data) init.zeros_(beta.data) mean.data.zero_() var.data.fill_(1) - logger.info(f"Initialize {m.TYPE} -> {m.name}") - elif m.TYPE in [ + logger.info(f"Initialize {module.TYPE} -> {module.name}") + elif module.TYPE in [ "Convolution", "GroupConvolution", "MatMul", ]: - for weight in graph.predecessors(m): + for weight in graph.predecessors(module): if weight.TYPE == "Constant" and isinstance(weight.data, torch.nn.parameter.Parameter): init.kaiming_uniform_(weight.data, a=math.sqrt(5)) - logger.info(f"Initialize {m.TYPE} -> {m.name}") - elif m.TYPE in [ + logger.info(f"Initialize {module.TYPE} -> {module.name}") + elif module.TYPE in [ "Multiply", "Divide", "Add", "Subtract", ]: - for weight in graph.predecessors(m): + for weight in graph.predecessors(module): if weight.TYPE == "Constant" and isinstance(weight.data, torch.nn.parameter.Parameter): - fan_in, _ = init._calculate_fan_in_and_fan_out(weight.data) + fan_in, _ = init._calculate_fan_in_and_fan_out( # pylint: disable=protected-access + weight.data + ) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 init.uniform_(weight.data, -bound, bound) - logger.info(f"Initialize {m.TYPE} -> {m.name}") + logger.info(f"Initialize {module.TYPE} -> {module.name}") self.model.apply(lambda m: init_weight(m, graph)) @@ -150,33 +157,39 @@ def init_weight(m, graph): output_shapes[node.name] = node.shape[0] 
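
The class being reworked here, `OVModel`, registers every graph node as a named submodule and executes the graph in topological order, caching per-node outputs in a feature dictionary. A minimal, self-contained sketch of that pattern follows; it is not the actual `OVModel` code, the graph object is assumed to expose `topological_sort()` and `predecessors()`, and node names are assumed already normalized to valid module keys:

```python
from collections import OrderedDict

import torch


class GraphModuleSketch(torch.nn.Module):
    """Minimal sketch: execute a DAG of named ops in topological order."""

    def __init__(self, graph):
        super().__init__()
        self.graph = graph  # assumed to expose topological_sort() and predecessors()
        # Mirrors build_torch_module(): register each op under its node name.
        self.model = torch.nn.ModuleDict([(node.name, node) for node in graph.topological_sort()])
        self._feature_dict = OrderedDict()

    def forward(self, **named_inputs):
        self._feature_dict.clear()
        self._feature_dict.update(named_inputs)  # seed Parameter nodes by name
        for name, node in self.model.items():
            if name in self._feature_dict:
                continue  # output already supplied as a network input
            args = [self._feature_dict[p.name] for p in self.graph.predecessors(node)]
            self._feature_dict[name] = node(*args)
        return self._feature_dict
```

The real class layers input/output rewiring (`build_custom_inputs`/`build_custom_outputs`), normalize-node removal, and the weight re-initialization shown above on top of this core loop.
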
self._input_shapes = OrderedDict() self._output_shapes = OrderedDict() - for input in self._inputs: - self._input_shapes[input] = input_shapes[input] + for input_ in self._inputs: + self._input_shapes[input_] = input_shapes[input_] for output in self._outputs: self._output_shapes[output] = output_shapes[output] @property def inputs(self): + """Property inputs.""" return self._inputs @property def outputs(self): + """Property outputs.""" return self._outputs @property def features(self): + """Property features.""" return self._feature_dict @property def input_shapes(self): + """Property input_shapes.""" return self._input_shapes @property def output_shapes(self): + """Property output_shapes.""" return self._output_shapes @staticmethod def build_graph(model_path_or_model, weight_path=None): + """Function build_graph.""" with tempfile.TemporaryDirectory() as tempdir: if isinstance(model_path_or_model, ov.Model): assert weight_path is None, "if openvino model is given 'weight_path' must be None" @@ -193,7 +206,8 @@ def build_graph(model_path_or_model, weight_path=None): return graph @staticmethod - def build_custom_outputs(graph, outputs): + def build_custom_outputs(graph, outputs): # noqa: C901 + """Function build_custom_outputs.""" cls_result = OPS.get_by_type_version("Result", 0) node_dict = OrderedDict((i.name, i) for i in graph.topological_sort()) @@ -262,7 +276,7 @@ def build_custom_outputs(graph, outputs): for edges in edges_to_add.values(): for edge in edges: edge["in_port"] = 0 - assert set([len(edges) for edges in edges_to_add.values()]) == {1} + assert {len(edges) for edges in edges_to_add.values()} == {1} edges_to_add = [edge for edges in edges_to_add.values() for edge in edges] else: edges_to_add = [] @@ -274,7 +288,8 @@ def build_custom_outputs(graph, outputs): return outputs @staticmethod - def build_custom_inputs(graph, inputs: Union[str, List[str]]): + def build_custom_inputs(graph, inputs: Union[str, List[str]]): # noqa: C901 + """Function build_custom_inputs.""" cls_param = OPS.get_by_type_version("Parameter", 0) node_dict = OrderedDict((i.name, i) for i in graph.topological_sort()) @@ -283,16 +298,16 @@ def build_custom_inputs(graph, inputs: Union[str, List[str]]): edges_to_add = {} nodes_to_remove = [] - for i, input in enumerate(inputs): - input = normalize_name(input) - input = input.split(CONNECTION_SEPARATOR) + for i, input_ in enumerate(inputs): + input_ = normalize_name(input_) + input_ = input_.split(CONNECTION_SEPARATOR) explicit_src = False - if len(input) == 1: + if len(input_) == 1: src = None - tgt = input[0] - elif len(input) == 2: - src, tgt = input + tgt = input_[0] + elif len(input_) == 2: + src, tgt = input_ explicit_src = True else: raise ValueError() @@ -353,7 +368,7 @@ def build_custom_inputs(graph, inputs: Union[str, List[str]]): for edges in edges_to_add.values(): for edge in edges: edge["out_port"] = 0 - assert set([len(edges) for edges in edges_to_add.values()]) == {1} + assert {len(edges) for edges in edges_to_add.values()} == {1} edges_to_add = [edge for edges in edges_to_add.values() for edge in edges] else: edges_to_add = [] @@ -365,14 +380,18 @@ def build_custom_inputs(graph, inputs: Union[str, List[str]]): return inputs @staticmethod - def clean_up(graph, inputs=[], outputs=[]): + def clean_up(graph, inputs=None, outputs=None): + """Function clean_up.""" + inputs = inputs if inputs else [] + outputs = outputs if outputs else [] nodes = list(graph.topological_sort()) nodes_to_keep = [] for node in nodes: if node.name in inputs or node.name in 
outputs: nodes_to_keep.append(node) - def get_nodes_without_successors(graph, ignores=[]): + def get_nodes_without_successors(graph, ignores=None): + ignores = ignores if ignores else [] outputs = [] for node in reversed(list(graph.topological_sort())): if not list(graph.successors(node)) and node not in ignores: @@ -388,10 +407,12 @@ def get_nodes_without_successors(graph, ignores=[]): @staticmethod def build_torch_module(graph): + """Function build_torch_module.""" node_dict = OrderedDict((i.name, i) for i in graph.topological_sort()) return torch.nn.ModuleDict(list(node_dict.items())) def _build_forward_inputs(self, *args, **kwargs): + """Function _build_forward_inputs.""" inputs = {} if args: for key, arg in zip(self._inputs, args): @@ -404,6 +425,7 @@ def _build_forward_inputs(self, *args, **kwargs): return inputs def forward(self, *args, **kwargs): + """Function forward.""" self._feature_dict.clear() inputs = self._build_forward_inputs(*args, **kwargs) diff --git a/otx/mpa/modules/ov/models/parser_mixin.py b/otx/core/ov/models/parser_mixin.py similarity index 83% rename from otx/mpa/modules/ov/models/parser_mixin.py rename to otx/core/ov/models/parser_mixin.py index 48120ba5d87..f28de3f9333 100644 --- a/otx/mpa/modules/ov/models/parser_mixin.py +++ b/otx/core/ov/models/parser_mixin.py @@ -1,12 +1,15 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +# type: ignore +# TODO: Need to remove line 1 (ignore mypy) and fix mypy issues +"""Parser mixin modules for otx.core.ov.models.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from typing import Callable, Dict, List, Optional, Tuple, Union import openvino.runtime as ov -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger from ..graph.parsers.builder import PARSERS from .ov_model import OVModel @@ -15,6 +18,8 @@ class ParserMixin: + """ParserMixin class.""" + def parse( self, model_path_or_model: Union[str, ov.Model], @@ -24,7 +29,7 @@ def parse( parser: Optional[Union[str, Callable]] = None, **kwargs, ) -> Tuple[Union[str, List[str]], Union[str, List[str]]]: - + """Parse function of ParserMixin class.""" parser = self.parser if parser is None else parser if isinstance(parser, str): parser = PARSERS.get(parser) @@ -52,5 +57,6 @@ def parse( return inputs, outputs @staticmethod - def parser(graph, **kwargs) -> Dict[str, Union[List[str], Dict[str, List[str]]]]: + def parser(graph, **kwargs) -> Dict[str, Union[List[str], Dict[str, List[str]]]]: # pylint: disable=unused-argument + """Function parser.""" return dict(inputs=[], outputs=[]) diff --git a/otx/mpa/modules/ov/omz_wrapper.py b/otx/core/ov/omz_wrapper.py similarity index 60% rename from otx/mpa/modules/ov/omz_wrapper.py rename to otx/core/ov/omz_wrapper.py index ab321b04023..d9125899703 100644 --- a/otx/mpa/modules/ov/omz_wrapper.py +++ b/otx/core/ov/omz_wrapper.py @@ -1,6 +1,7 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""OMZ wrapper-related code for otx.core.ov.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT import hashlib import os @@ -9,6 +10,7 @@ import sys import time from pathlib import Path +from typing import Dict, List import requests from openvino.model_zoo import _common, _reporting @@ -18,13 +20,14 @@ from openvino.model_zoo.omz_converter import ModelOptimizerProperties, convert_to_onnx from requests.exceptions import HTTPError -from otx.mpa.utils.file import MPA_CACHE +# pylint: 
disable=too-many-locals, too-many-branches +OTX_CACHE = os.path.expanduser(os.getenv("OTX_CACHE", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "otx"))) +os.makedirs(OTX_CACHE, exist_ok=True) +OMZ_CACHE = os.path.join(OTX_CACHE, "omz") +os.makedirs(OMZ_CACHE, exist_ok=True) -MPA_OMZ_CACHE = os.path.join(MPA_CACHE, "omz") -os.makedirs(MPA_OMZ_CACHE, exist_ok=True) - -OMZ_PUBLIC_MODELS = dict( +OMZ_PUBLIC_MODELS: Dict[str, List[str]] = dict( cls=[ "alexnet", "caffenet", @@ -86,37 +89,43 @@ ) -AVAILABLE_OMZ_MODELS = [model for models in OMZ_PUBLIC_MODELS.values() for model in models] +AVAILABLE_OMZ_MODELS: List[str] = [] +for models_ in OMZ_PUBLIC_MODELS.values(): + for model_ in models_: + AVAILABLE_OMZ_MODELS.append(model_) class NameSpace: + """NameSpace class for otx.core.ov.omz_wrapper.""" + def __init__(self, **kwargs): self.__dict__.update(kwargs) def _get_etag(url): + """Getter etag function from url.""" try: - response = requests.head(url, allow_redirects=True) + response = requests.head(url, allow_redirects=True, timeout=100) if response.status_code != 200: return None - else: - return response.headers.get("ETag", None) - except HTTPError as e: + return response.headers.get("ETag", None) + except HTTPError: return None def _get_ir_path(directory): + """Getter IR path function from directory path.""" directory = Path(directory) model_path = list(directory.glob("**/*.xml")) weight_path = list(directory.glob("**/*.bin")) if model_path and weight_path: assert len(model_path) == 1 and len(weight_path) == 1 return dict(model_path=model_path[0], weight_path=weight_path[0]) - else: - return None + return None def _run_pre_convert(reporter, model, output_dir, args): + """Run pre-converting function.""" script = _common.MODEL_ROOT / model.subdirectory_ori / "pre-convert.py" if not script.exists(): return True @@ -146,14 +155,15 @@ def _run_pre_convert(reporter, model, output_dir, args): def _update_model(model): - m = hashlib.sha256() + """Update model configs for omz_wrapper.""" + m_hash = hashlib.sha256() for file in model.files: url = file.source.url etag = _get_etag(url) if etag is not None: - m.update(bytes(etag, "utf-8")) + m_hash.update(bytes(etag, "utf-8")) model.subdirectory_ori = model.subdirectory - model.subdirectory = Path(m.hexdigest()) + model.subdirectory = Path(m_hash.hexdigest()) # FIXME: a bug from openvino-dev==2022.3.0 # It has been fixed on master branch. 
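
For context on the caching above: `_update_model()` keys each model's cache subdirectory on a SHA-256 digest of its files' HTTP ETags, so the cached IR under `OMZ_CACHE` is invalidated whenever any upstream file changes. A rough standalone sketch of the idea (`etag_cache_key` is a hypothetical name, not part of this module):

```python
import hashlib

import requests


def etag_cache_key(urls, timeout=100):
    """Hypothetical helper: derive a cache subdirectory name from file ETags."""
    digest = hashlib.sha256()
    for url in urls:
        try:
            response = requests.head(url, allow_redirects=True, timeout=timeout)
        except requests.RequestException:
            continue  # unreachable files simply don't contribute to the key
        etag = response.headers.get("ETag")
        if etag is not None:
            digest.update(etag.encode("utf-8"))
    return digest.hexdigest()  # cf. _update_model(), which stores this in model.subdirectory
```

Servers that omit the `ETag` header simply do not contribute to the digest, which mirrors the `if etag is not None` guard in `_update_model()`.
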
@@ -165,16 +175,19 @@ def _update_model(model): def get_model_configuration(model_name): + """Getter function of model configuration from name.""" model_configurations = load_models(_common.MODEL_ROOT, {}) - for i, model in enumerate(model_configurations): + for model in model_configurations: if model.name == model_name: _update_model(model) return model return None -def download_model(model, download_dir=MPA_OMZ_CACHE, precisions={"FP32"}, force=False): +def download_model(model, download_dir=OMZ_CACHE, precisions=None, force=False): + """Function for downloading model from directory.""" download_dir = Path("") if download_dir is None else Path(download_dir) + precisions = precisions if precisions else {"FP32"} # TODO: need delicate cache management if not force and (download_dir / model.subdirectory).exists(): @@ -207,15 +220,108 @@ def download_model(model, download_dir=MPA_OMZ_CACHE, precisions={"FP32"}, force sys.exit(1) +def _convert(reporter, model, output_dir, namespace, mo_props, requested_precisions): + """Convert function for OMZ wrapper.""" + if model.mo_args is None: + reporter.print_section_heading("Skipping {} (no conversions defined)", model.name) + reporter.print() + return True + + model_precisions = requested_precisions & model.precisions + if not model_precisions: + reporter.print_section_heading("Skipping {} (all conversions skipped)", model.name) + reporter.print() + return True + + (output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True) + + if not _run_pre_convert(reporter, model, output_dir, namespace): + return False + + model_format = model.framework + mo_extension_dir = mo_props.base_dir / "extensions" + if not mo_extension_dir.exists(): + mo_extension_dir = mo_props.base_dir + + template_variables = { + "config_dir": _common.MODEL_ROOT / model.subdirectory_ori, + "conv_dir": output_dir / model.subdirectory, + "dl_dir": namespace.download_dir / model.subdirectory, + "mo_dir": mo_props.base_dir, + "mo_ext_dir": mo_extension_dir, + } + + if model.conversion_to_onnx_args: + if not convert_to_onnx(reporter, model, output_dir, namespace, template_variables): + return False + model_format = "onnx" + + expanded_mo_args = [string.Template(arg).substitute(template_variables) for arg in model.mo_args] + + for model_precision in sorted(model_precisions): + data_type = model_precision.split("-")[0] + layout_string = ",".join(f"{input.name}({input.layout})" for input in model.input_info if input.layout) + shape_string = ",".join(str(input.shape) for input in model.input_info if input.shape) + + if layout_string: + expanded_mo_args.append(f"--layout={layout_string}") + if shape_string: + expanded_mo_args.append(f"--input_shape={shape_string}") + + mo_cmd = [ + *mo_props.cmd_prefix, + f"--framework={model_format}", + f"--data_type={data_type}", + f"--output_dir={output_dir / model.subdirectory / model_precision}", + f"--model_name={model.name}", + f"--input={','.join(input.name for input in model.input_info)}", + *expanded_mo_args, + *mo_props.extra_args, + ] + + reporter.print_section_heading( + "{}Converting {} to IR ({})", + "(DRY RUN) " if namespace.dry_run else "", + model.name, + model_precision, + ) + + reporter.print("Conversion command: {}", _common.command_string(mo_cmd)) + + if not namespace.dry_run: + reporter.print(flush=True) + + if not reporter.job_context.subprocess(mo_cmd): + # NOTE: mo returns non zero return code (245) even though it successfully generates IR + cur_time = time.time() + time_threshold = 5 + xml_path = output_dir /
model.subdirectory / model_precision / f"{model.name}.xml" + bin_path = output_dir / model.subdirectory / model_precision / f"{model.name}.bin" + if not ( + os.path.exists(xml_path) + and os.path.exists(bin_path) + and os.path.getmtime(xml_path) - cur_time < time_threshold + and os.path.getmtime(bin_path) - cur_time < time_threshold + ): + return False + + reporter.print() + + return True + + def convert_model( model, - download_dir=MPA_OMZ_CACHE, - output_dir=MPA_OMZ_CACHE, - precisions={"FP32"}, + download_dir=OMZ_CACHE, + output_dir=OMZ_CACHE, + precisions=None, force=False, -): + *args, +): # pylint: disable=keyword-arg-before-vararg + """Converting model for OMZ wrapping.""" download_dir = Path("") if download_dir is None else Path(download_dir) output_dir = Path("") if output_dir is None else Path(output_dir) + precisions = precisions if precisions else {"FP32"} out = _get_ir_path(output_dir / model.subdirectory) if out and not force: @@ -254,7 +360,7 @@ def convert_model( if mo_package_path is None: mo_package_path, stderr = _common.get_package_path(args.python, "mo") if mo_package_path is None: - sys.exit("Unable to load Model Optimizer. Errors occurred: {}".format(stderr)) + sys.exit(f"Unable to load Model Optimizer. Errors occurred: {stderr}") mo_dir = mo_package_path.parent reporter = _reporting.Reporter(_reporting.DirectOutputContext()) @@ -265,104 +371,14 @@ def convert_model( ) shared_convert_args = (output_dir, namespace, mo_props, precisions) - def convert(reporter, model, output_dir, namespace, mo_props, requested_precisions): - if model.mo_args is None: - reporter.print_section_heading("Skipping {} (no conversions defined)", model.name) - reporter.print() - return True - - model_precisions = requested_precisions & model.precisions - if not model_precisions: - reporter.print_section_heading("Skipping {} (all conversions skipped)", model.name) - reporter.print() - return True - - (output_dir / model.subdirectory).mkdir(parents=True, exist_ok=True) - - if not _run_pre_convert(reporter, model, output_dir, namespace): - return False - - model_format = model.framework - mo_extension_dir = mo_props.base_dir / "extensions" - if not mo_extension_dir.exists(): - mo_extension_dir = mo_props.base_dir - - template_variables = { - "config_dir": _common.MODEL_ROOT / model.subdirectory_ori, - "conv_dir": output_dir / model.subdirectory, - "dl_dir": namespace.download_dir / model.subdirectory, - "mo_dir": mo_props.base_dir, - "mo_ext_dir": mo_extension_dir, - } - - if model.conversion_to_onnx_args: - if not convert_to_onnx(reporter, model, output_dir, namespace, template_variables): - return False - model_format = "onnx" - - expanded_mo_args = [string.Template(arg).substitute(template_variables) for arg in model.mo_args] - - for model_precision in sorted(model_precisions): - data_type = model_precision.split("-")[0] - layout_string = ",".join( - "{}({})".format(input.name, input.layout) for input in model.input_info if input.layout - ) - shape_string = ",".join(str(input.shape) for input in model.input_info if input.shape) - - if layout_string: - expanded_mo_args.append("--layout={}".format(layout_string)) - if shape_string: - expanded_mo_args.append("--input_shape={}".format(shape_string)) - - mo_cmd = [ - *mo_props.cmd_prefix, - "--framework={}".format(model_format), - "--data_type={}".format(data_type), - "--output_dir={}".format(output_dir / model.subdirectory / model_precision), - "--model_name={}".format(model.name), - "--input={}".format(",".join(input.name for input in 
model.input_info)), - *expanded_mo_args, - *mo_props.extra_args, - ] - - reporter.print_section_heading( - "{}Converting {} to IR ({})", - "(DRY RUN) " if namespace.dry_run else "", - model.name, - model_precision, - ) - - reporter.print("Conversion command: {}", _common.command_string(mo_cmd)) - - if not namespace.dry_run: - reporter.print(flush=True) - - if not reporter.job_context.subprocess(mo_cmd): - # NOTE: mo returns non zero return code (245) even though it successfully generate IR - cur_time = time.time() - time_threshold = 5 - xml_path = output_dir / model.subdirectory / model_precision / f"{model.name}.xml" - bin_path = output_dir / model.subdirectory / model_precision / f"{model.name}.bin" - if not ( - os.path.exists(xml_path) - and os.path.exists(bin_path) - and os.path.getmtime(xml_path) - cur_time < time_threshold - and os.path.getmtime(bin_path) - cur_time < time_threshold - ): - return False - - reporter.print() - - return True - results = [] models = [] if model.model_stages: for model_stage in model.model_stages: - results.append(convert(reporter, model_stage, *shared_convert_args)) + results.append(_convert(reporter, model_stage, *shared_convert_args)) models.append(model_stage) else: - results.append(convert(reporter, model, *shared_convert_args)) + results.append(_convert(reporter, model, *shared_convert_args)) models.append(model) failed_models = [model.name for model, successful in zip(models, results) if not successful] @@ -376,7 +392,8 @@ def convert(reporter, model, output_dir, namespace, mo_props, requested_precisio return _get_ir_path(output_dir / model.subdirectory) -def get_omz_model(model_name, download_dir=MPA_OMZ_CACHE, output_dir=MPA_OMZ_CACHE, force=False): +def get_omz_model(model_name, download_dir=OMZ_CACHE, output_dir=OMZ_CACHE, force=False): + """Get OMZ model from name and download_dir.""" model = get_model_configuration(model_name) download_model(model, download_dir=download_dir, force=force) return convert_model(model, download_dir=download_dir, output_dir=output_dir, force=force) diff --git a/otx/core/ov/ops/__init__.py b/otx/core/ov/ops/__init__.py new file mode 100644 index 00000000000..938a11995a6 --- /dev/null +++ b/otx/core/ov/ops/__init__.py @@ -0,0 +1,140 @@ +"""Module of otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from .activations import ( + ClampV0, + EluV0, + ExpV0, + GeluV7, + HardSigmoidV0, + HSigmoidV5, + HSwishV4, + MishV4, + PReluV0, + ReluV0, + SeluV0, + SigmoidV0, + SoftMaxV0, + SoftMaxV1, + SwishV4, + TanhV0, +) +from .arithmetics import AddV1, DivideV1, MultiplyV1, SubtractV1, TanV0 +from .builder import OPS, OperationRegistry +from .convolutions import ConvolutionV1, GroupConvolutionV1 +from .generation import RangeV4 +from .image_processings import InterpolateV4 +from .infrastructures import ConstantV0, ParameterV0, ResultV0 +from .matmuls import EinsumV7, MatMulV0 +from .movements import ( + BroadcastV3, + ConcatV0, + GatherV0, + GatherV1, + PadV1, + ScatterNDUpdateV3, + ScatterUpdateV3, + ShuffleChannelsV0, + SplitV1, + StridedSliceV1, + TileV0, + TransposeV1, + VariadicSplitV1, +) +from .normalizations import ( + MVNV6, + BatchNormalizationV0, + LocalResponseNormalizationV0, + NormalizeL2V0, +) +from .object_detections import ( + DetectionOutputV0, + PriorBoxClusteredV0, + PriorBoxV0, + ProposalV4, + RegionYoloV0, + ROIPoolingV0, +) +from .op import Attribute, Operation +from .poolings import AvgPoolV1, MaxPoolV0 +from .reductions import ReduceMeanV1, ReduceMinV1, 
ReduceProdV1, ReduceSumV1 +from .shape_manipulations import ReshapeV1, ShapeOfV0, ShapeOfV3, SqueezeV0, UnsqueezeV0 +from .sorting_maximization import NonMaxSuppressionV5, NonMaxSuppressionV9, TopKV3 +from .type_conversions import ConvertV0 + +__all__ = [ + "SoftMaxV0", + "SoftMaxV1", + "ReluV0", + "SwishV4", + "SigmoidV0", + "ClampV0", + "PReluV0", + "TanhV0", + "EluV0", + "SeluV0", + "MishV4", + "HSwishV4", + "HSigmoidV5", + "ExpV0", + "HardSigmoidV0", + "GeluV7", + "MultiplyV1", + "DivideV1", + "AddV1", + "SubtractV1", + "TanV0", + "OPS", + "OperationRegistry", + "ConvolutionV1", + "GroupConvolutionV1", + "RangeV4", + "InterpolateV4", + "ParameterV0", + "ResultV0", + "ConstantV0", + "MatMulV0", + "EinsumV7", + "PadV1", + "ConcatV0", + "TransposeV1", + "GatherV0", + "GatherV1", + "StridedSliceV1", + "SplitV1", + "VariadicSplitV1", + "ShuffleChannelsV0", + "BroadcastV3", + "ScatterNDUpdateV3", + "ScatterUpdateV3", + "TileV0", + "BatchNormalizationV0", + "LocalResponseNormalizationV0", + "NormalizeL2V0", + "MVNV6", + "ProposalV4", + "ROIPoolingV0", + "DetectionOutputV0", + "RegionYoloV0", + "PriorBoxV0", + "PriorBoxClusteredV0", + "Operation", + "Attribute", + "MaxPoolV0", + "AvgPoolV1", + "ReduceMeanV1", + "ReduceProdV1", + "ReduceMinV1", + "ReduceSumV1", + "SqueezeV0", + "UnsqueezeV0", + "ReshapeV1", + "ShapeOfV0", + "ShapeOfV3", + "TopKV3", + "NonMaxSuppressionV5", + "NonMaxSuppressionV9", + "ConvertV0", +] diff --git a/otx/core/ov/ops/activations.py b/otx/core/ov/ops/activations.py new file mode 100644 index 00000000000..5d7f7ffc16b --- /dev/null +++ b/otx/core/ov/ops/activations.py @@ -0,0 +1,356 @@ +"""Activation-related modules for otx.core.ov.ops.activations.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import math +from dataclasses import dataclass, field + +import torch +from torch.nn import functional as F + +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation + + +@dataclass +class SoftMaxV0Attribute(Attribute): + """SoftMaxV0Attribute class.""" + + axis: int = field(default=1) + + +@OPS.register() +class SoftMaxV0(Operation[SoftMaxV0Attribute]): + """SoftMaxV0 class.""" + + TYPE = "Softmax" + VERSION = 0 + ATTRIBUTE_FACTORY = SoftMaxV0Attribute + + def forward(self, inputs): + """SoftMaxV0's forward function.""" + return F.softmax(input=inputs, dim=self.attrs.axis) + + +@dataclass +class SoftMaxV1Attribute(Attribute): + """SoftMaxV1Attribute class.""" + + axis: int = field(default=1) + + +@OPS.register() +class SoftMaxV1(Operation[SoftMaxV1Attribute]): + """SoftMaxV1 class.""" + + TYPE = "Softmax" + VERSION = 1 + ATTRIBUTE_FACTORY = SoftMaxV1Attribute + + def forward(self, inputs): + """SoftMaxV1's forward function.""" + return F.softmax(input=inputs, dim=self.attrs.axis) + + +@dataclass +class ReluV0Attribute(Attribute): + """ReluV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class ReluV0(Operation[ReluV0Attribute]): + """ReluV0 class.""" + + TYPE = "Relu" + VERSION = 0 + ATTRIBUTE_FACTORY = ReluV0Attribute + + def forward(self, inputs): + """ReluV0's forward function.""" + return F.relu(inputs) + + +@dataclass +class SwishV4Attribute(Attribute): + """SwishV4Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class SwishV4(Operation[SwishV4Attribute]): + """SwishV4 class.""" + + TYPE = "Swish" + VERSION = 4 + ATTRIBUTE_FACTORY = SwishV4Attribute + + def forward(self, inputs, beta=1.0): + """SwishV4's forward function.""" + 
return inputs * torch.sigmoid(inputs * beta) + + +@dataclass +class SigmoidV0Attribute(Attribute): + """SigmoidV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class SigmoidV0(Operation[SigmoidV0Attribute]): + """SigmoidV0 class.""" + + TYPE = "Sigmoid" + VERSION = 0 + ATTRIBUTE_FACTORY = SigmoidV0Attribute + + def forward(self, inputs): + """SigmoidV0's forward function.""" + return torch.sigmoid(inputs) + + +@dataclass +class ClampV0Attribute(Attribute): + """ClampV0Attribute class.""" + + min: float + max: float + + +@OPS.register() +class ClampV0(Operation[ClampV0Attribute]): + """ClampV0 class.""" + + TYPE = "Clamp" + VERSION = 0 + ATTRIBUTE_FACTORY = ClampV0Attribute + + def forward(self, inputs): + """ClampV0's forward function.""" + return inputs.clamp(min=self.attrs.min, max=self.attrs.max) + + +@dataclass +class PReluV0Attribute(Attribute): + """PReluV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class PReluV0(Operation[PReluV0Attribute]): + """PReluV0 class.""" + + TYPE = "PRelu" + VERSION = 0 + ATTRIBUTE_FACTORY = PReluV0Attribute + + def forward(self, inputs, slope): + """PReluV0's forward function.""" + return F.prelu(input=inputs, weight=slope) + + +@dataclass +class TanhV0Attribute(Attribute): + """TanhV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class TanhV0(Operation[TanhV0Attribute]): + """TanhV0 class.""" + + TYPE = "Tanh" + VERSION = 0 + ATTRIBUTE_FACTORY = TanhV0Attribute + + def forward(self, inputs): + """TanhV0's forward function.""" + return F.tanh(inputs) + + +@dataclass +class EluV0Attribute(Attribute): + """EluV0Attribute class.""" + + alpha: float + + +@OPS.register() +class EluV0(Operation[EluV0Attribute]): + """EluV0 class.""" + + TYPE = "Elu" + VERSION = 0 + ATTRIBUTE_FACTORY = EluV0Attribute + + def forward(self, inputs): + """EluV0's forward function.""" + return F.elu(input=inputs, alpha=self.attrs.alpha) + + +@dataclass +class SeluV0Attribute(Attribute): + """SeluV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class SeluV0(Operation[SeluV0Attribute]): + """SeluV0 class.""" + + TYPE = "Selu" + VERSION = 0 + ATTRIBUTE_FACTORY = SeluV0Attribute + + def forward(self, inputs, alpha, lambda_): + """SeluV0's forward function.""" + return lambda_ * F.elu(input=inputs, alpha=alpha) + + +@dataclass +class MishV4Attribute(Attribute): + """MishV4Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class MishV4(Operation[MishV4Attribute]): + """MishV4 class.""" + + TYPE = "Mish" + VERSION = 4 + ATTRIBUTE_FACTORY = MishV4Attribute + + def forward(self, inputs): + """MishV4's forward function.""" + # NOTE: pytorch 1.8.2 does not have mish function + # return F.mish(input=input) + return inputs * F.tanh(F.softplus(inputs)) + + +@dataclass +class HSwishV4Attribute(Attribute): + """HSwishV4Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class HSwishV4(Operation[HSwishV4Attribute]): + """HSwishV4 class.""" + + TYPE = "HSwish" + VERSION = 4 + ATTRIBUTE_FACTORY = HSwishV4Attribute + + def forward(self, inputs): + """HSwishV4's forward function.""" + return F.hardswish(input=inputs) + + +@dataclass +class HSigmoidV5Attribute(Attribute): + """HSigmoidV5Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class HSigmoidV5(Operation[HSigmoidV5Attribute]): + """HSigmoidV5 class.""" + + TYPE = "HSigmoid" + VERSION = 5 
+ ATTRIBUTE_FACTORY = HSigmoidV5Attribute + + def forward(self, inputs): + """HSigmoidV5's forward function.""" + return F.hardsigmoid(input=inputs) + + +@dataclass +class ExpV0Attribute(Attribute): + """ExpV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class ExpV0(Operation[ExpV0Attribute]): + """ExpV0 class.""" + + TYPE = "Exp" + VERSION = 0 + ATTRIBUTE_FACTORY = ExpV0Attribute + + def forward(self, inputs): + """ExpV0's forward function.""" + return torch.exp(inputs) + + +@dataclass +class HardSigmoidV0Attribute(Attribute): + """HardSigmoidV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass + + +@OPS.register() +class HardSigmoidV0(Operation[HardSigmoidV0Attribute]): + """HardSigmoidV0 class.""" + + TYPE = "HardSigmoid" + VERSION = 0 + ATTRIBUTE_FACTORY = HardSigmoidV0Attribute + + def forward(self, inputs, alpha, beta): + """HardSigmoidV0's forward function.""" + return torch.maximum( + torch.zeros_like(inputs), + torch.minimum(torch.ones_like(inputs), inputs * alpha + beta), + ) + + +@dataclass +class GeluV7Attribute(Attribute): + """GeluV7Attribute class.""" + + approximation_mode: str = field(default="ERF") + + def __post_init__(self): + """GeluV7Attribute's post init function.""" + super().__post_init__() + valid_approximation_mode = ["ERF", "tanh"] + if self.approximation_mode not in valid_approximation_mode: + raise ValueError( + f"Invalid approximation_mode {self.approximation_mode}. " + f"It must be one of {valid_approximation_mode}." + ) + + +@OPS.register() +class GeluV7(Operation[GeluV7Attribute]): + """GeluV7 class.""" + + TYPE = "Gelu" + VERSION = 7 + ATTRIBUTE_FACTORY = GeluV7Attribute + + def forward(self, inputs): + """GeluV7's forward function.""" + mode = self.attrs.approximation_mode + if mode == "ERF": + return F.gelu(input=inputs) + if mode == "tanh": + return ( + inputs * 0.5 * (1 + F.tanh(torch.sqrt(2 / torch.tensor(math.pi)) * (inputs + 0.044715 * inputs**3))) + ) + return None diff --git a/otx/mpa/modules/ov/ops/arithmetics.py b/otx/core/ov/ops/arithmetics.py similarity index 70% rename from otx/mpa/modules/ov/ops/arithmetics.py rename to otx/core/ov/ops/arithmetics.py index f637fca53d3..b959037650c 100644 --- a/otx/mpa/modules/ov/ops/arithmetics.py +++ b/otx/core/ov/ops/arithmetics.py @@ -1,51 +1,61 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Arithmetics-related codes for otx.core.ov.ops.arithmetics.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field import torch -from .builder import OPS -from .op import Attribute, Operation +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation @dataclass class MultiplyV1Attribute(Attribute): + """MultiplyV1Attribute class.""" + auto_broadcast: str = field(default="numpy") @OPS.register() class MultiplyV1(Operation[MultiplyV1Attribute]): + """MultiplyV1 class.""" + TYPE = "Multiply" VERSION = 1 ATTRIBUTE_FACTORY = MultiplyV1Attribute def forward(self, input_0, input_1): + """MultiplyV1's forward function.""" broadcast = self.attrs.auto_broadcast if broadcast == "none": assert input_0.shape == input_1.shape return input_0 * input_1 - elif broadcast == "numpy": + if broadcast == "numpy": return input_0 * input_1 - else: - raise NotImplementedError + raise NotImplementedError @dataclass class DivideV1Attribute(Attribute): + """DivideV1Attribute class.""" + m_pythondiv: bool = field(default=True) auto_broadcast: str = 
field(default="numpy") @OPS.register() class DivideV1(Operation[DivideV1Attribute]): + """DivideV1 class.""" + TYPE = "Divide" VERSION = 1 ATTRIBUTE_FACTORY = DivideV1Attribute def forward(self, input_0, input_1): + """DivideV1's forward function.""" broadcast = self.attrs.auto_broadcast if broadcast == "none": @@ -65,60 +75,73 @@ def forward(self, input_0, input_1): @dataclass class AddV1Attribute(Attribute): + """AddV1Attribute class.""" + auto_broadcast: str = field(default="numpy") @OPS.register() class AddV1(Operation[AddV1Attribute]): + """AddV1 class.""" + TYPE = "Add" VERSION = 1 ATTRIBUTE_FACTORY = AddV1Attribute def forward(self, input_0, input_1): + """AddV1's forward function.""" broadcast = self.attrs.auto_broadcast if broadcast == "none": assert input_0.shape == input_1.shape return input_0 + input_1 - elif broadcast == "numpy": + if broadcast == "numpy": return input_0 + input_1 - else: - raise NotImplementedError + raise NotImplementedError @dataclass class SubtractV1Attribute(Attribute): + """SubtractV1Attribute class.""" + auto_broadcast: str = field(default="numpy") @OPS.register() class SubtractV1(Operation[SubtractV1Attribute]): + """SubtractV1 class.""" + TYPE = "Subtract" VERSION = 1 ATTRIBUTE_FACTORY = SubtractV1Attribute def forward(self, input_0, input_1): + """SubtractV1's forward function.""" broadcast = self.attrs.auto_broadcast if broadcast == "none": assert input_0.shape == input_1.shape return input_0 - input_1 - elif broadcast == "numpy": + if broadcast == "numpy": return input_0 - input_1 - else: - raise NotImplementedError + raise NotImplementedError @dataclass class TanV0Attribute(Attribute): - pass + """TanV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class TanV0(Operation[TanV0Attribute]): + """TanV0 class.""" + TYPE = "Tan" VERSION = 0 ATTRIBUTE_FACTORY = TanV0Attribute - def forward(self, input): - return torch.tan(input) + def forward(self, inputs): + """TanV0's forward function.""" + return torch.tan(inputs) diff --git a/otx/mpa/modules/ov/ops/builder.py b/otx/core/ov/ops/builder.py similarity index 50% rename from otx/mpa/modules/ov/ops/builder.py rename to otx/core/ov/ops/builder.py index 2b97d93ae5d..ace6e17777f 100644 --- a/otx/mpa/modules/ov/ops/builder.py +++ b/otx/core/ov/ops/builder.py @@ -1,18 +1,23 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""OPS (OperationRegistry) module for otx.core.ov.ops.builder.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from typing import Any, Optional -from ..registry import Registry +from otx.core.ov.registry import Registry class OperationRegistry(Registry): + """OperationRegistry class.""" + def __init__(self, name, add_name_as_attr=False): super().__init__(name, add_name_as_attr) self._registry_dict_by_type = {} def register(self, name: Optional[Any] = None): + """Register function from name.""" + def wrap(obj): layer_name = name if layer_name is None: @@ -27,23 +32,26 @@ def wrap(obj): return wrap - def _register(self, obj, name, type, version): + def _register(self, obj, name, types, version): + """Register function from obj and obj name.""" super()._register(obj, name) - if type not in self._registry_dict_by_type: - self._registry_dict_by_type[type] = {} - if version in self._registry_dict_by_type[type]: - raise KeyError(f"{version} is already registered in {type}") - self._registry_dict_by_type[type][version] = obj + if types not in self._registry_dict_by_type: + self._registry_dict_by_type[types] 
= {} + if version in self._registry_dict_by_type[types]: + raise KeyError(f"{version} is already registered in {types}") + self._registry_dict_by_type[types][version] = obj def get_by_name(self, name): + """Get obj from name.""" return self.get(name) - def get_by_type_version(self, type, version): - if type not in self._registry_dict_by_type: - raise KeyError(f"type {type} is not registered in {self._name}") - if version not in self._registry_dict_by_type[type]: - raise KeyError(f"version {version} is not registered in {type} of {self._name}") - return self._registry_dict_by_type[type][version] + def get_by_type_version(self, types, version): + """Get obj from type and version.""" + if types not in self._registry_dict_by_type: + raise KeyError(f"type {types} is not registered in {self._name}") + if version not in self._registry_dict_by_type[types]: + raise KeyError(f"version {version} is not registered in {types} of {self._name}") + return self._registry_dict_by_type[types][version] OPS = OperationRegistry("ov ops") diff --git a/otx/mpa/modules/ov/ops/convolutions.py b/otx/core/ov/ops/convolutions.py similarity index 74% rename from otx/mpa/modules/ov/ops/convolutions.py rename to otx/core/ov/ops/convolutions.py index 20d21dcc2f2..a551be27810 100644 --- a/otx/mpa/modules/ov/ops/convolutions.py +++ b/otx/core/ov/ops/convolutions.py @@ -1,20 +1,22 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Convolutions-related module for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field from typing import Callable, List -import torch from torch.nn import functional as F -from .builder import OPS -from .op import Attribute, Operation -from .utils import get_torch_padding +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.movements import get_torch_padding +from otx.core.ov.ops.op import Attribute, Operation @dataclass class ConvolutionV1Attribute(Attribute): + """ConvolutionV1Attribute class.""" + strides: List[int] pads_begin: List[int] pads_end: List[int] @@ -22,6 +24,7 @@ class ConvolutionV1Attribute(Attribute): auto_pad: str = field(default="explicit") def __post_init__(self): + """ConvolutionV1Attribute's post-init function.""" super().__post_init__() valid_auto_pad = ["explicit", "same_upper", "same_Lower", "valid"] if self.auto_pad not in valid_auto_pad: @@ -30,11 +33,14 @@ def __post_init__(self): @OPS.register() class ConvolutionV1(Operation[ConvolutionV1Attribute]): + """ConvolutionV1 class.""" + TYPE = "Convolution" VERSION = 1 ATTRIBUTE_FACTORY = ConvolutionV1Attribute - def forward(self, input, weight): + def forward(self, inputs, weight): + """ConvolutionV1's forward function.""" if weight.dim() == 3: func = F.conv1d elif weight.dim() == 4: @@ -48,17 +54,17 @@ def forward(self, input, weight): self.attrs.pads_begin, self.attrs.pads_end, self.attrs.auto_pad, - list(input.shape[2:]), + list(inputs.shape[2:]), list(weight.shape[2:]), self.attrs.strides, self.attrs.dilations, ) if isinstance(padding, Callable): - input = padding(input=input) + inputs = padding(input=inputs) padding = 0 return func( - input=input, + input=inputs, weight=weight, bias=None, stride=self.attrs.strides, @@ -69,16 +75,21 @@ def forward(self, input, weight): @dataclass class GroupConvolutionV1Attribute(ConvolutionV1Attribute): - pass + """GroupConvolutionV1Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class 
GroupConvolutionV1(Operation[GroupConvolutionV1Attribute]): + """GroupConvolutionV1 class.""" + TYPE = "GroupConvolution" VERSION = 1 ATTRIBUTE_FACTORY = GroupConvolutionV1Attribute - def forward(self, input, weight): + def forward(self, inputs, weight): + """GroupConvolutionV1's forward function.""" if weight.dim() == 4: func = F.conv1d elif weight.dim() == 5: @@ -96,17 +107,17 @@ def forward(self, input, weight): self.attrs.pads_begin, self.attrs.pads_end, self.attrs.auto_pad, - list(input.shape[2:]), + list(inputs.shape[2:]), list(weight.shape[2:]), self.attrs.strides, self.attrs.dilations, ) if isinstance(padding, Callable): - input = padding(input=input) + inputs = padding(input=inputs) padding = 0 output = func( - input=input, + input=inputs, weight=weight, bias=None, stride=self.attrs.strides, diff --git a/otx/mpa/modules/ov/ops/generation.py b/otx/core/ov/ops/generation.py similarity index 59% rename from otx/mpa/modules/ov/ops/generation.py rename to otx/core/ov/ops/generation.py index 5785aa8aa80..395801923bd 100644 --- a/otx/mpa/modules/ov/ops/generation.py +++ b/otx/core/ov/ops/generation.py @@ -1,28 +1,34 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Generation-related module for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass import torch -from .builder import OPS -from .op import Attribute, Operation -from .type_conversions import _ov_to_torch +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation +from otx.core.ov.ops.type_conversions import _ov_to_torch @dataclass class RangeV4Attribute(Attribute): + """RangeV4Attribute class.""" + output_type: str @OPS.register() class RangeV4(Operation[RangeV4Attribute]): + """RangeV4 class.""" + TYPE = "Range" VERSION = 4 ATTRIBUTE_FACTORY = RangeV4Attribute def forward(self, start, stop, step): + """RangeV4's forward function.""" dtype = _ov_to_torch[self.attrs.output_type] return torch.arange( start=start, diff --git a/otx/mpa/modules/ov/ops/image_processings.py b/otx/core/ov/ops/image_processings.py similarity index 78% rename from otx/mpa/modules/ov/ops/image_processings.py rename to otx/core/ov/ops/image_processings.py index 8c54357a1a1..f6da2fc53d7 100644 --- a/otx/mpa/modules/ov/ops/image_processings.py +++ b/otx/core/ov/ops/image_processings.py @@ -1,6 +1,7 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Image Processings-related code for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field from typing import List @@ -12,9 +13,13 @@ from .movements import PadV1 from .op import Attribute, Operation +# pylint: disable=too-many-instance-attributes, too-many-branches + @dataclass class InterpolateV4Attribute(Attribute): + """InterpolateV4Attribute class.""" + mode: str shape_calculation_mode: str coordinate_transformation_mode: str = field(default="half_pixel") @@ -25,6 +30,7 @@ class InterpolateV4Attribute(Attribute): cube_coeff: float = field(default=-0.75) def __post_init__(self): + """InterpolateV4Attribute's post-init function.""" super().__post_init__() valid_mode = ["nearest", "linear", "linear_onnx", "cubic"] if self.mode not in valid_mode: @@ -60,6 +66,8 @@ def __post_init__(self): @OPS.register() class InterpolateV4(Operation[InterpolateV4Attribute]): + """InterpolateV4 class.""" + TYPE = "Interpolate" VERSION = 4 ATTRIBUTE_FACTORY = 
InterpolateV4Attribute @@ -68,7 +76,8 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.pad = PadV1("tmp", shape=self.shape, pad_mode="constant") - def forward(self, input, sizes, scales, axes=None): + def forward(self, inputs, sizes, scales, axes=None): + """InterpolateV4's forward function.""" # TODO list # - handle 'linear_onnx' mode # - coordinate_transformation_mode @@ -77,14 +86,14 @@ def forward(self, input, sizes, scales, axes=None): # - antialias if axes is None: - axes = list(range(input.dim())) + axes = list(range(inputs.dim())) else: axes = axes.detach().cpu().tolist() - output = self.pad(input, self.attrs.pads_begin, self.attrs.pads_end, 0) + output = self.pad(inputs, self.attrs.pads_begin, self.attrs.pads_end, 0) mode = self.attrs.mode - if mode == "linear" or mode == "linear_onnx": + if mode in ("linear", "linear_onnx"): align_corners = False if output.dim() == 3: pass @@ -96,13 +105,13 @@ def forward(self, input, sizes, scales, axes=None): align_corners = False if output.dim() == 3: raise NotImplementedError - elif output.dim() == 4: + if output.dim() == 4: mode = "bicubic" elif output.dim() == 5: raise NotImplementedError elif mode == "nearest": align_corners = None - pass + pass # pylint: disable=unnecessary-pass else: raise NotImplementedError @@ -119,16 +128,15 @@ def forward(self, input, sizes, scales, axes=None): mode=mode, align_corners=align_corners, ) - else: - scales = scales.detach().cpu().numpy() - scales = scales[np.argsort(axes)].tolist() - if output.dim() == len(scales): - scales = scales[2:] - - return F.interpolate( - input=output, - size=None, - scale_factor=scales, - mode=mode, - align_corners=align_corners, - ) + scales = scales.detach().cpu().numpy() + scales = scales[np.argsort(axes)].tolist() + if output.dim() == len(scales): + scales = scales[2:] + + return F.interpolate( + input=output, + size=None, + scale_factor=scales, + mode=mode, + align_corners=align_corners, + ) diff --git a/otx/mpa/modules/ov/ops/infrastructures.py b/otx/core/ov/ops/infrastructures.py similarity index 84% rename from otx/mpa/modules/ov/ops/infrastructures.py rename to otx/core/ov/ops/infrastructures.py index 05d1caf0d3c..eb9a4bdc4d7 100644 --- a/otx/mpa/modules/ov/ops/infrastructures.py +++ b/otx/core/ov/ops/infrastructures.py @@ -1,6 +1,7 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Infrastructure-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from collections import OrderedDict from dataclasses import dataclass, field @@ -9,17 +10,14 @@ import numpy as np import torch -from otx.mpa.utils.logger import get_logger +from otx.algorithms.common.utils.logger import get_logger -from ..utils import get_op_name +from ..utils import get_op_name # type: ignore[attr-defined] from .builder import OPS from .op import Attribute, Operation from .type_conversions import ConvertV0 from .utils import get_dynamic_shape -logger = get_logger() - - NODE_TYPES_WITH_WEIGHT = set( [ "Convolution", @@ -33,9 +31,13 @@ ] ) +logger = get_logger() + @dataclass class ParameterV0Attribute(Attribute): + """ParameterV0Attribute class.""" + element_type: Optional[str] = field(default=None) layout: Optional[Tuple[str]] = field(default=None) @@ -43,6 +45,7 @@ class ParameterV0Attribute(Attribute): verify_shape: bool = field(default=True) def __post_init__(self): + """ParameterV0Attribute's post-init function.""" super().__post_init__() # fmt: off valid_element_type = [ @@ 
-57,29 +60,33 @@ def __post_init__(self): @OPS.register() class ParameterV0(Operation[ParameterV0Attribute]): + """ParameterV0 class.""" + TYPE = "Parameter" VERSION = 0 ATTRIBUTE_FACTORY = ParameterV0Attribute - def forward(self, input): + def forward(self, inputs): + """ParameterV0's forward function.""" # TODO: validate shape # need to handle new generated op from reshaped model if self.attrs.verify_shape: assert self.shape is not None ov_shape = self.shape[0] - torch_shape = list(input.shape) + torch_shape = list(inputs.shape) for ov_shape_, torch_shape_ in zip(ov_shape, torch_shape): if ov_shape_ == -1: continue assert ov_shape_ == torch_shape_, f"input shape {torch_shape} does not match with ov shape {ov_shape}" if self.attrs.permute: - input = input.permute(self.attrs.permute) + inputs = inputs.permute(self.attrs.permute) - return input + return inputs @classmethod def from_ov(cls, ov_op): + """ParameterV0's from_ov function.""" op_type = ov_op.get_type_name() op_version = ov_op.get_version() op_name = get_op_name(ov_op) @@ -122,8 +129,8 @@ def from_ov(cls, ov_op): new_shape = [] for shape in attrs["shape"]: new_shape.append([-1 if j == i else k for j, k in enumerate(shape)]) - new_shape = tuple(tuple(shape) for shape in new_shape) - attrs["shape"] = new_shape + new_shape = [tuple(shape) for shape in new_shape] + attrs["shape"] = tuple(new_shape) # change shape and layout based on permute if "permute" in attrs and attrs["permute"] != (0, 1, 2, 3): @@ -135,28 +142,35 @@ def from_ov(cls, ov_op): for shape in attrs["shape"]: new_shape.append([shape[i] for i in permute]) attrs["shape"] = tuple(tuple(shape) for shape in new_shape) - attrs["layout"] = tuple([attrs["layout"][i] for i in permute]) + attrs["layout"] = tuple(attrs["layout"][i] for i in permute) return cls(name=op_name, **attrs) @dataclass class ResultV0Attribute(Attribute): - pass + """ResultV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class ResultV0(Operation[ResultV0Attribute]): + """ResultV0 class.""" + TYPE = "Result" VERSION = 0 ATTRIBUTE_FACTORY = ResultV0Attribute - def forward(self, input): - return input + def forward(self, inputs): + """ResultV0's forward function.""" + return inputs @dataclass class ConstantV0Attribute(Attribute): + """ConstantV0Attribute class.""" + element_type: str offset: int = field(default=0) size: int = field(default=0) @@ -164,6 +178,7 @@ class ConstantV0Attribute(Attribute): is_parameter: bool = field(default=False) def __post_init__(self): + """ConstantV0Attribute's post-init function.""" super().__post_init__() # fmt: off valid_element_type = [ @@ -177,6 +192,8 @@ def __post_init__(self): @OPS.register() class ConstantV0(Operation[ConstantV0Attribute]): + """ConstantV0 class.""" + TYPE = "Constant" VERSION = 0 ATTRIBUTE_FACTORY = ConstantV0Attribute @@ -194,10 +211,12 @@ def __init__(self, *args, **kwargs): self.register_buffer("data", data) def forward(self): + """ConstantV0's forward function.""" return self.data @classmethod def from_ov(cls, ov_op): + """ConstantV0's from_ov function.""" op_type = ov_op.get_type_name() op_version = ov_op.get_version() op_name = get_op_name(ov_op) @@ -228,6 +247,7 @@ def from_ov(cls, ov_op): # FIXME: need a better way to distinghish if it is parameter or no is_parameter = False + # pylint: disable=too-many-boolean-expressions if ( set(op_node_types).intersection(NODE_TYPES_WITH_WEIGHT) and len(in_port_indices) == 1 diff --git a/otx/mpa/modules/ov/ops/matmuls.py b/otx/core/ov/ops/matmuls.py similarity index 68% 
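The `OPS` registry and `Operation` base class used throughout this diff key each torch implementation by a `(TYPE, VERSION)` pair. A hypothetical registration, using an invented `IdentityV0` op purely for illustration (import paths follow the new `otx.core.ov.ops` layout):

```python
from dataclasses import dataclass

from otx.core.ov.ops.builder import OPS
from otx.core.ov.ops.op import Attribute, Operation


@dataclass
class IdentityV0Attribute(Attribute):
    """Attribute holder for the toy op; only the inherited shape field."""


@OPS.register()
class IdentityV0(Operation[IdentityV0Attribute]):
    """Toy pass-through op, registered under ("Identity", 0)."""

    TYPE = "Identity"
    VERSION = 0
    ATTRIBUTE_FACTORY = IdentityV0Attribute

    def forward(self, inputs):
        return inputs


# Lookup mirrors what the graph parser does when it meets an IR node.
assert OPS.get_by_type_version("Identity", 0) is IdentityV0
```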
rename from otx/mpa/modules/ov/ops/matmuls.py rename to otx/core/ov/ops/matmuls.py index bc5764e66e7..0bf2e3a173b 100644 --- a/otx/mpa/modules/ov/ops/matmuls.py +++ b/otx/core/ov/ops/matmuls.py @@ -1,28 +1,34 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""MatMul-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field import torch -from .builder import OPS -from .op import Attribute, Operation +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation @dataclass class MatMulV0Attribute(Attribute): + """MatMulV0Attribute class.""" + transpose_a: bool = field(default=False) transpose_b: bool = field(default=False) @OPS.register() class MatMulV0(Operation[MatMulV0Attribute]): + """MatMulV0 class.""" + TYPE = "MatMul" VERSION = 0 ATTRIBUTE_FACTORY = MatMulV0Attribute def forward(self, input_a, input_b): + """MatMulV0's forward function.""" if self.attrs.transpose_a: input_a = torch.transpose(input_a, -1, -2) if self.attrs.transpose_b: @@ -32,14 +38,19 @@ def forward(self, input_a, input_b): @dataclass class EinsumV7Attribute(Attribute): + """EinsumV7Attribute class.""" + equation: str @OPS.register() class EinsumV7(Operation[EinsumV7Attribute]): + """EinsumV7 class.""" + TYPE = "Einsum" VERSION = 7 ATTRIBUTE_FACTORY = EinsumV7Attribute def forward(self, *inputs): + """EinsumV7's forward function.""" return torch.einsum(self.attrs.equation, *inputs) diff --git a/otx/core/ov/ops/modules/__init__.py b/otx/core/ov/ops/modules/__init__.py new file mode 100644 index 00000000000..6f18c9c4a33 --- /dev/null +++ b/otx/core/ov/ops/modules/__init__.py @@ -0,0 +1,8 @@ +"""Module for otx.core.ov.ops.modules.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from .op_module import OperationModule + +__all__ = ["OperationModule"] diff --git a/otx/core/ov/ops/modules/op_module.py b/otx/core/ov/ops/modules/op_module.py new file mode 100644 index 00000000000..2f59da59cd3 --- /dev/null +++ b/otx/core/ov/ops/modules/op_module.py @@ -0,0 +1,104 @@ +"""Operation module for otx.core.ov.ops.modules.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import inspect +from typing import Dict, List, Optional, Union + +import torch +from openvino.pyopenvino import Node # pylint: disable=no-name-in-module + +from ..op import Operation +from ..utils import convert_op_to_torch + + +class OperationModule(torch.nn.Module): + """OperationModule class.""" + + def __init__( + self, + op_v: Operation, + dependent_ops: Union[List[Operation], Dict[str, Optional[Operation]]], + ): + super().__init__() + + self.op_v = op_v + self._dependent_ops = torch.nn.ModuleDict() + + spec = inspect.getfullargspec(op_v.forward) + kwargs = spec.args[1:] + + self._dependents_with_defaults = [] + if spec.defaults: + self._dependents_with_defaults = spec.args[-len(spec.defaults) :] + + if isinstance(dependent_ops, list): + assert len(dependent_ops) == len(kwargs) + for op_, kwarg in zip(dependent_ops, kwargs): + self._dependent_ops[kwarg] = op_ + elif isinstance(dependent_ops, dict): + for kwarg in kwargs: + self._dependent_ops[kwarg] = dependent_ops[kwarg] + else: + raise NotImplementedError + + def forward(self, *args, **kwargs): + """Operationmodule's forward function.""" + inputs = {k: v() if v is not None else None for k, v in
self._dependent_ops.items() if v is None] + for key, val in zip(empty_input_keys, args): + inputs[key] = val + if kwargs: + for key, val in kwargs.items(): + if inputs[key] is not None: + raise ValueError(f"duplicated key {key}") + inputs[key] = val + + assert all(v is not None for v in inputs.values() if v not in self._dependents_with_defaults) + + return self.op_v(**inputs) + + @property + def type(self): # pylint: disable=invalid-overridden-method + """Operationmodule's type property.""" + return self.op_v.type + + @property + def version(self): + """Operationmodule's version property.""" + return self.op_v.version + + @property + def name(self): + """Operationmodule's name property.""" + return self.op_v.name + + @property + def shape(self): + """Operationmodule's shape property.""" + return self.op_v.shape + + @property + def attrs(self): + """Operationmodule's attrs property.""" + return self.op_v.attrs + + +def convert_op_to_torch_module(target_op: Node): + """Convert op Node to torch module.""" + dependent_modules = [] + for in_port in target_op.inputs(): + out_port = in_port.get_source_output() + parent = out_port.get_node() + + parent_type = parent.get_type_name() + if parent_type == "Constant": + dependent_modules.append(convert_op_to_torch(parent)) + else: + dependent_modules.append(None) + module = convert_op_to_torch(target_op) + module = OperationModule(module, dependent_modules) + return module diff --git a/otx/mpa/modules/ov/ops/movements.py b/otx/core/ov/ops/movements.py similarity index 58% rename from otx/mpa/modules/ov/ops/movements.py rename to otx/core/ov/ops/movements.py index 2faf812b4d4..e529214e992 100644 --- a/otx/mpa/modules/ov/ops/movements.py +++ b/otx/core/ov/ops/movements.py @@ -1,9 +1,11 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Movement-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT import math from dataclasses import dataclass, field +from functools import partial from typing import List import torch @@ -12,12 +14,17 @@ from .builder import OPS from .op import Attribute, Operation +# pylint: disable=too-many-branches + @dataclass class PadV1Attribute(Attribute): + """PadV1Attribute class.""" + pad_mode: str def __post_init__(self): + """PadV1Attribute's post-init function.""" super().__post_init__() valid_pad_mode = ["constant", "edge", "reflect", "symmetric"] if self.pad_mode not in valid_pad_mode: @@ -26,6 +33,8 @@ def __post_init__(self): @OPS.register() class PadV1(Operation[PadV1Attribute]): + """PadV1 class.""" + TYPE = "Pad" VERSION = 1 ATTRIBUTE_FACTORY = PadV1Attribute @@ -36,76 +45,91 @@ def __init__(self, *args, **kwargs): @staticmethod def get_torch_pad_mode(pad_mode): + """PadV1's get_torch_pad_mode function.""" if pad_mode == "constant": return "constant" - elif pad_mode == "edge": + if pad_mode == "edge": return "replicate" - elif pad_mode == "reflect": + if pad_mode == "reflect": return "reflect" - elif pad_mode == "symmetric": - raise NotImplementedError - else: - raise NotImplementedError + raise NotImplementedError @staticmethod def get_torch_pad_dim(pads_begin, pads_end): + """PadV1's get_torch_pad_dim function.""" # reverse padding return [val for tup in zip(pads_begin[::-1], pads_end[::-1]) for val in tup] - def forward(self, input, pads_begin, pads_end, pad_value=0): + def forward(self, inputs, pads_begin, pads_end, pad_value=0): + """PadV1's forward function.""" pads_begin = pads_begin if isinstance(pads_begin, list) else 
pads_begin.detach().cpu().tolist() pads_end = pads_end if isinstance(pads_end, list) else pads_end.detach().cpu().tolist() pad = self.get_torch_pad_dim(pads_begin, pads_end) pad = list(map(math.ceil, pad)) - return F.pad(input=input, pad=pad, mode=self._pad_mode, value=pad_value) + return F.pad(input=inputs, pad=pad, mode=self._pad_mode, value=pad_value) @dataclass class ConcatV0Attribute(Attribute): + """ConcatV0Attribute class.""" + axis: int @OPS.register() class ConcatV0(Operation[ConcatV0Attribute]): + """ConcatV0 class.""" + TYPE = "Concat" VERSION = 0 ATTRIBUTE_FACTORY = ConcatV0Attribute def forward(self, *inputs): + """ConcatV0's forward function.""" return torch.cat(inputs, self.attrs.axis) @dataclass class TransposeV1Attribute(Attribute): - pass + """TransposeV1Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class TransposeV1(Operation[TransposeV1Attribute]): + """TransposeV1 class.""" + TYPE = "Transpose" VERSION = 1 ATTRIBUTE_FACTORY = TransposeV1Attribute - def forward(self, input, order): + def forward(self, inputs, order): + """TransposeV1's forward function.""" if order.numel() == 0: - order = list(range(input.dim()))[::-1] + order = list(range(inputs.dim()))[::-1] elif isinstance(order, torch.Tensor): order = order.detach().cpu().tolist() - return input.permute(order) + return inputs.permute(order) @dataclass class GatherV0Attribute(Attribute): + """GatherV0Attribute class.""" + batch_dims: int = field(default=0) @OPS.register() class GatherV0(Operation[GatherV0Attribute]): + """GatherV0 class.""" + TYPE = "Gather" VERSION = 0 ATTRIBUTE_FACTORY = GatherV0Attribute - def forward(self, input, indices, axis): + def forward(self, inputs, indices, axis): + """GatherV0's forward function.""" assert axis.numel() == 1 axis = axis.squeeze() squeeze_axis = indices.dim() == 0 @@ -119,22 +143,22 @@ def forward(self, input, indices, axis): indices = indices.reshape(*indices_shape[:batch_dims], -1) indices_shape = indices_shape[batch_dims:] - if indices.dim() != input.dim(): + if indices.dim() != inputs.dim(): if indices.dim() != 0: while indices.dim() - 1 < axis: indices = indices.unsqueeze(batch_dims) - while indices.dim() < input.dim(): + while indices.dim() < inputs.dim(): indices = indices.unsqueeze(-1) repeat = [] - for i, (j, k) in enumerate(zip(input.shape, indices.shape)): + for i, (j, k) in enumerate(zip(inputs.shape, indices.shape)): if i == axis: repeat.append(1) else: assert j % k == 0 repeat.append(j // k) indices = indices.repeat(repeat) - output = torch.gather(input=input, dim=axis, index=indices.type(torch.int64)) + output = torch.gather(input=inputs, dim=axis, index=indices.type(torch.int64)) if squeeze_axis: output = output.squeeze(axis) @@ -144,21 +168,28 @@ def forward(self, input, indices, axis): @dataclass class GatherV1Attribute(Attribute): - pass + """GatherV1Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class GatherV1(Operation[GatherV1Attribute]): + """GatherV1 class.""" + TYPE = "Gather" VERSION = 1 ATTRIBUTE_FACTORY = GatherV1Attribute - def forward(self, input, indices, axis): - return torch.gather(input=input, dim=axis, index=indices) + def forward(self, inputs, indices, axis): + """GatherV1's forward function.""" + return torch.gather(input=inputs, dim=axis, index=indices) @dataclass class StridedSliceV1Attribute(Attribute): + """StridedSliceV1Attribute class.""" + begin_mask: List[int] end_mask: List[int] new_axis_mask: List[int] = field(default_factory=lambda: [0]) @@ -168,11 +199,14 @@ 
class StridedSliceV1Attribute(Attribute): @OPS.register() class StridedSliceV1(Operation[StridedSliceV1Attribute]): + """StridedSliceV1 class.""" + TYPE = "StridedSlice" VERSION = 1 ATTRIBUTE_FACTORY = StridedSliceV1Attribute - def forward(self, input, begin, end, stride=None): + def forward(self, inputs, begin, end, stride=None): + """StridedSliceV1's forward function.""" if sum(self.attrs.ellipsis_mask) > 0: raise NotImplementedError @@ -181,28 +215,28 @@ def forward(self, input, begin, end, stride=None): begin[i] = 0 for i, mask in enumerate(self.attrs.end_mask): if mask == 1: - end[i] = input.size(i) + end[i] = inputs.size(i) if stride is None: stride = torch.tensor([1 for _ in begin], dtype=begin.dtype) - output = input - for i, (b, e, s) in enumerate(zip(begin, end, stride)): - length = input.size(i) + output = inputs + for i, (b, e, stride_0) in enumerate(zip(begin, end, stride)): + length = inputs.size(i) # begin index is inclusive b = torch.clamp(b, -length, length - 1) # end index is exclusive e = torch.clamp(e, -length - 1, length) - if s > 0: + if stride_0 > 0: b = b + length if b < 0 else b e = e + length if e < 0 else e - indices = torch.arange(b, e, s, device=input.device) + indices = torch.arange(b, e, stride_0, device=inputs.device) else: b = b - length if b >= 0 else b e = e - length if e >= 0 else e - indices = torch.arange(b, e, s, device=input.device) + indices = torch.arange(b, e, stride_0, device=inputs.device) indices += length output = torch.index_select(output, i, indices) @@ -224,46 +258,56 @@ def forward(self, input, begin, end, stride=None): @dataclass class SplitV1Attribute(Attribute): + """SplitV1Attribute class.""" + num_splits: int @OPS.register() class SplitV1(Operation[SplitV1Attribute]): + """SplitV1 class.""" + TYPE = "Split" VERSION = 1 ATTRIBUTE_FACTORY = SplitV1Attribute - def forward(self, input, axis): - split_size = input.shape[axis] // self.attrs.num_splits - return torch.split(tensor=input, split_size_or_sections=split_size, dim=axis) + def forward(self, inputs, axis): + """SplitV1's forward function.""" + split_size = inputs.shape[axis] // self.attrs.num_splits + return torch.split(tensor=inputs, split_size_or_sections=split_size, dim=axis) @dataclass class VariadicSplitV1Attribute(Attribute): - pass + """VariadicSplitV1Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class VariadicSplitV1(Operation[VariadicSplitV1Attribute]): + """VariadicSplitV1 class.""" + TYPE = "VariadicSplit" VERSION = 1 ATTRIBUTE_FACTORY = VariadicSplitV1Attribute - def forward(self, input, axis, split_lengths): + def forward(self, inputs, axis, split_lengths): + """VariadicSplitV1's forward function.""" idx = [i for i, j in enumerate(split_lengths) if j == -1] if idx: assert len(idx) == 1 idx = idx[0] - split_lengths[idx] = input.size(axis) - sum(split_lengths) - 1 - assert input.size(axis) == sum(split_lengths) + split_lengths[idx] = inputs.size(axis) - sum(split_lengths) - 1 + assert inputs.size(axis) == sum(split_lengths) outputs = [] start_idx = 0 for length in split_lengths: outputs.append( torch.index_select( - input, + inputs, axis, - torch.arange(start_idx, start_idx + length, device=input.device), + torch.arange(start_idx, start_idx + length, device=inputs.device), ) ) start_idx += length @@ -272,25 +316,30 @@ def forward(self, input, axis, split_lengths): @dataclass class ShuffleChannelsV0Attribute(Attribute): + """ShuffleChannelsV0Attribute class.""" + axis: int = field(default=1) group: int = field(default=1) @OPS.register() 
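`SplitV1` and `VariadicSplitV1` above reduce to `torch.split` and repeated `torch.index_select` calls. An illustrative check of the same semantics, independent of the classes in this diff:

```python
import torch

x = torch.arange(12).reshape(2, 6)

# SplitV1 with num_splits=3 along dim 1 behaves like torch.split with equal chunks
chunks = torch.split(x, x.shape[1] // 3, dim=1)
assert len(chunks) == 3 and all(c.shape == (2, 2) for c in chunks)

# VariadicSplitV1 with split_lengths=[1, 2, 3] via index_select
lengths, start, outs = [1, 2, 3], 0, []
for length in lengths:
    outs.append(torch.index_select(x, 1, torch.arange(start, start + length)))
    start += length
assert [o.shape[1] for o in outs] == lengths
```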
class ShuffleChannelsV0(Operation[ShuffleChannelsV0Attribute]): + """ShuffleChannelsV0 class.""" + TYPE = "ShuffleChannels" VERSION = 0 ATTRIBUTE_FACTORY = ShuffleChannelsV0Attribute - def forward(self, input): + def forward(self, inputs): + """ShuffleChannelsV0's forward function.""" # n, c, h, w = input.shape - assert input.dim() == 4 - origin_shape = input.shape - origin_dim = input.dim() + assert inputs.dim() == 4 + origin_shape = inputs.shape + origin_dim = inputs.dim() assert origin_shape[self.attrs.axis] % self.attrs.group == 0 axis = self.attrs.axis - axis = axis if axis >= 0 else axis + input.dim() + axis = axis if axis >= 0 else axis + inputs.dim() target_shape = [ 0, @@ -301,14 +350,14 @@ def forward(self, input): if axis == 0: target_shape[0] = 1 target_shape[-1] = math.prod([origin_shape[i] for i in range(axis + 1, origin_dim)]) - elif axis == input.dim() - 1: + elif axis == inputs.dim() - 1: target_shape[0] = math.prod([origin_shape[i] for i in range(0, axis)]) target_shape[-1] = 1 else: target_shape[0] = math.prod([origin_shape[i] for i in range(0, axis)]) target_shape[-1] = math.prod([origin_shape[i] for i in range(axis + 1, origin_dim)]) - output = input.reshape(target_shape) + output = inputs.reshape(target_shape) output = output.permute([0, 2, 1, 3]) output = output.reshape(origin_shape) return output @@ -316,9 +365,12 @@ def forward(self, input): @dataclass class BroadcastV3Attribute(Attribute): + """BroadcastV3Attribute class.""" + mode: str = field(default="numpy") def __post_init__(self): + """BroadcastV3Attribute's post-init function.""" super().__post_init__() valid_mode = ["numpy", "explicit", "bidirectional"] if self.mode not in valid_mode: @@ -327,40 +379,47 @@ def __post_init__(self): @OPS.register() class BroadcastV3(Operation[BroadcastV3Attribute]): + """BroadcastV3 class.""" + TYPE = "Broadcast" VERSION = 3 ATTRIBUTE_FACTORY = BroadcastV3Attribute - def forward(self, input, target_shape, axes_mapping=None): + def forward(self, inputs, target_shape, axes_mapping=None): + """BroadcastV3's forward function.""" if self.attrs.mode == "numpy": - return input.expand(*target_shape) + return inputs.expand(*target_shape) if self.attrs.mode == "bidirectional": - return torch.ones(*target_shape, device=input.device) * input - else: - assert axes_mapping is not None - prev = -1 - for axes in axes_mapping: + return torch.ones(*target_shape, device=inputs.device) * inputs + assert axes_mapping is not None + prev = -1 + for axes in axes_mapping: + prev += 1 + while axes - prev > 0: + inputs = inputs.unsqueeze(axes - 1) prev += 1 - while axes - prev > 0: - input = input.unsqueeze(axes - 1) - prev += 1 - while input.dim() < len(target_shape): - input = input.unsqueeze(-1) - return input.expand(*target_shape) + while inputs.dim() < len(target_shape): + inputs = inputs.unsqueeze(-1) + return inputs.expand(*target_shape) @dataclass class ScatterNDUpdateV3Attribute(Attribute): - pass + """ScatterNDUpdateV3Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class ScatterNDUpdateV3(Operation[ScatterNDUpdateV3Attribute]): + """ScatterNDUpdateV3 class.""" + TYPE = "ScatterNDUpdate" VERSION = 3 ATTRIBUTE_FACTORY = ScatterNDUpdateV3Attribute - def forward(self, input, indicies, updates): + def forward(self, inputs, indicies, updates): + """ScatterNDUpdateV3's forward function.""" # TODO: need to verify if updates.numel() == 1: raise NotImplementedError @@ -369,53 +428,90 @@ def forward(self, input, indicies, updates): last_dim = indicies.shape[-1] assert 
last_dim == 2 assert indicies[..., -2].sum() == 0 - input.shape[indicies.shape[-1] :] + inputs.shape[indicies.shape[-1] :] # pylint: disable=pointless-statement index = indicies[..., -1] - for i in input.shape[indicies.shape[-1] :]: + for i in inputs.shape[indicies.shape[-1] :]: index = index.unsqueeze(-1).tile((i,)) - output = torch.scatter(input, 1, index, updates) + output = torch.scatter(inputs, 1, index, updates) return output @dataclass class ScatterUpdateV3Attribute(Attribute): - pass + """ScatterUpdateV3Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class ScatterUpdateV3(Operation[ScatterUpdateV3Attribute]): + """ScatterUpdateV3 class.""" + TYPE = "ScatterUpdate" VERSION = 3 ATTRIBUTE_FACTORY = ScatterUpdateV3Attribute - def forward(self, input, indicies, updates, axis): + def forward(self, inputs, indicies, updates, axis): + """ScatterUpdateV3's forward function.""" # TODO: need to verify axis = axis.item() - if input.dtype != updates.dtype: - updates = updates.type(input.dtype) + if inputs.dtype != updates.dtype: + updates = updates.type(inputs.dtype) if indicies.dim() == 0: assert axis == 0 - output = input + output = inputs output[indicies] = updates - output = torch.scatter(input, axis, indicies, updates) + output = torch.scatter(inputs, axis, indicies, updates) return output @dataclass class TileV0Attribute(Attribute): - pass + """TileV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class TileV0(Operation[TileV0Attribute]): + """TileV0 class.""" + TYPE = "Tile" VERSION = 0 ATTRIBUTE_FACTORY = TileV0Attribute - def forward(self, input, repeats): - return torch.tile(input, repeats.tolist()) + def forward(self, inputs, repeats): + """TileV0's forward function.""" + return torch.tile(inputs, repeats.tolist()) + + +def get_torch_padding(pads_begin, pads_end, auto_pad, input_size, weight_size, stride, dilation=None): + """Getter function for torch padding.""" + if dilation is None: + dilation = [1 for _ in input_size] + + if auto_pad == "valid": + return 0 + if auto_pad in ("same_upper", "same_lower"): + assert len(set(dilation)) == 1 and dilation[0] == 1 + pads_begin = [] + pads_end = [] + for input_size_, weight_size_, stride_, _ in zip(input_size, weight_size, stride, dilation): + out_size = math.ceil(input_size_ / stride_) + padding_needed = max(0, (out_size - 1) * stride_ + weight_size_ - input_size_) + padding_lhs = int(padding_needed / 2) + padding_rhs = padding_needed - padding_lhs + + pads_begin.append(padding_lhs if auto_pad == "same_upper" else padding_rhs) + pads_end.append(padding_rhs if auto_pad == "same_upper" else padding_lhs) + pad = PadV1.get_torch_pad_dim(pads_begin, pads_end) + return partial(F.pad, pad=pad, mode="constant", value=0) + if auto_pad == "explicit": + pad = PadV1.get_torch_pad_dim(pads_begin, pads_end) + return partial(F.pad, pad=pad, mode="constant", value=0) + raise NotImplementedError diff --git a/otx/mpa/modules/ov/ops/normalizations.py b/otx/core/ov/ops/normalizations.py similarity index 73% rename from otx/mpa/modules/ov/ops/normalizations.py rename to otx/core/ov/ops/normalizations.py index 5d0df82d163..7098f3541f3 100644 --- a/otx/mpa/modules/ov/ops/normalizations.py +++ b/otx/core/ov/ops/normalizations.py @@ -1,25 +1,30 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Normalization-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field 
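`get_torch_padding` above implements OpenVINO's `same_upper`/`same_lower` rule: pad so that the output spatial size is `ceil(input / stride)`, with the smaller half of the padding leading for `same_upper`. A minimal reimplementation of just that arithmetic, assuming dilation 1:

```python
import math


def same_upper_pads(input_size, weight_size, stride):
    pads_begin, pads_end = [], []
    for in_, k, s in zip(input_size, weight_size, stride):
        out = math.ceil(in_ / s)
        needed = max(0, (out - 1) * s + k - in_)
        lhs = needed // 2
        pads_begin.append(lhs)  # same_upper puts the smaller pad first
        pads_end.append(needed - lhs)
    return pads_begin, pads_end


# 5x5 input, 3x3 kernel, stride 1 -> symmetric 1-pixel padding
assert same_upper_pads([5, 5], [3, 3], [1, 1]) == ([1, 1], [1, 1])
# even kernel: the extra pixel goes to the trailing side for same_upper
assert same_upper_pads([5], [4], [1]) == ([1], [2])
```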
import torch from torch.nn import functional as F -from .builder import OPS -from .op import Attribute, Operation -from .poolings import AvgPoolV1 +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation +from otx.core.ov.ops.poolings import AvgPoolV1 @dataclass class BatchNormalizationV0Attribute(Attribute): + """BatchNormalizationV0Attribute class.""" + epsilon: float max_init_iter: int = field(default=2) @OPS.register() class BatchNormalizationV0(Operation[BatchNormalizationV0Attribute]): + """BatchNormalizationV0 class.""" + TYPE = "BatchNormInference" VERSION = 0 ATTRIBUTE_FACTORY = BatchNormalizationV0Attribute @@ -28,10 +33,11 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.register_buffer("_num_init_iter", torch.tensor(0)) - def forward(self, input, gamma, beta, mean, variance): + def forward(self, inputs, gamma, beta, mean, variance): + """BatchNormalizationV0's forward function.""" output = F.batch_norm( - input=input, + input=inputs, running_mean=mean, running_var=variance, weight=gamma, @@ -42,13 +48,13 @@ def forward(self, input, gamma, beta, mean, variance): ) if self.training and self._num_init_iter < self.attrs.max_init_iter: - n_dims = input.dim() - 2 + n_dims = inputs.dim() - 2 gamma = gamma.unsqueeze(0) beta = beta.unsqueeze(0) for _ in range(n_dims): gamma = gamma.unsqueeze(-1) beta = beta.unsqueeze(-1) - output = input * gamma + beta + output = inputs * gamma + beta self._num_init_iter += 1 if self._num_init_iter >= self.attrs.max_init_iter: # Adapt weight & bias using the first batch statistics @@ -61,6 +67,8 @@ def forward(self, input, gamma, beta, mean, variance): @dataclass class LocalResponseNormalizationV0Attribute(Attribute): + """LocalResponseNormalizationV0Attribute class.""" + alpha: float beta: float bias: float @@ -69,12 +77,15 @@ class LocalResponseNormalizationV0Attribute(Attribute): @OPS.register() class LocalResponseNormalizationV0(Operation[LocalResponseNormalizationV0Attribute]): + """LocalResponseNormalizationV0 class.""" + TYPE = "LRN" VERSION = 0 ATTRIBUTE_FACTORY = LocalResponseNormalizationV0Attribute - def forward(self, input, axes): - dim = input.dim() + def forward(self, inputs, axes): + """LocalResponseNormalizationV0's forward function.""" + dim = inputs.dim() axes = axes.detach().cpu().tolist() assert all(ax >= 1 for ax in axes) @@ -84,10 +95,10 @@ def forward(self, input, axes): stride = [1 for _ in range(dim - 1)] pads_begin = [0 for _ in range(dim - 1)] pads_end = [0 for _ in range(dim - 1)] - for ax in axes: - kernel[ax] = self.attrs.size - pads_begin[ax] = self.attrs.size // 2 - pads_end[ax] = (self.attrs.size - 1) // 2 + for axe in axes: + kernel[axe] = self.attrs.size + pads_begin[axe] = self.attrs.size // 2 + pads_end[axe] = (self.attrs.size - 1) // 2 avg_attrs = { "auto_pad": "explicit", @@ -100,20 +111,23 @@ def forward(self, input, axes): } avg_pool = AvgPoolV1("temp", **avg_attrs) - div = input.mul(input).unsqueeze(1) + div = inputs.mul(inputs).unsqueeze(1) div = avg_pool(div) div = div.squeeze(1) div = div.mul(self.attrs.alpha).add(self.attrs.bias).pow(self.attrs.beta) - output = input / div + output = inputs / div return output @dataclass class NormalizeL2V0Attribute(Attribute): + """NormalizeL2V0Attribute class.""" + eps: float eps_mode: str def __post_init__(self): + """NormalizeL2V0Attribute post-init function.""" super().__post_init__() valid_eps_mode = ["add", "max"] if self.eps_mode not in valid_eps_mode: @@ -122,11 +136,14 @@ def __post_init__(self): 
@OPS.register() class NormalizeL2V0(Operation[NormalizeL2V0Attribute]): + """NormalizeL2V0 class.""" + TYPE = "NormalizeL2" VERSION = 0 ATTRIBUTE_FACTORY = NormalizeL2V0Attribute - def forward(self, input, axes): + def forward(self, inputs, axes): + """NormalizeL2V0's forward function.""" eps = self.attrs.eps eps_mode = self.attrs.eps_mode @@ -136,7 +153,7 @@ def forward(self, input, axes): axes = [axes] # normalization layer convert to FP32 in FP16 training - input_float = input.float() + input_float = inputs.float() if axes: norm = input_float.pow(2).sum(axes, keepdim=True) else: @@ -147,16 +164,19 @@ def forward(self, input, axes): elif eps_mode == "max": norm = torch.clamp(norm, max=eps) - return (input_float / norm.sqrt()).type_as(input) + return (input_float / norm.sqrt()).type_as(inputs) @dataclass class MVNV6Attribute(Attribute): + """MVNV6Attribute class.""" + normalize_variance: bool eps: float eps_mode: str def __post_init__(self): + """MVNV6Attribute's post-init function.""" super().__post_init__() valid_eps_mode = ["INSIDE_SQRT", "OUTSIDE_SQRT"] if self.eps_mode not in valid_eps_mode: @@ -165,12 +185,15 @@ def __post_init__(self): @OPS.register() class MVNV6(Operation[MVNV6Attribute]): + """MVNV6 class.""" + TYPE = "MVN" VERSION = 6 ATTRIBUTE_FACTORY = MVNV6Attribute - def forward(self, input, axes): - output = input - input.mean(axes.tolist(), keepdim=True) + def forward(self, inputs, axes): + """MVNV6's forward function.""" + output = inputs - inputs.mean(axes.tolist(), keepdim=True) if self.attrs.normalize_variance: eps_mode = self.attrs.eps_mode eps = self.attrs.eps diff --git a/otx/mpa/modules/ov/ops/object_detections.py b/otx/core/ov/ops/object_detections.py similarity index 80% rename from otx/mpa/modules/ov/ops/object_detections.py rename to otx/core/ov/ops/object_detections.py index 70b4c89a9ca..5d2bfe495e3 100644 --- a/otx/mpa/modules/ov/ops/object_detections.py +++ b/otx/core/ov/ops/object_detections.py @@ -1,16 +1,21 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Object-detection-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field from typing import List, Optional -from .builder import OPS -from .op import Attribute, Operation +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation + +# pylint: disable=too-many-instance-attributes @dataclass class ProposalV4Attribute(Attribute): + """ProposalV4Attribute class.""" + base_size: int pre_nms_topn: int post_nms_topn: int @@ -27,6 +32,7 @@ class ProposalV4Attribute(Attribute): framework: str = field(default="") def __post_init__(self): + """ProposalV4Attribute's post-init function.""" super().__post_init__() valid_framework = ["", "tensorflow"] if self.framework not in valid_framework: @@ -35,32 +41,21 @@ def __post_init__(self): @OPS.register() class ProposalV4(Operation[ProposalV4Attribute]): + """ProposalV4 class.""" + TYPE = "Proposal" VERSION = 4 ATTRIBUTE_FACTORY = ProposalV4Attribute - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # from mmdet.core.anchor.anchor_generator import AnchorGenerator - # self._anchor_generator = AnchorGenerator( - # strides=[attrs["feat_stride"]], - # ratios=attrs["ratio"], - # scales=attrs["scale"], - # base_sizes=[attrs["base_size"]], - # ) - - # from torchvision.models.detection.anchor_utils import AnchorGenerator - # self._anchor_generator = AnchorGenerator( - # 
sizes=(self.attrs["base_size"],), - # aspect_ratios= - def forward(self, class_probs, bbox_deltas, image_shape): + """ProposalV4's forward function.""" raise NotImplementedError @dataclass class ROIPoolingV0Attribute(Attribute): + """ROIPoolingV0Attribute class.""" + pooled_h: int pooled_w: int spatial_scale: float @@ -68,6 +63,7 @@ class ROIPoolingV0Attribute(Attribute): output_size: List[int] = field(default_factory=lambda: []) def __post_init__(self): + """ROIPoolingV0Attribute's post-init function.""" super().__post_init__() valid_method = ["max", "bilinear"] if self.method not in valid_method: @@ -76,16 +72,21 @@ def __post_init__(self): @OPS.register() class ROIPoolingV0(Operation[ROIPoolingV0Attribute]): + """ROIPoolingV0 class.""" + TYPE = "ROIPooling" VERSION = 0 ATTRIBUTE_FACTORY = ROIPoolingV0Attribute - def forward(self, input, boxes): + def forward(self, inputs, boxes): + """ROIPoolingV0's forward function.""" raise NotImplementedError @dataclass class DetectionOutputV0Attribute(Attribute): + """DetectionOutputV0Attribute class.""" + keep_top_k: List[int] nms_threshold: float background_label_id: int = field(default=0) @@ -103,6 +104,7 @@ class DetectionOutputV0Attribute(Attribute): objectness_score: float = field(default=0) def __post_init__(self): + """DetectionOutputV0Attribute's post-init function.""" super().__post_init__() valid_code_type = [ "caffe.PriorBoxParameter.CORNER", @@ -114,16 +116,21 @@ def __post_init__(self): @OPS.register() class DetectionOutputV0(Operation[DetectionOutputV0Attribute]): + """DetectionOutputV0 class.""" + TYPE = "DetectionOutput" VERSION = 0 ATTRIBUTE_FACTORY = DetectionOutputV0Attribute def forward(self, loc_data, conf_data, prior_data, arm_conf_data=None, arm_loc_data=None): + """DetectionOutputV0's forward.""" raise NotImplementedError @dataclass class RegionYoloV0Attribute(Attribute): + """RegionYoloV0Attribute class.""" + axis: int coords: int classes: int @@ -136,16 +143,21 @@ class RegionYoloV0Attribute(Attribute): @OPS.register() class RegionYoloV0(Operation[RegionYoloV0Attribute]): + """RegionYoloV0 class.""" + TYPE = "RegionYolo" VERSION = 0 ATTRIBUTE_FACTORY = RegionYoloV0Attribute - def forward(self, input): + def forward(self, inputs): + """RegionYoloV0's forward function.""" raise NotImplementedError @dataclass class PriorBoxV0Attribute(Attribute): + """PriorBoxV0Attribute class.""" + offset: float min_size: List[float] = field(default_factory=lambda: []) max_size: List[float] = field(default_factory=lambda: []) @@ -162,16 +174,21 @@ class PriorBoxV0Attribute(Attribute): @OPS.register() class PriorBoxV0(Operation[PriorBoxV0Attribute]): + """PriorBoxV0 class.""" + TYPE = "PriorBox" VERSION = 0 ATTRIBUTE_FACTORY = PriorBoxV0Attribute def forward(self, output_size, image_size): + """PriorBoxV0's forward function.""" raise NotImplementedError @dataclass class PriorBoxClusteredV0Attribute(Attribute): + """PriorBoxClusteredV0Attribute class.""" + offset: float width: List[float] = field(default_factory=lambda: [1.0]) height: List[float] = field(default_factory=lambda: [1.0]) @@ -184,9 +201,12 @@ class PriorBoxClusteredV0Attribute(Attribute): @OPS.register() class PriorBoxClusteredV0(Operation[PriorBoxClusteredV0Attribute]): + """PriorBoxClusteredV0 class.""" + TYPE = "PriorBoxClustered" VERSION = 0 ATTRIBUTE_FACTORY = PriorBoxClusteredV0Attribute def forward(self, output_size, image_size): + """PriorBoxClusteredV0's forward function.""" raise NotImplementedError diff --git a/otx/mpa/modules/ov/ops/op.py b/otx/core/ov/ops/op.py 
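Since every op follows the `Operation(name, **attrs)` pattern (compare `PadV1("tmp", shape=..., pad_mode=...)` earlier in the diff), a registered op can be exercised directly. A hedged sketch, assuming the constructor forwards keyword arguments to `ATTRIBUTE_FACTORY` as that usage suggests:

```python
import torch

from otx.core.ov.ops.activations import ClampV0  # new module path from this diff

# shape is the base Attribute field; min/max populate ClampV0Attribute.
clamp = ClampV0("my_clamp", shape=((1, 3),), min=0.0, max=1.0)

out = clamp(torch.tensor([[-0.5, 0.25, 2.0]]))
assert out.tolist() == [[0.0, 0.25, 1.0]]
assert clamp.type == "Clamp" and clamp.attrs.min == 0.0
```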
similarity index 64% rename from otx/mpa/modules/ov/ops/op.py rename to otx/core/ov/ops/op.py index 3658338f831..ee43731a2e9 100644 --- a/otx/mpa/modules/ov/ops/op.py +++ b/otx/core/ov/ops/op.py @@ -1,6 +1,7 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Operation-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT import re from dataclasses import dataclass, fields @@ -8,15 +9,18 @@ import torch -from ..utils import get_op_name +from ..utils import get_op_name # type: ignore[attr-defined] from .utils import get_dynamic_shape @dataclass class Attribute: + """Attribute class.""" + shape: Optional[Union[Tuple[Tuple[int]], Tuple[int]]] def __post_init__(self): + """Attribute's post-init function.""" if self.shape is not None and not isinstance(self.shape, tuple): raise ValueError("shape must be a tuple of ints or a tuple of tuples of ints.") @@ -24,10 +28,12 @@ def __post_init__(self): _T = TypeVar("_T", bound=Attribute) -class Operation(torch.nn.Module, Generic[_T]): +class Operation(torch.nn.Module, Generic[_T]): # pylint: disable=abstract-method, invalid-overridden-method + """Operation class.""" + TYPE = "" VERSION = -1 - ATTRIBUTE_FACTORY: Type[_T] = Attribute + ATTRIBUTE_FACTORY: Type[Attribute] = Attribute def __init__(self, name: str, **kwargs): super().__init__() @@ -36,6 +42,7 @@ def __init__(self, name: str, **kwargs): @classmethod def from_ov(cls, ov_op): + """Operation's from_ov function.""" op_type = ov_op.get_type_name() op_version = ov_op.get_version() op_name = get_op_name(ov_op) @@ -54,38 +61,40 @@ def from_ov(cls, ov_op): return cls(name=op_name, **attrs) @property - def type(self) -> str: + def type(self) -> str: # pylint: disable=invalid-overridden-method + """Operation's type property.""" return self.TYPE @property def version(self) -> int: + """Operation's version property.""" return self.VERSION @property def name(self) -> str: + """Operation's name property.""" return self._name @property def attrs(self): + """Operation's attrs property.""" return self._attrs @property def shape(self) -> Optional[Union[Tuple[Tuple[int]], Tuple[int]]]: + """Operation's shape property.""" return self.attrs.shape - # shape = self.attrs.get("shape", None) - # if shape is not None and len(shape) == 1: - # shape = shape[0] - # return shape def __repr__(self): - repr = f"{self.__class__.__name__}(" - repr += f"name={self.name}, " + """Operation's __repr__ function.""" + repr_str = f"{self.__class__.__name__}(" + repr_str += f"name={self.name}, " for field in fields(self.attrs): key = field.name if key == "shape": continue value = getattr(self.attrs, key) - repr += f"{key}={value}, " - repr = re.sub(", $", "", repr) - repr += ")" - return repr + repr_str += f"{key}={value}, " + repr_str = re.sub(", $", "", repr_str) + repr_str += ")" + return repr_str diff --git a/otx/mpa/modules/ov/ops/poolings.py b/otx/core/ov/ops/poolings.py similarity index 79% rename from otx/mpa/modules/ov/ops/poolings.py rename to otx/core/ov/ops/poolings.py index bd9752fccf3..1431d194f12 100644 --- a/otx/mpa/modules/ov/ops/poolings.py +++ b/otx/core/ov/ops/poolings.py @@ -1,19 +1,24 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Pooling-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field from typing import Callable, List from torch.nn import functional as F -from .builder import 
OPS -from .op import Attribute, Operation -from .utils import get_torch_padding +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.movements import get_torch_padding +from otx.core.ov.ops.op import Attribute, Operation + +# pylint: disable=too-many-instance-attributes @dataclass class MaxPoolV0Attribute(Attribute): + """MaxPoolV0Attribute class.""" + strides: List[int] pads_begin: List[int] pads_end: List[int] @@ -25,6 +30,7 @@ class MaxPoolV0Attribute(Attribute): axis: int = field(default=0) def __post_init__(self): + """MaxPoolV0Attribute's post-init functions.""" super().__post_init__() valid_auto_pad = ["explicit", "same_upper", "same_Lower", "valid"] if self.auto_pad not in valid_auto_pad: @@ -50,17 +56,19 @@ def __post_init__(self): @OPS.register() class MaxPoolV0(Operation[MaxPoolV0Attribute]): + """MaxPoolV0 class.""" + TYPE = "MaxPool" VERSION = 0 ATTRIBUTE_FACTORY = MaxPoolV0Attribute - def forward(self, input): - - if input.dim() == 3: + def forward(self, inputs): + """MaxPoolV0's forward function.""" + if inputs.dim() == 3: func = F.max_pool1d - elif input.dim() == 4: + elif inputs.dim() == 4: func = F.max_pool2d - elif input.dim() == 5: + elif inputs.dim() == 5: func = F.max_pool3d else: raise NotImplementedError @@ -69,16 +77,16 @@ def forward(self, input): self.attrs.pads_begin, self.attrs.pads_end, self.attrs.auto_pad, - list(input.shape[2:]), + list(inputs.shape[2:]), self.attrs.kernel, self.attrs.strides, ) if isinstance(padding, Callable): - input = padding(input=input) + inputs = padding(input=inputs) padding = 0 return func( - input=input, + input=inputs, kernel_size=self.attrs.kernel, stride=self.attrs.strides, padding=padding, @@ -90,6 +98,8 @@ def forward(self, input): @dataclass class AvgPoolV1Attribute(Attribute): + """AvgPoolV1Attribute class.""" + exclude_pad: bool strides: List[int] pads_begin: List[int] @@ -99,6 +109,7 @@ class AvgPoolV1Attribute(Attribute): auto_pad: str = field(default="explicit") def __post_init__(self): + """AvgPoolV1Attribute's post-init function.""" super().__post_init__() valid_auto_pad = ["explicit", "same_upper", "same_Lower", "valid"] if self.auto_pad not in valid_auto_pad: @@ -112,6 +123,8 @@ def __post_init__(self): @OPS.register() class AvgPoolV1(Operation[AvgPoolV1Attribute]): + """AvgPoolV1 class.""" + TYPE = "AvgPool" VERSION = 1 ATTRIBUTE_FACTORY = AvgPoolV1Attribute @@ -121,12 +134,13 @@ def __init__(self, *args, **kwargs): kwargs["exclude_pad"] = kwargs.pop("exclude-pad") super().__init__(*args, **kwargs) - def forward(self, input): - if input.dim() == 3: + def forward(self, inputs): + """AvgPoolV1's forward function.""" + if inputs.dim() == 3: func = F.avg_pool1d - elif input.dim() == 4: + elif inputs.dim() == 4: func = F.avg_pool2d - elif input.dim() == 5: + elif inputs.dim() == 5: func = F.avg_pool3d else: raise NotImplementedError @@ -135,16 +149,16 @@ def forward(self, input): self.attrs.pads_begin, self.attrs.pads_end, self.attrs.auto_pad, - list(input.shape[2:]), + list(inputs.shape[2:]), self.attrs.kernel, self.attrs.strides, ) if isinstance(padding, Callable): - input = padding(input=input) + inputs = padding(input=inputs) padding = 0 return func( - input=input, + input=inputs, kernel_size=self.attrs.kernel, stride=self.attrs.strides, padding=padding, diff --git a/otx/mpa/modules/ov/ops/reductions.py b/otx/core/ov/ops/reductions.py similarity index 58% rename from otx/mpa/modules/ov/ops/reductions.py rename to otx/core/ov/ops/reductions.py index ffffe451370..130d767c0a7 100644 --- 
a/otx/mpa/modules/ov/ops/reductions.py +++ b/otx/core/ov/ops/reductions.py @@ -1,61 +1,72 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Reduction-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field import torch -from .builder import OPS -from .op import Attribute, Operation +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation @dataclass class ReduceMeanV1Attribute(Attribute): + """ReduceMeanV1Attribute class.""" + keep_dims: bool = field(default=False) @OPS.register() class ReduceMeanV1(Operation[ReduceMeanV1Attribute]): + """ReduceMeanV1 class.""" + TYPE = "ReduceMean" VERSION = 1 ATTRIBUTE_FACTORY = ReduceMeanV1Attribute - def forward(self, input, axes): + def forward(self, inputs, axes): + """ReduceMeanV1's forward function.""" if isinstance(axes, torch.Tensor): axes = axes.tolist() if not axes: - return input + return inputs if not isinstance(axes, (list, tuple)): axes = [axes] - return torch.mean(input=input, dim=axes, keepdim=self.attrs.keep_dims) + return torch.mean(input=inputs, dim=axes, keepdim=self.attrs.keep_dims) @dataclass class ReduceProdV1Attribute(Attribute): + """ReduceProdV1Attribute class.""" + keep_dims: bool = field(default=False) @OPS.register() class ReduceProdV1(Operation[ReduceProdV1Attribute]): + """ReduceProdV1 class.""" + TYPE = "ReduceProd" VERSION = 1 ATTRIBUTE_FACTORY = ReduceProdV1Attribute - def forward(self, input, axes): + def forward(self, inputs, axes): + """ReduceProdV1's forward function.""" if isinstance(axes, torch.Tensor): axes = axes.tolist() if not axes: - return input + return inputs if not isinstance(axes, (list, tuple)): axes = [axes] - output = input - for ax in axes: - output = torch.prod(input=output, dim=ax, keepdim=True) + output = inputs + for axe in axes: + output = torch.prod(input=output, dim=axe, keepdim=True) if not self.attrs.keep_dims: output = torch.squeeze(output) @@ -64,27 +75,32 @@ def forward(self, input, axes): @dataclass class ReduceMinV1Attribute(Attribute): + """ReduceMinV1Attribute class.""" + keep_dims: bool = field(default=False) @OPS.register() class ReduceMinV1(Operation[ReduceMinV1Attribute]): + """ReduceMinV1 class.""" + TYPE = "ReduceMin" VERSION = 1 ATTRIBUTE_FACTORY = ReduceMinV1Attribute - def forward(self, input, axes): + def forward(self, inputs, axes): + """ReduceMinV1's forward function.""" if isinstance(axes, torch.Tensor): axes = axes.tolist() if not axes: - return input + return inputs if not isinstance(axes, (list, tuple)): axes = [axes] - output = input - for ax in axes: - output = torch.min(input=output, dim=ax, keepdim=True)[0] + output = inputs + for axe in axes: + output = torch.min(input=output, dim=axe, keepdim=True)[0] if not self.attrs.keep_dims: output = torch.squeeze(output) @@ -93,19 +109,24 @@ def forward(self, input, axes): @dataclass class ReduceSumV1Attribute(Attribute): + """ReduceSumV1Attribute class.""" + keep_dims: bool = field(default=False) @OPS.register() class ReduceSumV1(Operation[ReduceSumV1Attribute]): + """ReduceSumV1 class.""" + TYPE = "ReduceSum" VERSION = 1 ATTRIBUTE_FACTORY = ReduceSumV1Attribute - def forward(self, input, axes): + def forward(self, inputs, axes): + """ReduceSumV1's forward function.""" if isinstance(axes, torch.Tensor): axes = axes.tolist() if not axes: - return input + return inputs - return torch.sum(input=input, dim=axes, keepdim=self.attrs.keep_dims)
+ return torch.sum(input=inputs, dim=axes, keepdim=self.attrs.keep_dims) diff --git a/otx/mpa/modules/ov/ops/shape_manipulations.py b/otx/core/ov/ops/shape_manipulations.py similarity index 64% rename from otx/mpa/modules/ov/ops/shape_manipulations.py rename to otx/core/ov/ops/shape_manipulations.py index 3e14b204a2a..7eb9149225a 100644 --- a/otx/mpa/modules/ov/ops/shape_manipulations.py +++ b/otx/core/ov/ops/shape_manipulations.py @@ -1,41 +1,47 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Shape-manipulation-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field import torch -from .builder import OPS -from .op import Attribute, Operation -from .type_conversions import ConvertV0 +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation +from otx.core.ov.ops.type_conversions import ConvertV0 @dataclass class SqueezeV0Attribute(Attribute): - pass + """SqueezeV0Attribute class.""" + + pass  # pylint: disable=unnecessary-pass @OPS.register() class SqueezeV0(Operation[SqueezeV0Attribute]): + """SqueezeV0 class.""" + TYPE = "Squeeze" VERSION = 0 ATTRIBUTE_FACTORY = SqueezeV0Attribute - def forward(self, input, dims=None): + def forward(self, inputs, dims=None): + """SqueezeV0's forward function.""" if dims is None: - return torch.squeeze(input) + return torch.squeeze(inputs) if dims.dim() == 0: dims = torch.unsqueeze(dims, 0) - max_dim = input.dim() + max_dim = inputs.dim() dims = dims.detach().cpu().tolist() for i, dim in enumerate(dims): if dim < 0: dims[i] = max_dim + dim - output = input + output = inputs for dim in sorted(dims, reverse=True): output = torch.squeeze(output, dim) @@ -44,28 +50,32 @@ def forward(self, input, dims=None): @dataclass class UnsqueezeV0Attribute(Attribute): - pass + """UnsqueezeV0Attribute class.""" + + pass  # pylint: disable=unnecessary-pass @OPS.register() class UnsqueezeV0(Operation[UnsqueezeV0Attribute]): + """UnsqueezeV0 class.""" + TYPE = "Unsqueeze" VERSION = 0 ATTRIBUTE_FACTORY = UnsqueezeV0Attribute - def forward(self, input, dims): - + def forward(self, inputs, dims): + """UnsqueezeV0's forward function.""" if dims.dim() == 0: dims = torch.unsqueeze(dims, 0) - max_dim = input.dim() + max_dim = inputs.dim() dims = dims.detach().cpu().tolist() if len(dims) > 1: for i, dim in enumerate(dims): if dim < 0: dims[i] = max_dim + dim - output = input + output = inputs for dim in sorted(dims, reverse=True): output = torch.unsqueeze(output, dim) @@ -74,18 +84,23 @@ def forward(self, input, dims): @dataclass class ReshapeV1Attribute(Attribute): + """ReshapeV1Attribute class.""" + special_zero: bool @OPS.register() class ReshapeV1(Operation[ReshapeV1Attribute]): + """ReshapeV1 class.""" + TYPE = "Reshape" VERSION = 1 ATTRIBUTE_FACTORY = ReshapeV1Attribute - def forward(self, input, shape): + def forward(self, inputs, shape): + """ReshapeV1's forward function.""" target_shape = shape.detach().cpu().tolist() - origin_shape = list(input.shape) + origin_shape = list(inputs.shape) for i, (origin_dim, target_dim) in enumerate(zip(origin_shape, target_shape)): if target_dim == 0 and self.attrs.special_zero: target_shape[i] = origin_dim @@ -96,29 +111,37 @@ def forward(self, input, shape): target_shape[i] = origin_dim elif target_dim == -1: break - return torch.reshape(input, target_shape) + return torch.reshape(inputs, target_shape) @dataclass class ShapeOfV0Attribute(Attribute): - pass + 
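For reference, the reduce ops above are thin wrappers over the matching torch reductions. A minimal standalone sketch of the semantics they implement (pure torch, no OTX imports; tensor values and axes are illustrative only):

```python
import torch

x = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)

# ReduceMeanV1.forward reduces all listed axes in a single torch.mean call.
mean_out = torch.mean(x, dim=[1, 2], keepdim=False)
assert mean_out.shape == (2,)

# ReduceProdV1.forward reduces one axis at a time with keepdim=True and
# squeezes at the end when keep_dims is False, so the result shape matches.
prod_out = x
for axis in [1, 2]:
    prod_out = torch.prod(prod_out, dim=axis, keepdim=True)
prod_out = torch.squeeze(prod_out)
assert prod_out.shape == (2,)
```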
"""ShapeOfV0Attribute class.""" + + pass # pylint: disable=unnecessary-pass @OPS.register() class ShapeOfV0(Operation[ShapeOfV0Attribute]): + """ShapeOfV0 class.""" + TYPE = "ShapeOf" VERSION = 0 ATTRIBUTE_FACTORY = ShapeOfV0Attribute - def forward(self, input): - return torch.tensor(input.shape, device=input.device) + def forward(self, inputs): + """ShapeOfV0's forward function.""" + return torch.tensor(inputs.shape, device=inputs.device) @dataclass class ShapeOfV3Attribute(Attribute): + """ShapeOfV3Attribute class.""" + output_type: str = field(default="i64") def __post_init__(self): + """ShapeOfV3Attribute's post-init function.""" super().__post_init__() valid_output_type = ["i64", "i32"] if self.output_type not in valid_output_type: @@ -127,11 +150,14 @@ def __post_init__(self): @OPS.register() class ShapeOfV3(Operation[ShapeOfV3Attribute]): + """ShapeOfV3 class.""" + TYPE = "ShapeOf" VERSION = 3 ATTRIBUTE_FACTORY = ShapeOfV3Attribute - def forward(self, input): + def forward(self, inputs): + """ShapeOfV3's forward function.""" return ConvertV0("temp", shape=self.shape, destination_type=self.attrs.output_type)( - torch.tensor(input.shape, device=input.device) + torch.tensor(inputs.shape, device=inputs.device) ) diff --git a/otx/mpa/modules/ov/ops/sorting_maximization.py b/otx/core/ov/ops/sorting_maximization.py similarity index 76% rename from otx/mpa/modules/ov/ops/sorting_maximization.py rename to otx/core/ov/ops/sorting_maximization.py index 53d2f880d72..b51a463b4ac 100644 --- a/otx/mpa/modules/ov/ops/sorting_maximization.py +++ b/otx/core/ov/ops/sorting_maximization.py @@ -1,21 +1,25 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Sorting-maximization-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass, field -from .builder import OPS -from .op import Attribute, Operation +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation @dataclass class TopKV3Attribute(Attribute): + """TopKV3Attribute class.""" + axis: int mode: str sort: str index_element_type: str = field(default="i32") def __post_init__(self): + """TopKV3Attribute's post-init function.""" super().__post_init__() valid_mode = ["min", "max"] if self.mode not in valid_mode: @@ -35,30 +39,21 @@ def __post_init__(self): @OPS.register() class TopKV3(Operation[TopKV3Attribute]): + """TopKV3 class.""" + TYPE = "TopK" VERSION = 3 ATTRIBUTE_FACTORY = TopKV3Attribute - def forward(self, input, k): + def forward(self, inputs, k): + """TopKV3's forward function.""" raise NotImplementedError - # values, indices = torch.topk( - # input=input, - # k=k, - # dim=self.attrs.axis, - # largest=self.attrs.mode == "max", - # sorted=True, - # ) - # - # if self.attrs.sort == "index": - # sorted = torch.argsort(indices) - # indices = indices[sorted] - # values = values[sorted] - # - # return values, indices @dataclass class NonMaxSuppressionV5Attribute(Attribute): + """NonMaxSuppressionV5Attribute class.""" + box_encoding: str = field(default="corner") sort_result_descending: bool = field(default=True) output_type: str = field(default="i64") @@ -66,6 +61,8 @@ class NonMaxSuppressionV5Attribute(Attribute): @OPS.register() class NonMaxSuppressionV5(Operation[NonMaxSuppressionV5Attribute]): + """NonMaxSuppressionV5 class.""" + TYPE = "NonMaxSuppression" VERSION = 5 ATTRIBUTE_FACTORY = NonMaxSuppressionV5Attribute @@ -79,11 +76,14 @@ def forward( score_threshold=0, soft_nms_sigma=0, 
): + """NonMaxSuppressionV5's forward function.""" raise NotImplementedError @dataclass class NonMaxSuppressionV9Attribute(Attribute): + """NonMaxSuppressionV9Attribute class.""" + box_encoding: str = field(default="corner") sort_result_descending: bool = field(default=True) output_type: str = field(default="i64") @@ -91,6 +91,8 @@ class NonMaxSuppressionV9Attribute(Attribute): @OPS.register() class NonMaxSuppressionV9(Operation[NonMaxSuppressionV9Attribute]): + """NonMaxSuppressionV9 class.""" + TYPE = "NonMaxSuppression" VERSION = 9 ATTRIBUTE_FACTORY = NonMaxSuppressionV9Attribute @@ -104,4 +106,5 @@ def forward( score_threshold=0, soft_nms_sigma=0, ): + """NonMaxSuppressionV9's forward function.""" raise NotImplementedError diff --git a/otx/mpa/modules/ov/ops/type_conversions.py b/otx/core/ov/ops/type_conversions.py similarity index 70% rename from otx/mpa/modules/ov/ops/type_conversions.py rename to otx/core/ov/ops/type_conversions.py index f120739076a..9b81d0d1ded 100644 --- a/otx/mpa/modules/ov/ops/type_conversions.py +++ b/otx/core/ov/ops/type_conversions.py @@ -1,13 +1,14 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Type-conversion-related modules for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from dataclasses import dataclass import torch -from .builder import OPS -from .op import Attribute, Operation +from otx.core.ov.ops.builder import OPS +from otx.core.ov.ops.op import Attribute, Operation _torch_to_ov = { torch.uint8: ["u1", "u4", "u8"], @@ -39,26 +40,33 @@ @dataclass class ConvertV0Attribute(Attribute): + """ConvertV0Attribute class.""" + destination_type: str @OPS.register() class ConvertV0(Operation[ConvertV0Attribute]): + """ConvertV0 class.""" + TYPE = "Convert" VERSION = 0 ATTRIBUTE_FACTORY = ConvertV0Attribute @staticmethod def convert_ov_type(ov_type): + """ConvertV0's convert_ov_type function.""" if ov_type not in _ov_to_torch: raise NotImplementedError return _ov_to_torch[ov_type] @staticmethod def convert_torch_type(torch_type): + """ConvertV0's convert_torch_type function.""" if torch_type not in _torch_to_ov: raise NotImplementedError return _torch_to_ov[torch_type][-1] - def forward(self, input): - return input.type(self.convert_ov_type(self.attrs.destination_type)) + def forward(self, inputs): + """ConvertV0's forward function.""" + return inputs.type(self.convert_ov_type(self.attrs.destination_type)) diff --git a/otx/core/ov/ops/utils.py b/otx/core/ov/ops/utils.py new file mode 100644 index 00000000000..a5cf201a581 --- /dev/null +++ b/otx/core/ov/ops/utils.py @@ -0,0 +1,39 @@ +"""Utils function for otx.core.ov.ops.""" +# Copyright (C) 2023 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from openvino.pyopenvino import Node # pylint: disable=no-name-in-module + +from .builder import OPS + +# TODO: We moved the location of otx.mpa.utils.logger, we need to revert the logger in that code again. 
+ + +def get_dynamic_shape(output): + """Return output's partial shape as ints, using -1 for dynamic dimensions.""" + shape = [str(i) for i in output.get_partial_shape()] + for i, shape_ in enumerate(shape): + try: + shape_ = int(shape_) + except ValueError: + shape_ = -1 + shape[i] = shape_ + return shape + + +def convert_op_to_torch(op_node: Node): + """Convert an OpenVINO op Node to its torch module counterpart.""" + op_type = op_node.get_type_name() + op_version = op_node.get_version() + + try: + torch_module = OPS.get_by_type_version(op_type, op_version).from_ov(op_node) + except Exception as e: + # logger.error(e) + # logger.error(op_type) + # logger.error(op_version) + # logger.error(op_node.get_attributes()) + raise e + + return torch_module diff --git a/otx/mpa/modules/ov/registry.py b/otx/core/ov/registry.py similarity index 67% rename from otx/mpa/modules/ov/registry.py rename to otx/core/ov/registry.py index a7e40ad45c4..2d790debe5b 100644 --- a/otx/mpa/modules/ov/registry.py +++ b/otx/core/ov/registry.py @@ -1,11 +1,14 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +"""Registry Class for otx.core.ov.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT from typing import Any, Dict, Optional class Registry: + """Registry Class for OMZ model.""" + REGISTERED_NAME_ATTR = "_registered_name" def __init__(self, name, add_name_as_attr=False): @@ -15,14 +18,18 @@ def __init__(self, name, add_name_as_attr=False): @property def registry_dict(self) -> Dict[Any, Any]: + """Dictionary of registered modules.""" return self._registry_dict def _register(self, obj: Any, name: Any): + """Register obj with name.""" if name in self._registry_dict: - raise KeyError("{} is already registered in {}".format(name, self._name)) + raise KeyError(f"{name} is already registered in {self._name}") self._registry_dict[name] = obj def register(self, name: Optional[Any] = None): + """Register a module under the given name.""" + def wrap(obj): cls_name = name if cls_name is None: @@ -35,12 +42,15 @@ def wrap(obj): return wrap def get(self, key: Any) -> Any: + """Get a registered module by name (key).""" if key not in self._registry_dict: self._key_not_found(key) return self._registry_dict[key] def _key_not_found(self, key: Any): + """Raise KeyError when key is not found.""" + raise KeyError(f"{key} is not found in {self._name}") def __contains__(self, item): + """Check whether item is registered.""" return item in self._registry_dict.values() diff --git a/otx/mpa/modules/ov/utils.py b/otx/core/ov/utils.py similarity index 68% rename from otx/mpa/modules/ov/utils.py rename to otx/core/ov/utils.py index 7dc487565be..2e95e500249 100644 --- a/otx/mpa/modules/ov/utils.py +++ b/otx/core/ov/utils.py @@ -1,22 +1,24 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +# type: ignore +# TODO: Need to remove line 1 (ignore mypy) and fix mypy issues +"""Utils for otx.core.ov.""" +# Copyright (C) 2023 Intel Corporation # +# SPDX-License-Identifier: MIT import errno import os -from typing import List, Optional +from typing import Optional -from openvino.pyopenvino import Model, Node +from openvino.pyopenvino import Model, Node  # pylint: disable=no-name-in-module from openvino.runtime import Core -from otx.mpa.utils.logger import get_logger - from .omz_wrapper import AVAILABLE_OMZ_MODELS, get_omz_model -logger = get_logger() +# pylint: disable=too-many-locals def to_dynamic_model(ov_model: Model) -> Model: + """Convert ov_model to dynamic Model.""" assert isinstance(ov_model, Model)
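The `Registry` above backs the `OPS` lookup that `convert_op_to_torch` relies on. A short usage sketch, with hypothetical names, assuming the part of `wrap()` elided by the hunk falls back to the class name (as the `OPS.register()` usage elsewhere in this diff implies) and that the renamed module path resolves:

```python
# Hypothetical usage of Registry; "DEMO" and the classes are illustrative only.
from otx.core.ov.registry import Registry

DEMO = Registry("demo")


@DEMO.register()  # assumed to fall back to the class name when no name is given
class MyOp:
    pass


@DEMO.register("my_alias")  # or register under an explicit name
class Aliased:
    pass


assert DEMO.get("MyOp") is MyOp
assert Aliased in DEMO  # __contains__ checks the registered objects

try:
    DEMO.get("missing")
except KeyError as err:
    print(err)  # "missing is not found in demo"
```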
shapes = {} @@ -57,14 +59,13 @@ def reshape_model(ov_model, shapes): try: ov_model.reshape(shapes) return True - except Exception: + except Exception: # pylint: disable=broad-exception-caught return False pop_targets = [["height", "width"], ["batch"]] pop_targets = pop_targets[::-1] while not reshape_model(ov_model, shapes): - for key in shapes.keys(): - shape = shapes[key] + for key, shape in shapes.items(): target_layout = target_layouts[key] targets = pop_targets.pop() @@ -81,6 +82,7 @@ def reshape_model(ov_model, shapes): def load_ov_model(model_path: str, weight_path: Optional[str] = None, convert_dynamic: bool = False) -> Model: + """Load ov_model from model_path.""" model_path = str(model_path) if model_path.startswith("omz://"): model_path = model_path.replace("omz://", "") @@ -97,8 +99,8 @@ def load_ov_model(model_path: str, weight_path: Optional[str] = None, convert_dy if not os.path.exists(weight_path): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), weight_path) - ie = Core() - ov_model = ie.read_model(model=model_path, weights=weight_path) + ie_core = Core() + ov_model = ie_core.read_model(model=model_path, weights=weight_path) if convert_dynamic: ov_model = to_dynamic_model(ov_model) @@ -107,54 +109,20 @@ def load_ov_model(model_path: str, weight_path: Optional[str] = None, convert_dy def normalize_name(name: str) -> str: + """Normalize name string.""" # ModuleDict does not allow '.' in module name string name = name.replace(".", "#") - return name + return f"{name}" def unnormalize_name(name: str) -> str: + """Unnormalize name string.""" name = name.replace("#", ".") return name -def get_op_name(op: Node) -> str: - op_name = op.get_friendly_name() +def get_op_name(op_node: Node) -> str: + """Get op name string.""" + op_name = op_node.get_friendly_name() op_name = normalize_name(op_name) return op_name - - -def convert_op_to_torch(op: Node): - - from .ops import OPS - - op_type = op.get_type_name() - op_version = op.get_version() - - try: - torch_module = OPS.get_by_type_version(op_type, op_version).from_ov(op) - except Exception as e: - logger.error(e) - logger.error(op_type) - logger.error(op_version) - logger.error(op.get_attributes()) - raise e - - return torch_module - - -def convert_op_to_torch_module(target_op: Node): - from .ops.modules import OperationModule - - dependent_modules = [] - for in_port in target_op.inputs(): - out_port = in_port.get_source_output() - parent = out_port.get_node() - - parent_type = parent.get_type_name() - if parent_type == "Constant": - dependent_modules.append(convert_op_to_torch(parent)) - else: - dependent_modules.append(None) - module = convert_op_to_torch(target_op) - module = OperationModule(module, dependent_modules) - return module diff --git a/otx/hpo/hpo_base.py b/otx/hpo/hpo_base.py index 2192bf85dcc..f41e8a20caa 100644 --- a/otx/hpo/hpo_base.py +++ b/otx/hpo/hpo_base.py @@ -21,8 +21,8 @@ from enum import IntEnum from typing import Any, Dict, List, Optional, Union -from otx.algorithms.common.utils.utils import check_mode_input, check_positive from otx.hpo.search_space import SearchSpace +from otx.hpo.utils import check_mode_input, check_positive logger = logging.getLogger(__name__) diff --git a/otx/hpo/hyperband.py b/otx/hpo/hyperband.py index 5f16dbfedf4..030a5391cb3 100644 --- a/otx/hpo/hyperband.py +++ b/otx/hpo/hyperband.py @@ -23,13 +23,13 @@ from scipy.stats.qmc import LatinHypercube -from otx.algorithms.common.utils.utils import ( +from otx.hpo.hpo_base import HpoBase, Trial, TrialStatus +from 
otx.hpo.utils import ( check_mode_input, check_not_negative, check_positive, left_vlaue_is_better, ) -from otx.hpo.hpo_base import HpoBase, Trial, TrialStatus logger = logging.getLogger(__name__) diff --git a/otx/hpo/resource_manager.py b/otx/hpo/resource_manager.py index 0342fb6987b..c514577ab9d 100644 --- a/otx/hpo/resource_manager.py +++ b/otx/hpo/resource_manager.py @@ -21,7 +21,7 @@ import torch -from otx.algorithms.common.utils.utils import check_positive +from otx.hpo.utils import check_positive logger = logging.getLogger(__name__) diff --git a/otx/hpo/search_space.py b/otx/hpo/search_space.py index c64842be7c1..81698b56578 100644 --- a/otx/hpo/search_space.py +++ b/otx/hpo/search_space.py @@ -20,7 +20,7 @@ import typing from typing import Any, Dict, List, Optional, Tuple, Union -from otx.algorithms.common.utils.utils import check_positive +from otx.hpo.utils import check_positive logger = logging.getLogger(__name__) diff --git a/otx/hpo/utils.py b/otx/hpo/utils.py new file mode 100644 index 00000000000..886cfb44853 --- /dev/null +++ b/otx/hpo/utils.py @@ -0,0 +1,91 @@ +"""Collection of utility functions for HPO.""" + +# Copyright (C) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from typing import Literal, Optional + + +def left_vlaue_is_better(val1, val2, mode: Literal["max", "min"]) -> bool: + """Check left value is better than right value. + + Whether 'better' means greater or lesser depends on 'mode'. + + Args: + val1 : left value to compare. + val2 : right value to compare. + mode (Literal['max', 'min']): value to decide whether better means greater or lesser. + + Returns: + bool: whether val1 is better than val2. + """ + check_mode_input(mode) + if mode == "max": + return val1 > val2 + return val1 < val2 + + +def check_positive(value, variable_name: Optional[str] = None, error_message: Optional[str] = None): + """Validate that value is positive. + + Args: + value (Any): value to validate. + variable_name (Optional[str], optional): name of value. It's used for error message. Defaults to None. + error_message (Optional[str], optional): Error message to use when the check fails. Defaults to None. + + Raises: + ValueError: If value isn't positive, the error is raised. + """ + if value <= 0: + if error_message is not None: + message = error_message + elif variable_name: + message = f"{variable_name} should be positive.\n" f"your value : {value}" + else: + raise ValueError + raise ValueError(message) + + +def check_not_negative(value, variable_name: Optional[str] = None, error_message: Optional[str] = None): + """Validate that value isn't negative. + + Args: + value (Any): value to validate. + variable_name (Optional[str], optional): name of value. It's used for error message. Defaults to None. + error_message (Optional[str], optional): Error message to use when the check fails. Defaults to None. + + Raises: + ValueError: If value is negative, the error is raised.
+ """ + if value < 0: + if error_message is not None: + message = error_message + elif variable_name: + message = f"{variable_name} should be positive.\n" f"your value : {value}" + else: + raise ValueError + raise ValueError(message) + + +def check_mode_input(mode: str): + """Validate that mode is 'max' or 'min'. + + Args: + mode (str): string to validate. + + Raises: + ValueError: If 'mode' is not both 'max' and 'min', the error is raised. + """ + if mode not in ["max", "min"]: + raise ValueError("mode should be max or min.\n" f"Your value : {mode}") diff --git a/otx/mpa/cls/__init__.py b/otx/mpa/cls/__init__.py deleted file mode 100644 index 59c5e4f775a..00000000000 --- a/otx/mpa/cls/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import otx.mpa.modules.datasets.pipelines.transforms.augmix -import otx.mpa.modules.datasets.pipelines.transforms.ote_transforms -import otx.mpa.modules.datasets.pipelines.transforms.random_augment -import otx.mpa.modules.datasets.pipelines.transforms.twocrop_transform -import otx.mpa.modules.hooks -import otx.mpa.modules.models.classifiers -import otx.mpa.modules.models.heads.custom_cls_head -import otx.mpa.modules.models.heads.custom_hierarchical_linear_cls_head -import otx.mpa.modules.models.heads.custom_hierarchical_non_linear_cls_head -import otx.mpa.modules.models.heads.custom_multi_label_linear_cls_head -import otx.mpa.modules.models.heads.custom_multi_label_non_linear_cls_head -import otx.mpa.modules.models.heads.non_linear_cls_head -import otx.mpa.modules.models.heads.semisl_cls_head -import otx.mpa.modules.models.heads.semisl_multilabel_cls_head -import otx.mpa.modules.models.heads.supcon_cls_head -import otx.mpa.modules.models.losses.asymmetric_angular_loss_with_ignore -import otx.mpa.modules.models.losses.asymmetric_loss_with_ignore -import otx.mpa.modules.models.losses.barlowtwins_loss -import otx.mpa.modules.models.losses.cross_entropy_loss -import otx.mpa.modules.models.losses.ib_loss -import otx.mpa.modules.optimizer.lars - -# flake8: noqa -from . 
import ( - evaluator, - explainer, - exporter, - incremental, - inferrer, - semisl, - stage, - trainer, -) diff --git a/otx/mpa/cls/incremental/inferrer.py b/otx/mpa/cls/incremental/inferrer.py deleted file mode 100644 index 8956526bbe0..00000000000 --- a/otx/mpa/cls/incremental/inferrer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.cls.inferrer import ClsInferrer -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger - -from .stage import IncrClsStage - -logger = get_logger() - - -@STAGES.register_module() -class IncrClsInferrer(IncrClsStage, ClsInferrer): - def __init__(self, **kwargs): - IncrClsStage.__init__(self, **kwargs) diff --git a/otx/mpa/cls/incremental/trainer.py b/otx/mpa/cls/incremental/trainer.py deleted file mode 100644 index 8fd64d9331c..00000000000 --- a/otx/mpa/cls/incremental/trainer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.cls.trainer import ClsTrainer -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger - -from .stage import IncrClsStage - -logger = get_logger() - - -@STAGES.register_module() -class IncrClsTrainer(IncrClsStage, ClsTrainer): - def __init__(self, **kwargs): - IncrClsStage.__init__(self, **kwargs) diff --git a/otx/mpa/cls/semisl/inferrer.py b/otx/mpa/cls/semisl/inferrer.py deleted file mode 100644 index e8748ac063f..00000000000 --- a/otx/mpa/cls/semisl/inferrer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.cls.inferrer import ClsInferrer -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger - -from .stage import SemiSLClsStage - -logger = get_logger() - - -@STAGES.register_module() -class SemiSLClsInferrer(SemiSLClsStage, ClsInferrer): - def __init__(self, **kwargs): - SemiSLClsStage.__init__(self, **kwargs) diff --git a/otx/mpa/cls/semisl/trainer.py b/otx/mpa/cls/semisl/trainer.py deleted file mode 100644 index cacbd1f32d9..00000000000 --- a/otx/mpa/cls/semisl/trainer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.cls.trainer import ClsTrainer -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger - -from .stage import SemiSLClsStage - -logger = get_logger() - - -@STAGES.register_module() -class SemiSLClsTrainer(SemiSLClsStage, ClsTrainer): - def __init__(self, **kwargs): - SemiSLClsStage.__init__(self, **kwargs) diff --git a/otx/mpa/csrc/mpl/lib_mpl.cpp b/otx/mpa/csrc/mpl/lib_mpl.cpp deleted file mode 100644 index 4ad403bcbcb..00000000000 --- a/otx/mpa/csrc/mpl/lib_mpl.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2018, Sergei Belousov -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -// The original repo: https://github.com/bes-dev/mpl.pytorch - -#include -#include - -#include - -void compute_weights(int size, - const torch::Tensor losses, - const torch::Tensor indices, - torch::Tensor weights, - float ratio, - float p) { - const float* losses_data = losses.data_ptr(); - const int64_t* indices_data = indices.data_ptr(); - float* weights_data = weights.data_ptr(); - - // find a first nonzero element - int pos = 0; - while(losses_data[pos] < std::numeric_limits::epsilon()) { - ++pos; - } - - // Algorithm #1 - int n 
= size - pos; - int m = int(ratio * n); - if (n <= 0 || m <= 0) { - return; - } - - float q = p / (p - 1.0); - int c = m - n; - float a[2] = {0.0, 0.0}; - int i = pos; - float eta = 0.0; - for(; i < size && eta < std::numeric_limits::epsilon(); ++i) { - float loss_q = pow(losses_data[i] / losses_data[size - 1], q); - - a[0] = a[1]; - a[1] += loss_q; - - c += 1; - eta = float(c) * loss_q - a[1]; - } - - // compute alpha - float alpha; - if (eta < std::numeric_limits::epsilon()) { - c += 1; - a[0] = a[1]; - } - alpha = pow(a[0] / c, 1.0 / q) * losses_data[size - 1]; - - // compute weights - float tau = 1.0 / (pow(n, 1.0 / q) * pow(m, 1.0 / p)); - for (int k = i; k < size; ++k) { - weights_data[indices_data[k]] = tau; - } - if (alpha > -std::numeric_limits::epsilon()) { - for(int k = pos; k < i; ++k) { - weights_data[indices_data[k]] = tau * pow(losses_data[k] / alpha, q - 1); - } - } -} diff --git a/otx/mpa/csrc/mpl/lib_mpl.h b/otx/mpa/csrc/mpl/lib_mpl.h deleted file mode 100644 index b7e6b5def82..00000000000 --- a/otx/mpa/csrc/mpl/lib_mpl.h +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2018, Sergei Belousov -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -void compute_weights(int size, - const torch::Tensor losses, - const torch::Tensor indices, - torch::Tensor weights, - float ratio, - float p); diff --git a/otx/mpa/csrc/mpl/pybind.cpp b/otx/mpa/csrc/mpl/pybind.cpp deleted file mode 100644 index 39bce9c6960..00000000000 --- a/otx/mpa/csrc/mpl/pybind.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2018, Sergei Belousov -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include - -void compute_weights(int size, - const torch::Tensor losses, - const torch::Tensor indices, - torch::Tensor weights, - float ratio, - float p); - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("compute_weights", &compute_weights, "compute_weights", - py::arg("size"), py::arg("losses"), py::arg("indices"), - py::arg("weights"), py::arg("ratio"), py::arg("p")); -} diff --git a/otx/mpa/deploy/__init__.py b/otx/mpa/deploy/__init__.py deleted file mode 100644 index cf2e118cd0e..00000000000 --- a/otx/mpa/deploy/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from .utils import is_mmdeploy_enabled - -__all__ = [ - "is_mmdeploy_enabled", -] diff --git a/otx/mpa/det/incremental/inferrer.py b/otx/mpa/det/incremental/inferrer.py deleted file mode 100644 index 599be61c31c..00000000000 --- a/otx/mpa/det/incremental/inferrer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.det.inferrer import DetectionInferrer -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger - -from .stage import IncrDetectionStage - -logger = get_logger() - - -@STAGES.register_module() -class IncrDetectionInferrer(IncrDetectionStage, DetectionInferrer): - def __init__(self, **kwargs): - IncrDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/det/incremental/trainer.py b/otx/mpa/det/incremental/trainer.py deleted file mode 100644 index 1766f31b344..00000000000 --- a/otx/mpa/det/incremental/trainer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.det.trainer import DetectionTrainer -from otx.mpa.registry 
import STAGES -from otx.mpa.utils.logger import get_logger - -from .stage import IncrDetectionStage - -logger = get_logger() - - -@STAGES.register_module() -class IncrDetectionTrainer(IncrDetectionStage, DetectionTrainer): - def __init__(self, **kwargs): - IncrDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/det/semisl/trainer.py b/otx/mpa/det/semisl/trainer.py deleted file mode 100644 index 198a2d20015..00000000000 --- a/otx/mpa/det/semisl/trainer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.det.trainer import DetectionTrainer -from otx.mpa.registry import STAGES -from otx.mpa.utils.logger import get_logger - -from .stage import SemiSLDetectionStage - -logger = get_logger() - - -@STAGES.register_module() -class SemiSLDetectionTrainer(SemiSLDetectionStage, DetectionTrainer): - def __init__(self, **kwargs): - SemiSLDetectionStage.__init__(self, **kwargs) diff --git a/otx/mpa/modules/__init__.py b/otx/mpa/modules/__init__.py deleted file mode 100644 index fd1fbacf7b3..00000000000 --- a/otx/mpa/modules/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa - -try: - import openvino -except ImportError: - pass -else: - from . import ov diff --git a/otx/mpa/modules/datasets/pipelines/transforms/__init__.py b/otx/mpa/modules/datasets/pipelines/transforms/__init__.py deleted file mode 100644 index 4e1701262e2..00000000000 --- a/otx/mpa/modules/datasets/pipelines/transforms/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa diff --git a/otx/mpa/modules/datasets/pipelines/transforms/cython_augments/__init__.py b/otx/mpa/modules/datasets/pipelines/transforms/cython_augments/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/otx/mpa/modules/datasets/pipelines/transforms/random_augment.py b/otx/mpa/modules/datasets/pipelines/transforms/random_augment.py deleted file mode 100644 index 3b59eff3351..00000000000 --- a/otx/mpa/modules/datasets/pipelines/transforms/random_augment.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# code in this file is adpated from -# https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py -# https://github.com/google-research/fixmatch/blob/master/third_party/auto_augment/augmentations.py -# https://github.com/google-research/fixmatch/blob/master/libml/ctaugment.py -import random - -import numpy as np -import PIL -from mmcls.datasets.builder import PIPELINES - -PARAMETER_MAX = 10 - - -def AutoContrast(img, **kwarg): - return PIL.ImageOps.autocontrast(img), None - - -def Brightness(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - return PIL.ImageEnhance.Brightness(img).enhance(v), v - - -def Color(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - return PIL.ImageEnhance.Color(img).enhance(v), v - - -def Contrast(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - return PIL.ImageEnhance.Contrast(img).enhance(v), v - - -def Cutout(img, v, max_v, bias=0): - if v == 0: - return img - v = _float_parameter(v, max_v) + bias - v = int(v * min(img.size)) - return CutoutAbs(img, v), v - - -def CutoutAbs(img, v, **kwarg): - w, h = img.size - x0 = np.random.uniform(0, w) - y0 = np.random.uniform(0, h) - x0 = int(max(0, x0 - v / 2.0)) - 
y0 = int(max(0, y0 - v / 2.0)) - x1 = int(min(w, x0 + v)) - y1 = int(min(h, y0 + v)) - xy = (x0, y0, x1, y1) - # gray - color = (127, 127, 127) - img = img.copy() - PIL.ImageDraw.Draw(img).rectangle(xy, color) - return img, xy, color - - -def Equalize(img, **kwarg): - return PIL.ImageOps.equalize(img), None - - -def Identity(img, **kwarg): - return img, None - - -def Posterize(img, v, max_v, bias=0): - v = _int_parameter(v, max_v) + bias - return PIL.ImageOps.posterize(img, v), v - - -def Rotate(img, v, max_v, bias=0): - v = _int_parameter(v, max_v) + bias - if random.random() < 0.5: - v = -v - return img.rotate(v), v - - -def Sharpness(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - return PIL.ImageEnhance.Sharpness(img).enhance(v), v - - -def ShearX(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - if random.random() < 0.5: - v = -v - return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0)), v - - -def ShearY(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - if random.random() < 0.5: - v = -v - return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0)), v - - -def Solarize(img, v, max_v, bias=0): - v = _int_parameter(v, max_v) + bias - return PIL.ImageOps.solarize(img, 256 - v), v - - -def TranslateX(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - if random.random() < 0.5: - v = -v - v = int(v * img.size[0]) - return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0)), v - - -def TranslateY(img, v, max_v, bias=0): - v = _float_parameter(v, max_v) + bias - if random.random() < 0.5: - v = -v - v = int(v * img.size[1]) - return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v)), v - - -def _float_parameter(v, max_v): - return float(v) * max_v / PARAMETER_MAX - - -def _int_parameter(v, max_v): - return int(v * max_v / PARAMETER_MAX) - - -rand_augment_pool = [ - (AutoContrast, None, None), - (Brightness, 0.9, 0.05), - (Color, 0.9, 0.05), - (Contrast, 0.9, 0.05), - (Equalize, None, None), - (Identity, None, None), - (Posterize, 4, 4), - (Rotate, 30, 0), - (Sharpness, 0.9, 0.05), - (ShearX, 0.3, 0), - (ShearY, 0.3, 0), - (Solarize, 256, 0), - (TranslateX, 0.3, 0), - (TranslateY, 0.3, 0), -] - -# TODO: [Jihwan]: Can be removed by mmcls.datasets.pipeline.auto_augment Line 95 RandAugment class -@PIPELINES.register_module() -class MPARandAugment(object): - def __init__(self, n, m, cutout=16): - assert n >= 1 - assert 1 <= m <= 10 - self.n = n - self.m = m - self.cutout = cutout - self.augment_pool = rand_augment_pool - - def __call__(self, results): - for key in results.get("img_fields", ["img"]): - img = results[key] - if not PIL.Image.isImageType(img): - img = PIL.Image.fromarray(results[key]) - ops = random.choices(self.augment_pool, k=self.n) - for op, max_v, bias in ops: - v = np.random.randint(1, self.m) - if random.random() < 0.5: - img, v = op(img, v=v, max_v=max_v, bias=bias) - results["rand_mc_{}".format(op.__name__)] = v - img, xy, color = CutoutAbs(img, self.cutout) - results["CutoutAbs"] = (xy, self.cutout, color) - results[key] = np.array(img) - return results diff --git a/otx/mpa/modules/datasets/pipelines/transforms/seg_custom_pipelines.py b/otx/mpa/modules/datasets/pipelines/transforms/seg_custom_pipelines.py deleted file mode 100644 index 51829425725..00000000000 --- a/otx/mpa/modules/datasets/pipelines/transforms/seg_custom_pipelines.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import mmcv -import 
numpy as np -from mmcv.parallel import DataContainer as DC -from mmseg.datasets import PIPELINES -from mmseg.datasets.pipelines.formatting import to_tensor - - -@PIPELINES.register_module(force=True) -class Normalize(object): - """Normalize the image. - - Added key is "img_norm_cfg". - - Args: - mean (sequence): Mean values of 3 channels. - std (sequence): Std values of 3 channels. - to_rgb (bool): Whether to convert the image from BGR to RGB, - default is true. - """ - - def __init__(self, mean, std, to_rgb=True): - self.mean = np.array(mean, dtype=np.float32) - self.std = np.array(std, dtype=np.float32) - self.to_rgb = to_rgb - - def __call__(self, results): - """Call function to normalize images. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Normalized results, 'img_norm_cfg' key is added into - result dict. - """ - - for target in ["img", "ul_w_img", "aux_img"]: - if target in results: - results[target] = mmcv.imnormalize(results[target], self.mean, self.std, self.to_rgb) - results["img_norm_cfg"] = dict(mean=self.mean, std=self.std, to_rgb=self.to_rgb) - - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f"(mean={self.mean}, std={self.std}, to_rgb=" f"{self.to_rgb})" - return repr_str - - -@PIPELINES.register_module(force=True) -class DefaultFormatBundle(object): - """Default formatting bundle. - - It simplifies the pipeline of formatting common fields, including "img" - and "gt_semantic_seg". These fields are formatted as follows. - - - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, - (3)to DataContainer (stack=True) - """ - - def __call__(self, results): - """Call function to transform and format common fields in results. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data that is formatted with - default bundle. 
- """ - for target in ["img", "ul_w_img", "aux_img"]: - if target not in results: - continue - - img = results[target] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - - if len(img.shape) == 3: - img = np.ascontiguousarray(img.transpose(2, 0, 1)).astype(np.float32) - elif len(img.shape) == 4: - # for selfsl or supcon - img = np.ascontiguousarray(img.transpose(0, 3, 1, 2)).astype(np.float32) - else: - raise ValueError(f"img.shape={img.shape} is not supported.") - - results[target] = DC(to_tensor(img), stack=True) - - for trg_name in ["gt_semantic_seg", "gt_class_borders", "pixel_weights"]: - if trg_name not in results: - continue - - out_type = np.float32 if trg_name == "pixel_weights" else np.int64 - results[trg_name] = DC(to_tensor(results[trg_name][None, ...].astype(out_type)), stack=True) - - return results - - def __repr__(self): - return self.__class__.__name__ - - -@PIPELINES.register_module() -class BranchImage(object): - def __init__(self, key_map={}): - self.key_map = key_map - - def __call__(self, results): - for k1, k2 in self.key_map.items(): - if k1 in results: - results[k2] = results[k1] - if k1 in results["img_fields"]: - results["img_fields"].append(k2) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - return repr_str diff --git a/otx/mpa/modules/hooks/__init__.py b/otx/mpa/modules/hooks/__init__.py deleted file mode 100644 index 5bd14347002..00000000000 --- a/otx/mpa/modules/hooks/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from . import ( - adaptive_training_hooks, - checkpoint_hook, - composed_dataloaders_hook, - early_stopping_hook, - fp16_sam_optimizer_hook, - ib_loss_hook, - logger_replace_hook, - model_ema_hook, - model_ema_v2_hook, - no_bias_decay_hook, - recording_forward_hooks, - sam_optimizer_hook, - save_initial_weight_hook, - semisl_cls_hook, - task_adapt_hook, - unbiased_teacher_hook, - workflow_hooks, -) diff --git a/otx/mpa/modules/hooks/cancel_interface_hook.py b/otx/mpa/modules/hooks/cancel_interface_hook.py deleted file mode 100644 index 1cadb1e7af4..00000000000 --- a/otx/mpa/modules/hooks/cancel_interface_hook.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from mmcv.runner import HOOKS, EpochBasedRunner, Hook - -from otx.mpa.utils.logger import get_logger - -logger = get_logger() - - -@HOOKS.register_module() -class CancelInterfaceHook(Hook): - def __init__(self, init_callback: callable, interval=5): - self.on_init_callback = init_callback - self.runner = None - self.interval = interval - - def cancel(self): - logger.info("CancelInterfaceHook.cancel() is called.") - if self.runner is None: - logger.warning("runner is not configured yet. 
ignored this request.") - return - - if self.runner.should_stop: - logger.warning("cancel already requested.") - return - - if isinstance(self.runner, EpochBasedRunner): - epoch = self.runner.epoch - self.runner._max_epochs = epoch # Force runner to stop by pretending it has reached it's max_epoch - self.runner.should_stop = True # Set this flag to true to stop the current training epoch - logger.info("requested stopping to the runner") - - def before_run(self, runner): - self.runner = runner - self.on_init_callback(self) - - def after_run(self, runner): - self.runner = None diff --git a/otx/mpa/modules/hooks/logger_replace_hook.py b/otx/mpa/modules/hooks/logger_replace_hook.py deleted file mode 100644 index 3cb64b40a4d..00000000000 --- a/otx/mpa/modules/hooks/logger_replace_hook.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from mmcv.runner import HOOKS, Hook - -from otx.mpa.utils.logger import get_logger - -logger = get_logger() - - -@HOOKS.register_module() -class LoggerReplaceHook(Hook): - """replace logger in the runner to the MPA logger. - DO NOT INCLUDE this hook to the recipe directly. - mpa will add this hook to all recipe internally. - """ - - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def before_run(self, runner): - runner.logger = logger - logger.info("logger in the runner is replaced to the MPA logger") diff --git a/otx/mpa/modules/hooks/no_bias_decay_hook.py b/otx/mpa/modules/hooks/no_bias_decay_hook.py deleted file mode 100644 index a8bacfe61cf..00000000000 --- a/otx/mpa/modules/hooks/no_bias_decay_hook.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch.nn as nn -from mmcv.runner import HOOKS, Hook - -from otx.mpa.utils.logger import get_logger - -logger = get_logger() - - -@HOOKS.register_module() -class NoBiasDecayHook(Hook): - """Hook for No Bias Decay Method (Bag of Tricks for Image Classification) - - This hook divides model's weight & bias to 3 parameter groups - [weight with decay, weight without decay, bias without decay] - """ - - def before_train_epoch(self, runner): - weight_decay, bias_no_decay, weight_no_decay = [], [], [] - for m in runner.model.modules(): - if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): - weight_decay.append(m.weight) - if m.bias is not None: - bias_no_decay.append(m.bias) - elif hasattr(m, "weight") or hasattr(m, "bias"): - if hasattr(m, "weight"): - weight_no_decay.append(m.weight) - if hasattr(m, "bias"): - bias_no_decay.append(m.bias) - elif len(list(m.children())) == 0: - for p in m.parameters(): - weight_decay.append(p) - - weight_decay_group = runner.optimizer.param_groups[0].copy() - weight_decay_group["params"] = weight_decay - - bias_group = runner.optimizer.param_groups[0].copy() - bias_group["params"] = bias_no_decay - bias_group["weight_decay"] = 0.0 - bias_group["lr"] *= 2 - - weight_no_decay_group = runner.optimizer.param_groups[0].copy() - weight_no_decay_group["params"] = weight_no_decay - weight_no_decay_group["weight_decay"] = 0.0 - - param_groups = [weight_decay_group, bias_group, weight_no_decay_group] - runner.optimizer.param_groups = param_groups - - def after_train_epoch(self, runner): - params = [] - for m in runner.model.modules(): - if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): - params.append(m.weight) - if m.bias is not None: - params.append(m.bias) - elif hasattr(m, "weight") or hasattr(m, "bias"): - if hasattr(m, 
"weight"): - params.append(m.weight) - if hasattr(m, "bias"): - params.append(m.bias) - elif len(list(m.children())) == 0: - for p in m.parameters(): - params.append(p) - - param_groups = runner.optimizer.param_groups[0].copy() - param_groups["params"] = params - runner.optimizer.param_groups = [param_groups] diff --git a/otx/mpa/modules/hooks/save_initial_weight_hook.py b/otx/mpa/modules/hooks/save_initial_weight_hook.py deleted file mode 100644 index 0cafd1ce283..00000000000 --- a/otx/mpa/modules/hooks/save_initial_weight_hook.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from mmcv.runner import HOOKS, Hook - - -@HOOKS.register_module() -class SaveInitialWeightHook(Hook): - def __init__(self, save_path, file_name: str = "weights.pth", **kwargs): - self._save_path = save_path - self._file_name = file_name - self._args = kwargs - - def before_run(self, runner): - runner.logger.info("Saving weight before training") - runner.save_checkpoint( - self._save_path, filename_tmpl=self._file_name, save_optimizer=False, create_symlink=False, **self._args - ) diff --git a/otx/mpa/modules/models/__init__.py b/otx/mpa/modules/models/__init__.py deleted file mode 100644 index 4e1701262e2..00000000000 --- a/otx/mpa/modules/models/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa diff --git a/otx/mpa/modules/models/backbones/__init__.py b/otx/mpa/modules/models/backbones/__init__.py deleted file mode 100644 index 4e1701262e2..00000000000 --- a/otx/mpa/modules/models/backbones/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa diff --git a/otx/mpa/modules/models/builder.py b/otx/mpa/modules/models/builder.py deleted file mode 100644 index f064c6321d1..00000000000 --- a/otx/mpa/modules/models/builder.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from mmseg.models.builder import MODELS - -SCALAR_SCHEDULERS = MODELS - - -def build_scalar_scheduler(cfg, default_value=None): - if cfg is None: - if default_value is not None: - assert isinstance(default_value, (int, float)) - cfg = dict(type="ConstantScalarScheduler", scale=float(default_value)) - else: - return None - elif isinstance(cfg, (int, float)): - cfg = dict(type="ConstantScalarScheduler", scale=float(cfg)) - - return SCALAR_SCHEDULERS.build(cfg) diff --git a/otx/mpa/modules/models/classifiers/__init__.py b/otx/mpa/modules/models/classifiers/__init__.py deleted file mode 100644 index d1c98448092..00000000000 --- a/otx/mpa/modules/models/classifiers/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from . 
import ( - sam_classifier, - semisl_classifier, - semisl_multilabel_classifier, - supcon_classifier, -) diff --git a/otx/mpa/modules/models/heads/__init__.py b/otx/mpa/modules/models/heads/__init__.py deleted file mode 100644 index 4e1701262e2..00000000000 --- a/otx/mpa/modules/models/heads/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa diff --git a/otx/mpa/modules/models/heads/aggregator_mixin.py b/otx/mpa/modules/models/heads/aggregator_mixin.py deleted file mode 100644 index 4f320c7bb9d..00000000000 --- a/otx/mpa/modules/models/heads/aggregator_mixin.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2020-2021 The MMSegmentation Authors -# SPDX-License-Identifier: Apache-2.0 -# -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch.nn as nn - -from ..utils import IterativeAggregator, IterativeConcatAggregator - - -class AggregatorMixin(nn.Module): - def __init__( - self, - *args, - enable_aggregator=False, - aggregator_min_channels=None, - aggregator_merge_norm=None, - aggregator_use_concat=False, - **kwargs - ): - - in_channels = kwargs.get("in_channels") - in_index = kwargs.get("in_index") - norm_cfg = kwargs.get("norm_cfg") - conv_cfg = kwargs.get("conv_cfg") - input_transform = kwargs.get("input_transform") - - aggregator = None - if enable_aggregator: - assert isinstance(in_channels, (tuple, list)) - assert len(in_channels) > 1 - - aggregator = IterativeAggregator( - in_channels=in_channels, - min_channels=aggregator_min_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - merge_norm=aggregator_merge_norm, - use_concat=aggregator_use_concat, - ) - - aggregator_min_channels = aggregator_min_channels if aggregator_min_channels is not None else 0 - # change arguments temporarily - kwargs["in_channels"] = max(in_channels[0], aggregator_min_channels) - kwargs["input_transform"] = None - if in_index is not None: - kwargs["in_index"] = in_index[0] - - super(AggregatorMixin, self).__init__(*args, **kwargs) - - self.aggregator = aggregator - # re-define variables - self.in_channels = in_channels - self.input_transform = input_transform - self.in_index = in_index - - def _transform_inputs(self, inputs): - inputs = super()._transform_inputs(inputs) - if self.aggregator is not None: - inputs = self.aggregator(inputs)[0] - return inputs diff --git a/otx/mpa/modules/models/heads/mix_loss_mixin.py b/otx/mpa/modules/models/heads/mix_loss_mixin.py deleted file mode 100644 index 7dccb520748..00000000000 --- a/otx/mpa/modules/models/heads/mix_loss_mixin.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.runner import force_fp32 - - -class MixLossMixin(nn.Module): - @staticmethod - def _mix_loss(logits, target, ignore_index=255): - num_samples = logits.size(0) - assert num_samples % 2 == 0 - - with torch.no_grad(): - probs = F.softmax(logits, dim=1) - probs_a, probs_b = torch.split(probs, num_samples // 2) - mean_probs = 0.5 * (probs_a + probs_b) - trg_probs = torch.cat([mean_probs, mean_probs], dim=0) - - log_probs = torch.log_softmax(logits, dim=1) - losses = torch.sum(trg_probs * log_probs, dim=1).neg() - - valid_mask = target != ignore_index - valid_losses = torch.where(valid_mask, losses, torch.zeros_like(losses)) - - return valid_losses.mean() - - @force_fp32(apply_to=("seg_logit",)) - def losses(self, seg_logit, 
seg_label, train_cfg, *args, **kwargs): - loss = super().losses(seg_logit, seg_label, train_cfg, *args, **kwargs) - if train_cfg.get("mix_loss", None) and train_cfg.mix_loss.get("enable", False): - mix_loss = self._mix_loss(seg_logit, seg_label, ignore_index=self.ignore_index) - - mix_loss_weight = train_cfg.mix_loss.get("weight", 1.0) - loss["loss_mix"] = mix_loss_weight * mix_loss - - return loss diff --git a/otx/mpa/modules/models/heads/segment_out_norm_mixin.py b/otx/mpa/modules/models/heads/segment_out_norm_mixin.py deleted file mode 100644 index c82af88c817..00000000000 --- a/otx/mpa/modules/models/heads/segment_out_norm_mixin.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch.nn as nn - -from ..utils import AngularPWConv, normalize - - -class SegmentOutNormMixin(nn.Module): - def __init__(self, *args, enable_out_seg=True, enable_out_norm=False, **kwargs): - super().__init__(*args, **kwargs) - - self.enable_out_seg = enable_out_seg - self.enable_out_norm = enable_out_norm - - if enable_out_seg: - if enable_out_norm: - self.conv_seg = AngularPWConv(self.channels, self.out_channels, clip_output=True) - else: - self.conv_seg = None - - def cls_seg(self, feat): - """Classify each pixel.""" - if self.dropout is not None: - feat = self.dropout(feat) - if self.enable_out_norm: - feat = normalize(feat, dim=1, p=2) - if self.conv_seg is not None: - return self.conv_seg(feat) - else: - return feat diff --git a/otx/mpa/modules/models/heads/utils.py b/otx/mpa/modules/models/heads/utils.py deleted file mode 100644 index 9390fb85c31..00000000000 --- a/otx/mpa/modules/models/heads/utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: MIT -# - -from torch import nn - - -def generate_aux_mlp(aux_mlp_cfg: dict, in_channels: int): - out_channels = aux_mlp_cfg["out_channels"] - if out_channels <= 0: - raise ValueError(f"out_channels={out_channels} must be a positive integer") - if "hid_channels" in aux_mlp_cfg and aux_mlp_cfg["hid_channels"] > 0: - hid_channels = aux_mlp_cfg["hid_channels"] - mlp = nn.Sequential( - nn.Linear(in_features=in_channels, out_features=hid_channels), - nn.ReLU(inplace=True), - nn.Linear(in_features=hid_channels, out_features=out_channels), - ) - else: - mlp = nn.Linear(in_features=in_channels, out_features=out_channels) - - return mlp - - -class EMAMeter: - def __init__(self, alpha=0.9): - self.alpha = alpha - self.reset() - - def reset(self): - self.val = 0 - - def update(self, val): - self.val = self.alpha * self.val + (1 - self.alpha) * val - - -class LossBalancer: - def __init__(self, num_losses, weights=None, ema_weight=0.7) -> None: - self.EPS = 1e-9 - self.avg_estimators = [EMAMeter(ema_weight) for _ in range(num_losses)] - - if weights is not None: - assert len(weights) == num_losses - self.final_weights = weights - else: - self.final_weights = [1.0] * num_losses - - def balance_losses(self, losses): - total_loss = 0.0 - for i, l in enumerate(losses): - self.avg_estimators[i].update(float(l)) - total_loss += ( - self.final_weights[i] * l / (self.avg_estimators[i].val + self.EPS) * self.avg_estimators[0].val - ) - - return total_loss diff --git a/otx/mpa/modules/models/losses/__init__.py b/otx/mpa/modules/models/losses/__init__.py deleted file mode 100644 index 4e1701262e2..00000000000 --- a/otx/mpa/modules/models/losses/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: 
Apache-2.0 -# - -# flake8: noqa diff --git a/otx/mpa/modules/models/scalar_schedulers/__init__.py b/otx/mpa/modules/models/scalar_schedulers/__init__.py deleted file mode 100644 index f79e183f50f..00000000000 --- a/otx/mpa/modules/models/scalar_schedulers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from .constant import ConstantScalarScheduler -from .poly import PolyScalarScheduler -from .step import StepScalarScheduler - -__all__ = [ - "ConstantScalarScheduler", - "PolyScalarScheduler", - "StepScalarScheduler", -] diff --git a/otx/mpa/modules/models/segmentors/__init__.py b/otx/mpa/modules/models/segmentors/__init__.py deleted file mode 100644 index d7b6a1934c7..00000000000 --- a/otx/mpa/modules/models/segmentors/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from . import class_incr_encoder_decoder, mean_teacher_segmentor, otx_encoder_decoder diff --git a/otx/mpa/modules/models/segmentors/class_incr_encoder_decoder.py b/otx/mpa/modules/models/segmentors/class_incr_encoder_decoder.py deleted file mode 100644 index 6cb17955106..00000000000 --- a/otx/mpa/modules/models/segmentors/class_incr_encoder_decoder.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import functools - -from mmseg.models import SEGMENTORS -from mmseg.utils import get_root_logger - -from otx.mpa.modules.utils.task_adapt import map_class_names - -from .mix_loss_mixin import MixLossMixin -from .otx_encoder_decoder import OTXEncoderDecoder -from .pixel_weights_mixin import PixelWeightsMixin - - -@SEGMENTORS.register_module() -class ClassIncrEncoderDecoder(MixLossMixin, PixelWeightsMixin, OTXEncoderDecoder): - """ """ - - def __init__(self, *args, task_adapt=None, **kwargs): - super().__init__(*args, **kwargs) - - # Hook for class-sensitive weight loading - assert task_adapt is not None, "When using task_adapt, task_adapt must be set." 
- - self._register_load_state_dict_pre_hook( - functools.partial( - self.load_state_dict_pre_hook, - self, # model - task_adapt["dst_classes"], # model_classes - task_adapt["src_classes"], # chkpt_classes - ) - ) - - @staticmethod - def load_state_dict_pre_hook(model, model_classes, chkpt_classes, chkpt_dict, prefix, *args, **kwargs): - """Modify input state_dict according to class name matching before weight loading""" - logger = get_root_logger("INFO") - logger.info(f"----------------- ClassIncrEncoderDecoder.load_state_dict_pre_hook() called w/ prefix: {prefix}") - - # Dst to src mapping index - model_classes = list(model_classes) - chkpt_classes = list(chkpt_classes) - model2chkpt = map_class_names(model_classes, chkpt_classes) - logger.info(f"{chkpt_classes} -> {model_classes} ({model2chkpt})") - - model_dict = model.state_dict() - param_names = [ - "decode_head.conv_seg.weight", - "decode_head.conv_seg.bias", - ] - for model_name in param_names: - chkpt_name = prefix + model_name - if model_name not in model_dict or chkpt_name not in chkpt_dict: - logger.info(f"Skipping weight copy: {chkpt_name}") - continue - - # Mix weights - model_param = model_dict[model_name].clone() - chkpt_param = chkpt_dict[chkpt_name] - for m, c in enumerate(model2chkpt): - if c >= 0: - model_param[m].copy_(chkpt_param[c]) - - # Replace checkpoint weight by mixed weights - chkpt_dict[chkpt_name] = model_param diff --git a/otx/mpa/modules/models/segmentors/mix_loss_mixin.py b/otx/mpa/modules/models/segmentors/mix_loss_mixin.py deleted file mode 100644 index 63a820d612a..00000000000 --- a/otx/mpa/modules/models/segmentors/mix_loss_mixin.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch -import torch.nn as nn - - -class MixLossMixin(object): - def forward_train(self, img, img_metas, gt_semantic_seg, aux_img=None, **kwargs): - """Forward function for training. - - Args: - img (Tensor): Input images. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. - aux_img (Tensor): Auxiliary images. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - - if aux_img is not None: - mix_loss_enabled = False - mix_loss_cfg = self.train_cfg.get("mix_loss", None) - if mix_loss_cfg is not None: - mix_loss_enabled = mix_loss_cfg.get("enable", False) - if mix_loss_enabled: - self.train_cfg.mix_loss.enable = mix_loss_enabled - - if self.train_cfg.mix_loss.enable: - img = torch.cat([img, aux_img], dim=0) - gt_semantic_seg = torch.cat([gt_semantic_seg, gt_semantic_seg], dim=0) - - return super().forward_train(img, img_metas, gt_semantic_seg, **kwargs) diff --git a/otx/mpa/modules/optimizer/__init__.py b/otx/mpa/modules/optimizer/__init__.py deleted file mode 100644 index 4e1701262e2..00000000000 --- a/otx/mpa/modules/optimizer/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa diff --git a/otx/mpa/modules/ov/__init__.py b/otx/mpa/modules/ov/__init__.py deleted file mode 100644 index 0eadb4fe25d..00000000000 --- a/otx/mpa/modules/ov/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .graph import * -from .models import * -from .ops import * diff --git a/otx/mpa/modules/ov/graph/__init__.py b/otx/mpa/modules/ov/graph/__init__.py deleted file mode 100644 index 8f15e00642e..00000000000 --- a/otx/mpa/modules/ov/graph/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .graph import Graph diff --git a/otx/mpa/modules/ov/graph/parsers/__init__.py b/otx/mpa/modules/ov/graph/parsers/__init__.py deleted file mode 100644 index 37e7f3d50d2..00000000000 --- a/otx/mpa/modules/ov/graph/parsers/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from . import cls - -# flake8: noqa -from .builder import PARSERS diff --git a/otx/mpa/modules/ov/graph/parsers/builder.py b/otx/mpa/modules/ov/graph/parsers/builder.py deleted file mode 100644 index 92eaf9c069b..00000000000 --- a/otx/mpa/modules/ov/graph/parsers/builder.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from ...registry import Registry - -PARSERS = Registry("ov graph parsers") diff --git a/otx/mpa/modules/ov/graph/parsers/cls/__init__.py b/otx/mpa/modules/ov/graph/parsers/cls/__init__.py deleted file mode 100644 index e02b04e6302..00000000000 --- a/otx/mpa/modules/ov/graph/parsers/cls/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .cls_base_parser import * diff --git a/otx/mpa/modules/ov/models/mmcls/__init__.py b/otx/mpa/modules/ov/models/mmcls/__init__.py deleted file mode 100644 index d054cbc3fa3..00000000000 --- a/otx/mpa/modules/ov/models/mmcls/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from . 
import backbones, heads, necks diff --git a/otx/mpa/modules/ov/models/mmcls/backbones/__init__.py b/otx/mpa/modules/ov/models/mmcls/backbones/__init__.py deleted file mode 100644 index 1ad81562177..00000000000 --- a/otx/mpa/modules/ov/models/mmcls/backbones/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .mmov_backbone import MMOVBackbone diff --git a/otx/mpa/modules/ov/models/mmcls/backbones/mmov_backbone.py b/otx/mpa/modules/ov/models/mmcls/backbones/mmov_backbone.py deleted file mode 100644 index 5c901229f37..00000000000 --- a/otx/mpa/modules/ov/models/mmcls/backbones/mmov_backbone.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from typing import Dict, List - -from mmcls.models.builder import BACKBONES - -from ....graph.parsers.cls.cls_base_parser import cls_base_parser -from ...mmov_model import MMOVModel - - -@BACKBONES.register_module() -class MMOVBackbone(MMOVModel): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - @staticmethod - def parser(graph, **kwargs) -> Dict[str, List[str]]: - output = cls_base_parser(graph, "backbone") - if output is None: - raise ValueError("Parser can not determine input and output of model. " "Please provide them explicitly") - return output - - def init_weights(self, pretrained=None): - # TODO - pass diff --git a/otx/mpa/modules/ov/models/mmcls/heads/__init__.py b/otx/mpa/modules/ov/models/mmcls/heads/__init__.py deleted file mode 100644 index c7bb496ede5..00000000000 --- a/otx/mpa/modules/ov/models/mmcls/heads/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from .cls_head import ClsHead -from .conv_head import ConvClsHead - -# flake8: noqa -from .mmov_cls_head import MMOVClsHead diff --git a/otx/mpa/modules/ov/models/mmcls/necks/__init__.py b/otx/mpa/modules/ov/models/mmcls/necks/__init__.py deleted file mode 100644 index 300cf80ef3f..00000000000 --- a/otx/mpa/modules/ov/models/mmcls/necks/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .mmov_neck import MMOVNeck diff --git a/otx/mpa/modules/ov/models/mmcls/necks/mmov_neck.py b/otx/mpa/modules/ov/models/mmcls/necks/mmov_neck.py deleted file mode 100644 index f37a6c2e699..00000000000 --- a/otx/mpa/modules/ov/models/mmcls/necks/mmov_neck.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from typing import Dict, List - -from mmcls.models.builder import NECKS - -from ....graph.parsers.cls.cls_base_parser import cls_base_parser -from ...mmov_model import MMOVModel - - -@NECKS.register_module() -class MMOVNeck(MMOVModel): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - @staticmethod - def parser(graph, **kwargs) -> Dict[str, List[str]]: - output = cls_base_parser(graph, "neck") - if output is None: - raise ValueError("Parser can not determine input and output of model. 
" "Please provide them explicitly") - return output diff --git a/otx/mpa/modules/ov/models/mmseg/__init__.py b/otx/mpa/modules/ov/models/mmseg/__init__.py deleted file mode 100644 index f5601bf53ac..00000000000 --- a/otx/mpa/modules/ov/models/mmseg/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from . import backbones, decode_heads diff --git a/otx/mpa/modules/ov/models/mmseg/backbones/__init__.py b/otx/mpa/modules/ov/models/mmseg/backbones/__init__.py deleted file mode 100644 index 1ad81562177..00000000000 --- a/otx/mpa/modules/ov/models/mmseg/backbones/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .mmov_backbone import MMOVBackbone diff --git a/otx/mpa/modules/ov/models/mmseg/decode_heads/__init__.py b/otx/mpa/modules/ov/models/mmseg/decode_heads/__init__.py deleted file mode 100644 index 26515f29058..00000000000 --- a/otx/mpa/modules/ov/models/mmseg/decode_heads/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .mmov_decode_head import MMOVDecodeHead diff --git a/otx/mpa/modules/ov/ops/__init__.py b/otx/mpa/modules/ov/ops/__init__.py deleted file mode 100644 index 693b7dffa84..00000000000 --- a/otx/mpa/modules/ov/ops/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from .activations import * -from .arithmetics import * -from .builder import OPS -from .convolutions import * -from .generation import * -from .image_processings import * -from .infrastructures import * -from .matmuls import * -from .movements import * -from .normalizations import * -from .object_detections import * - -# flake8: noqa -from .op import * -from .poolings import * -from .reductions import * -from .shape_manipulations import * -from .sorting_maximization import * -from .type_conversions import * diff --git a/otx/mpa/modules/ov/ops/activations.py b/otx/mpa/modules/ov/ops/activations.py deleted file mode 100644 index 6cf73192367..00000000000 --- a/otx/mpa/modules/ov/ops/activations.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import math -from dataclasses import dataclass, field - -import torch -from torch.nn import functional as F - -from .builder import OPS -from .op import Attribute, Operation - - -@dataclass -class SoftMaxV0Attribute(Attribute): - axis: int = field(default=1) - - -@OPS.register() -class SoftMaxV0(Operation[SoftMaxV0Attribute]): - TYPE = "Softmax" - VERSION = 0 - ATTRIBUTE_FACTORY = SoftMaxV0Attribute - - def forward(self, input): - return F.softmax(input=input, dim=self.attrs.axis) - - -@dataclass -class SoftMaxV1Attribute(Attribute): - axis: int = field(default=1) - - -@OPS.register() -class SoftMaxV1(Operation[SoftMaxV1Attribute]): - TYPE = "Softmax" - VERSION = 1 - ATTRIBUTE_FACTORY = SoftMaxV1Attribute - - def forward(self, input): - return F.softmax(input=input, dim=self.attrs.axis) - - -@dataclass -class ReluV0Attribute(Attribute): - pass - - -@OPS.register() -class ReluV0(Operation[ReluV0Attribute]): - TYPE = "Relu" - VERSION = 0 - ATTRIBUTE_FACTORY = ReluV0Attribute - - def forward(self, input): - return F.relu(input) - - -@dataclass -class SwishV4Attribute(Attribute): - pass - - -@OPS.register() -class SwishV4(Operation[SwishV4Attribute]): - 
TYPE = "Swish" - VERSION = 4 - ATTRIBUTE_FACTORY = SwishV4Attribute - - def forward(self, input, beta=1.0): - return input * torch.sigmoid(input * beta) - - -@dataclass -class SigmoidV0Attribute(Attribute): - pass - - -@OPS.register() -class SigmoidV0(Operation[SigmoidV0Attribute]): - TYPE = "Sigmoid" - VERSION = 0 - ATTRIBUTE_FACTORY = SigmoidV0Attribute - - def forward(self, input): - return torch.sigmoid(input) - - -@dataclass -class ClampV0Attribute(Attribute): - min: float - max: float - - -@OPS.register() -class ClampV0(Operation[ClampV0Attribute]): - TYPE = "Clamp" - VERSION = 0 - ATTRIBUTE_FACTORY = ClampV0Attribute - - def forward(self, input): - return input.clamp(min=self.attrs.min, max=self.attrs.max) - - -@dataclass -class PReluV0Attribute(Attribute): - pass - - -@OPS.register() -class PReluV0(Operation[PReluV0Attribute]): - TYPE = "PRelu" - VERSION = 0 - ATTRIBUTE_FACTORY = PReluV0Attribute - - def forward(self, input, slope): - return F.prelu(input=input, weight=slope) - - -@dataclass -class TanhV0Attribute(Attribute): - pass - - -@OPS.register() -class TanhV0(Operation[TanhV0Attribute]): - TYPE = "Tanh" - VERSION = 0 - ATTRIBUTE_FACTORY = TanhV0Attribute - - def forward(self, input): - return F.tanh(input) - - -@dataclass -class EluV0Attribute(Attribute): - alpha: float - - -@OPS.register() -class EluV0(Operation[EluV0Attribute]): - TYPE = "Elu" - VERSION = 0 - ATTRIBUTE_FACTORY = EluV0Attribute - - def forward(self, input): - return F.elu(input=input, alpha=self.attrs.alpha) - - -@dataclass -class SeluV0Attribute(Attribute): - pass - - -@OPS.register() -class SeluV0(Operation[SeluV0Attribute]): - TYPE = "Selu" - VERSION = 0 - ATTRIBUTE_FACTORY = SeluV0Attribute - - def forward(self, input, alpha, lambda_): - return lambda_ * F.elu(input=input, alpha=alpha) - - -@dataclass -class MishV4Attribute(Attribute): - pass - - -@OPS.register() -class MishV4(Operation[MishV4Attribute]): - TYPE = "Mish" - VERSION = 4 - ATTRIBUTE_FACTORY = MishV4Attribute - - def forward(self, input): - # NOTE: pytorch 1.8.2 does not have mish function - # return F.mish(input=input) - return input * F.tanh(F.softplus(input)) - - -@dataclass -class HSwishV4Attribute(Attribute): - pass - - -@OPS.register() -class HSwishV4(Operation[HSwishV4Attribute]): - TYPE = "HSwish" - VERSION = 4 - ATTRIBUTE_FACTORY = HSwishV4Attribute - - def forward(self, input): - return F.hardswish(input=input) - - -@dataclass -class HSigmoidV5Attribute(Attribute): - pass - - -@OPS.register() -class HSigmoidV5(Operation[HSigmoidV5Attribute]): - TYPE = "HSigmoid" - VERSION = 5 - ATTRIBUTE_FACTORY = HSigmoidV5Attribute - - def forward(self, input): - return F.hardsigmoid(input=input) - - -@dataclass -class ExpV0Attribute(Attribute): - pass - - -@OPS.register() -class ExpV0(Operation[ExpV0Attribute]): - TYPE = "Exp" - VERSION = 0 - ATTRIBUTE_FACTORY = ExpV0Attribute - - def forward(self, input): - return torch.exp(input) - - -@dataclass -class HardSigmoidV0Attribute(Attribute): - pass - - -@OPS.register() -class HardSigmoidV0(Operation[HardSigmoidV0Attribute]): - TYPE = "HardSigmoid" - VERSION = 0 - ATTRIBUTE_FACTORY = HardSigmoidV0Attribute - - def forward(self, input, alpha, beta): - return torch.maximum( - torch.zeros_like(input), - torch.minimum(torch.ones_like(input), input * alpha + beta), - ) - - -@dataclass -class GeluV7Attribute(Attribute): - approximation_mode: str = field(default="ERF") - - def __post_init__(self): - super().__post_init__() - valid_approximation_mode = ["ERF", "tanh"] - if self.approximation_mode not in 
valid_approximation_mode: - raise ValueError( - f"Invalid approximation_mode {self.approximation_mode}. " - f"It must be one of {valid_approximation_mode}." - ) - - -@OPS.register() -class GeluV7(Operation[GeluV7Attribute]): - TYPE = "Gelu" - VERSION = 7 - ATTRIBUTE_FACTORY = GeluV7Attribute - - def forward(self, input): - mode = self.attrs.approximation_mode - if mode == "ERF": - return F.gelu(input=input) - elif mode == "tanh": - return input * 0.5 * (1 + F.tanh(torch.sqrt(2 / torch.tensor(math.pi)) * (input + 0.044715 * input**3))) diff --git a/otx/mpa/modules/ov/ops/modules/__init__.py b/otx/mpa/modules/ov/ops/modules/__init__.py deleted file mode 100644 index 2b6e6bb7ef0..00000000000 --- a/otx/mpa/modules/ov/ops/modules/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# flake8: noqa -from .op_module import OperationModule diff --git a/otx/mpa/modules/ov/ops/modules/op_module.py b/otx/mpa/modules/ov/ops/modules/op_module.py deleted file mode 100644 index 1d4ad1ba8b3..00000000000 --- a/otx/mpa/modules/ov/ops/modules/op_module.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import inspect -from typing import Dict, List, Optional, Union - -import torch - -from ..op import Operation - - -class OperationModule(torch.nn.Module): - def __init__( - self, - op: Operation, - dependent_ops: Union[List[Optional[Operation]], Dict[str, Optional[Operation]]], - ): - super().__init__() - - self.op = op - self._dependent_ops = torch.nn.ModuleDict() - - spec = inspect.getfullargspec(op.forward) - kwargs = spec.args[1:] - - self._dependents_with_defaults = [] - if spec.defaults: - self._dependents_with_defaults = spec.args[-len(spec.defaults) :] - - if isinstance(dependent_ops, list): - assert len(dependent_ops) == len(kwargs) - for op, kwarg in zip(dependent_ops, kwargs): - self._dependent_ops[kwarg] = op - elif isinstance(dependent_ops, dict): - for kwarg in kwargs: - self._dependent_ops[kwarg] = dependent_ops[kwarg] - else: - raise NotImplementedError - - def forward(self, *args, **kwargs): - inputs = {k: v() if v is not None else None for k, v in self._dependent_ops.items()} - - if args: - empty_input_keys = [k for k, v in self._dependent_ops.items() if v is None] - for key, val in zip(empty_input_keys, args): - inputs[key] = val - if kwargs: - for key, val in kwargs.items(): - if inputs[key] is not None: - raise ValueError(f"duplicated key {key}") - inputs[key] = val - - assert all(v is not None for v in inputs.values() if v not in self._dependents_with_defaults) - - return self.op(**inputs) - - @property - def type(self): - return self.op.type - - @property - def version(self): - return self.op.version - - @property - def name(self): - return self.op.name - - @property - def shape(self): - return self.op.shape - - @property - def attrs(self): - return self.op.attrs diff --git a/otx/mpa/modules/ov/ops/utils.py b/otx/mpa/modules/ov/ops/utils.py deleted file mode 100644 index 0877ec6409f..00000000000 --- a/otx/mpa/modules/ov/ops/utils.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import math -from functools import partial - -from torch.nn import functional as F - - -def get_dynamic_shape(op): - shape = [str(i) for i in op.get_partial_shape()] - for i, shape_ in enumerate(shape): - try: - shape_ = int(shape_) - except ValueError: - shape_ = -1 - shape[i] = shape_ - return shape - - 
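Note on the `get_torch_padding` helper that follows: its `same_upper`/`same_lower` branch reproduces TensorFlow-style SAME padding, where the output size is `ceil(input / stride)` and any odd padding remainder lands on the high (`same_upper`) or low (`same_lower`) side. A standalone sketch of that arithmetic — `same_padding_1d` is a hypothetical distillation for one spatial dimension, not part of the codebase:

```python
# Minimal sketch of SAME-padding arithmetic for a single spatial dimension.
import math

def same_padding_1d(input_size: int, kernel: int, stride: int, upper: bool = True):
    out_size = math.ceil(input_size / stride)
    needed = max(0, (out_size - 1) * stride + kernel - input_size)
    lhs, rhs = needed // 2, needed - needed // 2
    # same_upper puts the odd pixel at the end; same_lower at the start.
    return (lhs, rhs) if upper else (rhs, lhs)

# 224 input, 3x3 kernel, stride 2 -> output 112, one padding pixel at the end
assert same_padding_1d(224, 3, 2) == (0, 1)
# stride 1 keeps the spatial size: pad 1 on both sides for a 3x3 kernel
assert same_padding_1d(224, 3, 1) == (1, 1)
```

For `explicit` mode the helper simply forwards the given `pads_begin`/`pads_end` to `F.pad`, and `valid` means no padding at all.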
-def get_torch_padding(pads_begin, pads_end, auto_pad, input_size, weight_size, stride, dilation=None): - from .movements import PadV1 - - if dilation is None: - dilation = [1 for _ in input_size] - - if auto_pad == "valid": - return 0 - elif auto_pad == "same_upper" or auto_pad == "same_lower": - assert len(set(dilation)) == 1 and dilation[0] == 1 - pads_begin = [] - pads_end = [] - for input_size_, weight_size_, stride_, dilation_ in zip(input_size, weight_size, stride, dilation): - out_size = math.ceil(input_size_ / stride_) - padding_needed = max(0, (out_size - 1) * stride_ + weight_size_ - input_size_) - padding_lhs = int(padding_needed / 2) - padding_rhs = padding_needed - padding_lhs - - pads_begin.append(padding_lhs if auto_pad == "same_upper" else padding_rhs) - pads_end.append(padding_rhs if auto_pad == "same_upper" else padding_lhs) - pad = PadV1.get_torch_pad_dim(pads_begin, pads_end) - return partial(F.pad, pad=pad, mode="constant", value=0) - elif auto_pad == "explicit": - pad = PadV1.get_torch_pad_dim(pads_begin, pads_end) - return partial(F.pad, pad=pad, mode="constant", value=0) - else: - raise NotImplementedError diff --git a/otx/mpa/modules/utils/__init__.py b/otx/mpa/modules/utils/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/otx/mpa/modules/utils/distance_utils.py b/otx/mpa/modules/utils/distance_utils.py deleted file mode 100644 index d352c16ffcb..00000000000 --- a/otx/mpa/modules/utils/distance_utils.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch.distributed as dist -import torch.utils.data.distributed - - -def get_dist_info(): - try: - # data distributed parallel - return dist.get_rank(), dist.get_world_size(), True - except Exception: - return 0, 1, False diff --git a/otx/mpa/modules/utils/seg_utils.py b/otx/mpa/modules/utils/seg_utils.py deleted file mode 100644 index a293c4fd378..00000000000 --- a/otx/mpa/modules/utils/seg_utils.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import torch - - -def get_valid_label_mask_per_batch(img_metas, num_classes): - valid_label_mask_per_batch = [] - for _, meta in enumerate(img_metas): - valid_label_mask = torch.Tensor([1 for _ in range(num_classes)]) - if "ignored_labels" in meta and meta["ignored_labels"]: - valid_label_mask[meta["ignored_labels"]] = 0 - valid_label_mask_per_batch.append(valid_label_mask) - return valid_label_mask_per_batch diff --git a/otx/mpa/seg/__init__.py b/otx/mpa/seg/__init__.py deleted file mode 100644 index 8c091f1f20c..00000000000 --- a/otx/mpa/seg/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import otx.mpa.modules.datasets.pipelines.compose -import otx.mpa.modules.datasets.pipelines.transforms.seg_custom_pipelines -import otx.mpa.modules.hooks -import otx.mpa.modules.models.backbones.litehrnet -import otx.mpa.modules.models.heads.custom_fcn_head -import otx.mpa.modules.models.losses.cross_entropy_loss_with_ignore -import otx.mpa.modules.models.scalar_schedulers.constant -import otx.mpa.modules.models.scalar_schedulers.poly -import otx.mpa.modules.models.scalar_schedulers.step -import otx.mpa.modules.models.segmentors -from otx.mpa.seg.incremental import IncrSegInferrer, IncrSegTrainer -from otx.mpa.seg.semisl import SemiSLSegExporter, SemiSLSegInferrer, SemiSLSegTrainer - -# flake8: noqa -from . 
import exporter, inferrer, stage, trainer diff --git a/otx/mpa/seg/incremental/inferrer.py b/otx/mpa/seg/incremental/inferrer.py deleted file mode 100644 index 4645ce72d89..00000000000 --- a/otx/mpa/seg/incremental/inferrer.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.registry import STAGES -from otx.mpa.seg.inferrer import SegInferrer - -from .stage import IncrSegStage - - -@STAGES.register_module() -class IncrSegInferrer(IncrSegStage, SegInferrer): - def __init__(self, **kwargs): - IncrSegStage.__init__(self, **kwargs) diff --git a/otx/mpa/seg/incremental/trainer.py b/otx/mpa/seg/incremental/trainer.py deleted file mode 100644 index 11f07846651..00000000000 --- a/otx/mpa/seg/incremental/trainer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.registry import STAGES -from otx.mpa.seg.trainer import SegTrainer -from otx.mpa.utils.logger import get_logger - -from .stage import IncrSegStage - -logger = get_logger() - - -@STAGES.register_module() -class IncrSegTrainer(IncrSegStage, SegTrainer): - def __init__(self, **kwargs): - IncrSegStage.__init__(self, **kwargs) diff --git a/otx/mpa/seg/semisl/__init__.py b/otx/mpa/seg/semisl/__init__.py deleted file mode 100644 index bd7216fbeae..00000000000 --- a/otx/mpa/seg/semisl/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from .exporter import SemiSLSegExporter -from .inferrer import SemiSLSegInferrer -from .trainer import SemiSLSegTrainer diff --git a/otx/mpa/seg/semisl/trainer.py b/otx/mpa/seg/semisl/trainer.py deleted file mode 100644 index a44a2ae9ba3..00000000000 --- a/otx/mpa/seg/semisl/trainer.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.mpa.registry import STAGES -from otx.mpa.seg.trainer import SegTrainer -from otx.mpa.utils.logger import get_logger - -from .stage import SemiSLSegStage - -logger = get_logger() - - -@STAGES.register_module() -class SemiSLSegTrainer(SemiSLSegStage, SegTrainer): - def __init__(self, **kwargs): - SemiSLSegStage.__init__(self, **kwargs) diff --git a/otx/mpa/utils/__init__.py b/otx/mpa/utils/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/otx/mpa/utils/config_utils.py b/otx/mpa/utils/config_utils.py deleted file mode 100644 index e9c644a8e0d..00000000000 --- a/otx/mpa/utils/config_utils.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import os.path as osp -import platform -import shutil -import sys -import tempfile -import warnings -from importlib import import_module -from typing import Any, Callable, Union - -from mmcv.utils import Config, ConfigDict -from mmcv.utils.config import BASE_KEY, DEPRECATION_KEY -from mmcv.utils.misc import import_modules_from_strings -from mmcv.utils.path import check_file_exist - -from otx.mpa.utils.logger import get_logger - -logger = get_logger() - - -class MPAConfig(Config): - @staticmethod - def _file2dict(filename, use_predefined_variables=True): - filename = osp.abspath(osp.expanduser(filename)) - check_file_exist(filename) - fileExtname = osp.splitext(filename)[1] - if fileExtname not in [".py", ".json", ".yaml", ".yml"]: - raise IOError("Only py/yml/yaml/json type are supported now!") - - with tempfile.TemporaryDirectory() as temp_config_dir: 
- temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=fileExtname) - if platform.system() == "Windows": - temp_config_file.close() - temp_config_name = osp.basename(temp_config_file.name) - # Substitute predefined variables - if use_predefined_variables: - Config._substitute_predefined_vars(filename, temp_config_file.name) - else: - shutil.copyfile(filename, temp_config_file.name) - # Substitute base variables from placeholders to strings - base_var_dict = Config._pre_substitute_base_vars(temp_config_file.name, temp_config_file.name) - if filename.endswith(".py"): - temp_module_name = osp.splitext(temp_config_name)[0] - sys.path.insert(0, temp_config_dir) - Config._validate_py_syntax(filename) - mod = import_module(temp_module_name) - sys.path.pop(0) - cfg_dict = {name: value for name, value in mod.__dict__.items() if not name.startswith("__")} - # delete imported module - del sys.modules[temp_module_name] - elif filename.endswith((".yml", ".yaml", ".json")): - import mmcv - - cfg_dict = mmcv.load(temp_config_file.name) - # close temp file - temp_config_file.close() - - # check deprecation information - if DEPRECATION_KEY in cfg_dict: - deprecation_info = cfg_dict.pop(DEPRECATION_KEY) - warning_msg = f"The config file {filename} will be deprecated " "in the future." - if "expected" in deprecation_info: - warning_msg += f' Please use {deprecation_info["expected"]} ' "instead." - if "reference" in deprecation_info: - warning_msg += " More information can be found at " f'{deprecation_info["reference"]}' - warnings.warn(warning_msg) - - cfg_text = filename + "\n" - with open(filename, "r", encoding="utf-8") as f: - # Setting encoding explicitly to resolve coding issue on windows - cfg_text += f.read() - - if BASE_KEY in cfg_dict: - cfg_dir = osp.dirname(filename) - base_filename = cfg_dict.pop(BASE_KEY) - base_filename = base_filename if isinstance(base_filename, list) else [base_filename] - - cfg_dict_list = list() - cfg_text_list = list() - for f in base_filename: - _cfg_dict, _cfg_text = MPAConfig._file2dict(osp.join(cfg_dir, f)) - cfg_dict_list.append(_cfg_dict) - cfg_text_list.append(_cfg_text) - - base_cfg_dict = dict() - # for c in cfg_dict_list: - # duplicate_keys = base_cfg_dict.keys() & c.keys() - # if len(duplicate_keys) > 0: - # raise KeyError('Duplicate key is not allowed among bases. 
' - # f'Duplicate keys: {duplicate_keys}') - # base_cfg_dict.update(c) - for c in cfg_dict_list: - if len(base_cfg_dict.keys() & c.keys()) > 0: - # raise KeyError(f'Duplicate key is not allowed among bases [{base_cfg_dict.keys() & c.keys()}]') - logger.warning(f"Duplicate key is detected among bases [{base_cfg_dict.keys() & c.keys()}]") - logger.debug(f"base = {base_cfg_dict}, cfg = {c}") - base_cfg_dict = Config._merge_a_into_b(base_cfg_dict, c) - logger.debug(f"merged dict = {base_cfg_dict}") - else: - base_cfg_dict.update(c) - - # Subtitute base variables from strings to their actual values - cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict) - - base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict) - cfg_dict = base_cfg_dict - - # merge cfg_text - cfg_text_list.append(cfg_text) - cfg_text = "\n".join(cfg_text_list) - - return cfg_dict, cfg_text - - @staticmethod - def fromfile(filename, use_predefined_variables=True, import_custom_modules=True): - cfg_dict, cfg_text = MPAConfig._file2dict(filename, use_predefined_variables) - if import_custom_modules and cfg_dict.get("custom_imports", None): - import_modules_from_strings(**cfg_dict["custom_imports"]) - return Config(cfg_dict, cfg_text=cfg_text, filename=filename) - - -def copy_config(cfg): - if not isinstance(cfg, Config): - ValueError(f"cannot copy this instance {type(cfg)}") - # new_cfg = copy.deepcopy(cfg) - # new_cfg._cfg_dict = copy.deepcopy(cfg._cfg_dict) - # new_cfg.filename = cfg.filename - import pickle - - data = pickle.dumps(cfg) - return pickle.loads(data) - - -def update_or_add_custom_hook(cfg: Config, hook_cfg: ConfigDict): - """Update hook cfg if same type is in custom_hook or append it""" - custom_hooks = cfg.get("custom_hooks", []) - custom_hooks_updated = False - for custom_hook in custom_hooks: - if custom_hook["type"] == hook_cfg["type"]: - custom_hook.update(hook_cfg) - custom_hooks_updated = True - break - if not custom_hooks_updated: - custom_hooks.append(hook_cfg) - cfg["custom_hooks"] = custom_hooks - - -def remove_custom_hook(cfg: Config, hook_type: str): - """Remove hook cfg if hook_type is in custom_hook""" - custom_hooks = cfg.get("custom_hooks", []) - if len(custom_hooks) > 0: - idx_to_del = None - for i, custom_hook in enumerate(custom_hooks): - if custom_hook["type"] == hook_type: - idx_to_del = i - break - if idx_to_del is not None: - del custom_hooks[idx_to_del] - - -def recursively_update_cfg( - cfg: Union[Config, dict], - criterion: Callable[[Any, Any], bool], - update_dict: Any, -): - for k, v in list(cfg.items()): - if isinstance(v, dict): - recursively_update_cfg(v, criterion, update_dict) - if criterion(k, v): - cfg.update(update_dict) - - -def add_custom_hook_if_not_exists(cfg: Config, hook_cfg: ConfigDict): - custom_hooks = cfg.get("custom_hooks", []) - found = False - for hook in custom_hooks: - if hook["type"] == hook_cfg["type"]: - found = True - break - if not found: - custom_hooks.append(hook_cfg) - cfg["custom_hooks"] = custom_hooks diff --git a/otx/mpa/utils/convert_keys.py b/otx/mpa/utils/convert_keys.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/otx/mpa/utils/ext_loader.py b/otx/mpa/utils/ext_loader.py deleted file mode 100644 index 0776160e83a..00000000000 --- a/otx/mpa/utils/ext_loader.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import importlib - - -def load_ext(path, funcs): - ext = importlib.import_module(path) - for fun in funcs: - assert 
hasattr(ext, fun), f"{fun} miss in module {path}" - - return ext diff --git a/otx/mpa/utils/file.py b/otx/mpa/utils/file.py deleted file mode 100644 index c3fd47846b0..00000000000 --- a/otx/mpa/utils/file.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import os - -MPA_CACHE = os.path.expanduser(os.getenv("MPA_CACHE", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "mpa"))) -os.makedirs(MPA_CACHE, exist_ok=True) diff --git a/pyproject.toml b/pyproject.toml index f4ed51f28fc..560b922add7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -110,6 +110,9 @@ good-names = [ "y2", "x", "y", + "xy", + "x0", + "y0", "r", "id", "type", @@ -122,6 +125,7 @@ good-names = [ "t", "w", "h", + "fc", ] [tool.pylint.imports] diff --git a/requirements/api.txt b/requirements/api.txt index 6baa41a1243..4db8862a507 100644 --- a/requirements/api.txt +++ b/requirements/api.txt @@ -9,3 +9,4 @@ pymongo==3.12.0 scikit-learn==0.24.* Shapely>=1.7.1,<=1.8.0 imagesize==1.4.1 +dill>=0.3.6 diff --git a/setup.py b/setup.py index 6660edad616..3875e8c254b 100644 --- a/setup.py +++ b/setup.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 import os +import platform import subprocess import sys import warnings @@ -100,25 +101,28 @@ def get_requirements(requirement_files: Union[str, List[str]]) -> List[str]: def get_extensions(): + if platform.system() == "Windows": + return [] + def _cython_modules(): package_root = os.path.dirname(__file__) cython_files = [ - "otx/mpa/modules/datasets/pipelines/transforms/cython_augments/pil_augment.pyx", - "otx/mpa/modules/datasets/pipelines/transforms/cython_augments/cv_augment.pyx", + "otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/pil_augment.pyx", + "otx/algorithms/common/adapters/mmcv/pipelines/transforms/cython_augments/cv_augment.pyx" ] ext_modules = [ Extension( cython_file.rstrip(".pyx").replace("/", "."), - [os.path.join(package_root, cython_file)], + [cython_file], include_dirs=[numpy.get_include()], extra_compile_args=["-O3"], ) for cython_file in cython_files ] - return cythonize(ext_modules, annotate=True) + return cythonize(ext_modules) extensions = [] extensions.extend(_cython_modules()) diff --git a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml index 154bb9b4307..9e0c559a076 100644 --- a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml +++ b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficientNet-V2-S/compressed_model.yml @@ -1,11 +1,11 @@ TestToolsMultiClassClassification: pot: - number_of_fakequantizers: 216 + number_of_fakequantizers: 208 nncf: number_of_fakequantizers: 267 TestToolsMultilabelClassification: pot: - number_of_fakequantizers: 220 + number_of_fakequantizers: 210 nncf: number_of_fakequantizers: 269 TestToolsHierarchicalClassification: diff --git a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml index a2bea89bd63..2ea456f8f38 100644 --- a/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml +++ b/tests/e2e/cli/classification/reference/Custom_Image_Classification_EfficinetNet-B0/compressed_model.yml 
@@ -1,11 +1,11 @@ TestToolsMultiClassClassification: pot: - number_of_fakequantizers: 100 + number_of_fakequantizers: 92 nncf: number_of_fakequantizers: 124 TestToolsMultilabelClassification: pot: - number_of_fakequantizers: 104 + number_of_fakequantizers: 94 nncf: number_of_fakequantizers: 126 TestToolsHierarchicalClassification: diff --git a/tests/e2e/cli/classification/reference/Custom_Image_Classification_MobileNet-V3-large-1x/compressed_model.yml b/tests/e2e/cli/classification/reference/Custom_Image_Classification_MobileNet-V3-large-1x/compressed_model.yml index 35a8eea185e..757ec370d16 100644 --- a/tests/e2e/cli/classification/reference/Custom_Image_Classification_MobileNet-V3-large-1x/compressed_model.yml +++ b/tests/e2e/cli/classification/reference/Custom_Image_Classification_MobileNet-V3-large-1x/compressed_model.yml @@ -1,11 +1,11 @@ TestToolsMultiClassClassification: pot: - number_of_fakequantizers: 146 + number_of_fakequantizers: 135 nncf: number_of_fakequantizers: 91 TestToolsMultilabelClassification: pot: - number_of_fakequantizers: 146 + number_of_fakequantizers: 135 nncf: number_of_fakequantizers: 93 TestToolsHierarchicalClassification: diff --git a/tests/e2e/cli/classification/test_classification.py b/tests/e2e/cli/classification/test_classification.py index a3278f0eb13..fe0e12498e8 100644 --- a/tests/e2e/cli/classification/test_classification.py +++ b/tests/e2e/cli/classification/test_classification.py @@ -303,7 +303,7 @@ def test_otx_train(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls/test_semisl" args_semisl = copy.deepcopy(args0) args_semisl["--unlabeled-data-roots"] = args["--train-data-roots"] - args_semisl["train_params"].extend(["--algo_backend.train_type", "SEMISUPERVISED"]) + args_semisl["train_params"].extend(["--algo_backend.train_type", "Semisupervised"]) otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl) @e2e_pytest_component @@ -322,7 +322,7 @@ def test_otx_multi_gpu_train_semisl(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls/test_multi_gpu_semisl" args_semisl_multigpu = copy.deepcopy(args0) args_semisl_multigpu["--unlabeled-data-roots"] = args["--train-data-roots"] - args_semisl_multigpu["train_params"].extend(["--algo_backend.train_type", "SEMISUPERVISED"]) + args_semisl_multigpu["train_params"].extend(["--algo_backend.train_type", "Semisupervised"]) args_semisl_multigpu["--gpus"] = "0,1" otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl_multigpu) @@ -757,7 +757,7 @@ def test_otx_multi_gpu_train(self, template, tmp_dir_path): "--learning_parameters.learning_rate", "1e-07", "--algo_backend.train_type", - "SELFSUPERVISED", + "Selfsupervised", ], } diff --git a/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml b/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml index 957a7a0dd90..2b1f14454be 100644 --- a/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml +++ b/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B/compressed_model.yml @@ -1,10 +1,10 @@ TestToolsMPAInstanceSegmentation: pot: - number_of_fakequantizers: 143 + number_of_fakequantizers: 137 nncf: number_of_fakequantizers: 204 TestToolsTilingInstanceSegmentation: pot: - number_of_fakequantizers: 143 + number_of_fakequantizers: 137 nncf: number_of_fakequantizers: -1 diff 
--git a/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml b/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml index 780930d6dfc..7d25949404a 100644 --- a/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml +++ b/tests/e2e/cli/detection/reference/Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50/compressed_model.yml @@ -1,10 +1,10 @@ TestToolsMPAInstanceSegmentation: pot: - number_of_fakequantizers: 82 + number_of_fakequantizers: 76 nncf: number_of_fakequantizers: 92 TestToolsTilingInstanceSegmentation: pot: - number_of_fakequantizers: 82 + number_of_fakequantizers: 76 nncf: number_of_fakequantizers: -1 diff --git a/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_ATSS/compressed_model.yml b/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_ATSS/compressed_model.yml index d181b6c65be..aae68242725 100644 --- a/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_ATSS/compressed_model.yml +++ b/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_ATSS/compressed_model.yml @@ -1,10 +1,10 @@ TestToolsMPADetection: pot: - number_of_fakequantizers: 212 + number_of_fakequantizers: 196 nncf: number_of_fakequantizers: 155 TestToolsTilingDetection: pot: - number_of_fakequantizers: 212 + number_of_fakequantizers: 196 nncf: number_of_fakequantizers: -1 diff --git a/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_SSD/compressed_model.yml b/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_SSD/compressed_model.yml index 8cd26e145f8..6063c5cdaf9 100644 --- a/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_SSD/compressed_model.yml +++ b/tests/e2e/cli/detection/reference/Custom_Object_Detection_Gen3_SSD/compressed_model.yml @@ -1,10 +1,10 @@ TestToolsMPADetection: pot: - number_of_fakequantizers: 77 + number_of_fakequantizers: 67 nncf: number_of_fakequantizers: 67 TestToolsTilingDetection: pot: - number_of_fakequantizers: 77 + number_of_fakequantizers: 67 nncf: number_of_fakequantizers: -1 diff --git a/tests/e2e/cli/detection/reference/Custom_Object_Detection_YOLOX/compressed_model.yml b/tests/e2e/cli/detection/reference/Custom_Object_Detection_YOLOX/compressed_model.yml index 851297fd424..0ada6aed686 100644 --- a/tests/e2e/cli/detection/reference/Custom_Object_Detection_YOLOX/compressed_model.yml +++ b/tests/e2e/cli/detection/reference/Custom_Object_Detection_YOLOX/compressed_model.yml @@ -1,10 +1,10 @@ TestToolsMPADetection: pot: - number_of_fakequantizers: 97 + number_of_fakequantizers: 85 nncf: number_of_fakequantizers: 84 TestToolsTilingDetection: pot: - number_of_fakequantizers: 97 + number_of_fakequantizers: 85 nncf: number_of_fakequantizers: -1 diff --git a/tests/e2e/cli/detection/test_detection.py b/tests/e2e/cli/detection/test_detection.py index 4572293aaf8..fade859dbe5 100644 --- a/tests/e2e/cli/detection/test_detection.py +++ b/tests/e2e/cli/detection/test_detection.py @@ -68,7 +68,7 @@ "--learning_parameters.batch_size", "4", "--algo_backend.train_type", - "SEMISUPERVISED", + "Semisupervised", ], } diff --git a/tests/e2e/cli/segmentation/test_segmentation.py b/tests/e2e/cli/segmentation/test_segmentation.py index 96086fa25eb..1df46ecfba1 100644 --- a/tests/e2e/cli/segmentation/test_segmentation.py +++ b/tests/e2e/cli/segmentation/test_segmentation.py @@ -276,7 +276,7 @@ def test_otx_multi_gpu_train(self, template, 
tmp_dir_path): "--learning_parameters.batch_size", "4", "--algo_backend.train_type", - "SEMISUPERVISED", + "Semisupervised", ], } @@ -317,7 +317,7 @@ def test_otx_multi_gpu_train_semisl(self, template, tmp_dir_path): "--learning_parameters.batch_size", "4", "--algo_backend.train_type", - "SELFSUPERVISED", + "Selfsupervised", ], } diff --git a/tests/fuzzing/cli_fuzzing.py b/tests/fuzzing/cli_fuzzing.py index d1224a4ec3b..ed6d17f89ad 100644 --- a/tests/fuzzing/cli_fuzzing.py +++ b/tests/fuzzing/cli_fuzzing.py @@ -4,6 +4,7 @@ from helper import FuzzingHelper from otx.cli.tools.cli import main as cli_main +from otx.cli.utils.errors import CliException @atheris.instrument_func @@ -21,6 +22,8 @@ def fuzz_otx(input_bytes): # argparser will throw SystemExit with code 2 when some required arguments are missing if e.code != 2: raise + except CliException: + pass # some known exceptions can be catched here finally: sys.argv = backup_argv diff --git a/tests/integration/api/xai/test_api_xai_validity.py b/tests/integration/api/xai/test_api_xai_validity.py index 26b8660ce50..aa6e0719b48 100644 --- a/tests/integration/api/xai/test_api_xai_validity.py +++ b/tests/integration/api/xai/test_api_xai_validity.py @@ -11,11 +11,13 @@ from mmdet.models import build_detector from otx.algorithms.classification.tasks import ClassificationInferenceTask # noqa +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + ReciproCAMHook, +) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig from otx.algorithms.detection.adapters.mmdet.hooks import DetSaliencyMapHook from otx.cli.registry import Registry from otx.mpa.det.stage import DetectionStage # noqa -from otx.mpa.modules.hooks.recording_forward_hooks import ReciproCAMHook -from otx.mpa.utils.config_utils import MPAConfig from tests.test_suite.e2e_test_system import e2e_pytest_unit templates_cls = Registry("otx/algorithms").filter(task_type="CLASSIFICATION").templates diff --git a/tests/integration/cli/classification/test_classification.py b/tests/integration/cli/classification/test_classification.py index ee26e5cbc35..459ef89c033 100644 --- a/tests/integration/cli/classification/test_classification.py +++ b/tests/integration/cli/classification/test_classification.py @@ -53,7 +53,7 @@ "--learning_parameters.batch_size", "4", "--algo_backend.train_type", - "SELFSUPERVISED", + "Selfsupervised", ], } @@ -190,7 +190,7 @@ def test_otx_train_semisl(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls/test_semisl" args_semisl = copy.deepcopy(args) args_semisl["--unlabeled-data-roots"] = args["--train-data-roots"] - args_semisl["train_params"].extend(["--algo_backend.train_type", "SEMISUPERVISED"]) + args_semisl["train_params"].extend(["--algo_backend.train_type", "Semisupervised"]) otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl) @e2e_pytest_component @@ -201,7 +201,7 @@ def test_otx_multi_gpu_train_semisl(self, template, tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_class_cls/test_multi_gpu_semisl" args_semisl_multigpu = copy.deepcopy(args) args_semisl_multigpu["--unlabeled-data-roots"] = args["--train-data-roots"] - args_semisl_multigpu["train_params"].extend(["--algo_backend.train_type", "SEMISUPERVISED"]) + args_semisl_multigpu["train_params"].extend(["--algo_backend.train_type", "Semisupervised"]) args_semisl_multigpu["--gpus"] = "0,1" otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl_multigpu) @@ -308,7 +308,7 @@ def test_otx_train_semisl(self, template, 
tmp_dir_path): tmp_dir_path = tmp_dir_path / "multi_label_cls" / "test_semisl" args_semisl = copy.deepcopy(args_m) args_semisl["--unlabeled-data-roots"] = args_m["--train-data-roots"] - args_semisl["train_params"].extend(["--algo_backend.train_type", "SEMISUPERVISED"]) + args_semisl["train_params"].extend(["--algo_backend.train_type", "Semisupervised"]) otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl) diff --git a/tests/integration/cli/detection/test_detection.py b/tests/integration/cli/detection/test_detection.py index 7e071a1ee35..44a92ad812c 100644 --- a/tests/integration/cli/detection/test_detection.py +++ b/tests/integration/cli/detection/test_detection.py @@ -48,7 +48,7 @@ "--learning_parameters.batch_size", "4", "--algo_backend.train_type", - "SEMISUPERVISED", + "Semisupervised", ], } diff --git a/tests/integration/cli/segmentation/test_segmentation.py b/tests/integration/cli/segmentation/test_segmentation.py index be027226fb6..54557124f8a 100644 --- a/tests/integration/cli/segmentation/test_segmentation.py +++ b/tests/integration/cli/segmentation/test_segmentation.py @@ -54,7 +54,7 @@ "--learning_parameters.batch_size", "4", "--algo_backend.train_type", - "SEMISUPERVISED", + "Semisupervised", ], } @@ -68,7 +68,7 @@ "--learning_parameters.batch_size", "4", "--algo_backend.train_type", - "SELFSUPERVISED", + "Selfsupervised", ], } diff --git a/tests/integration/cli/test_cli.py b/tests/integration/cli/test_cli.py index 1653f0cf939..825a811750b 100644 --- a/tests/integration/cli/test_cli.py +++ b/tests/integration/cli/test_cli.py @@ -36,9 +36,9 @@ "default": "EfficientNet-B0", "--task": "classification", "--model": "MobileNet-V3-large-1x", - "--train-type": "semisupervised", + "--train-type": "Semisupervised", }, - "detection": {"default": "ATSS", "--task": "detection", "--model": "SSD", "--train-type": "semisupervised"}, + "detection": {"default": "ATSS", "--task": "detection", "--model": "SSD", "--train-type": "Semisupervised"}, } @@ -59,19 +59,19 @@ def test_otx_build_rebuild(self, tmp_dir_path, case): tmp_dir_path = tmp_dir_path / "test_rebuild" / case # 1. Only Task build_arg = {"--task": rebuild_args[case]["--task"]} - expected = {"model": rebuild_args[case]["default"], "train_type": "INCREMENTAL"} + expected = {"model": rebuild_args[case]["default"], "train_type": "Incremental"} otx_build_testing(tmp_dir_path, build_arg, expected=expected) # 2. Change Model build_arg = {"--model": rebuild_args[case]["--model"]} - expected = {"model": rebuild_args[case]["--model"], "train_type": "INCREMENTAL"} + expected = {"model": rebuild_args[case]["--model"], "train_type": "Incremental"} otx_build_testing(tmp_dir_path, build_arg, expected=expected) # 3. Change Train-type build_arg = {"--train-type": rebuild_args[case]["--train-type"]} expected = {"model": rebuild_args[case]["--model"], "train_type": rebuild_args[case]["--train-type"]} otx_build_testing(tmp_dir_path, build_arg, expected=expected) # 4. 
Change to Default - build_arg = {"--model": rebuild_args[case]["default"], "--train-type": "INCREMENTAL"} - expected = {"model": rebuild_args[case]["default"], "train_type": "INCREMENTAL"} + build_arg = {"--model": rebuild_args[case]["default"], "--train-type": "Incremental"} + expected = {"model": rebuild_args[case]["default"], "train_type": "Incremental"} otx_build_testing(tmp_dir_path, build_arg, expected=expected) diff --git a/tests/regression/classification/test_classification.py b/tests/regression/classification/test_classification.py index 49a0593f014..f19fc6a72fd 100644 --- a/tests/regression/classification/test_classification.py +++ b/tests/regression/classification/test_classification.py @@ -170,7 +170,7 @@ def test_otx_train_semisl(self, template, tmp_dir_path): "--learning_parameters.num_iters", REGRESSION_TEST_EPOCHS, "--algo_backend.train_type", - "SEMISUPERVISED", + "Semisupervised", ] train_start_time = timer() otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl) @@ -228,7 +228,7 @@ def test_otx_train_selfsl(self, template, tmp_dir_path): "--learning_parameters.num_iters", "10", "--algo_backend.train_type", - "SELFSUPERVISED", + "Selfsupervised", ] # Self-supervised Training diff --git a/tests/regression/detection/test_detection.py b/tests/regression/detection/test_detection.py index 5837682e381..309fc539804 100644 --- a/tests/regression/detection/test_detection.py +++ b/tests/regression/detection/test_detection.py @@ -169,7 +169,7 @@ def test_otx_train_semisl(self, template, tmp_dir_path): "--learning_parameters.num_iters", REGRESSION_TEST_EPOCHS, "--algo_backend.train_type", - "SEMISUPERVISED", + "Semisupervised", ] train_start_time = timer() otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl) diff --git a/tests/regression/segmentation/test_segmentation.py b/tests/regression/segmentation/test_segmentation.py index e179559ce2c..f194245fd5a 100644 --- a/tests/regression/segmentation/test_segmentation.py +++ b/tests/regression/segmentation/test_segmentation.py @@ -170,7 +170,7 @@ def test_otx_train_semisl(self, template, tmp_dir_path): "--learning_parameters.num_iters", REGRESSION_TEST_EPOCHS, "--algo_backend.train_type", - "SEMISUPERVISED", + "Semisupervised", ] train_start_time = timer() otx_train_testing(template, tmp_dir_path, otx_dir, args_semisl) @@ -221,7 +221,7 @@ def test_otx_train_selfsl(self, template, tmp_dir_path): args_selfsl = config_selfsl["data_path"] selfsl_train_args = copy.deepcopy(args_selfsl) - selfsl_train_args["train_params"] = ["params", "--algo_backend.train_type", "SELFSUPERVISED"] + selfsl_train_args["train_params"] = ["params", "--algo_backend.train_type", "Selfsupervised"] # Self-supervised Training train_start_time = timer() diff --git a/tests/test_suite/run_test_command.py b/tests/test_suite/run_test_command.py index f385ce47382..9323ee43041 100644 --- a/tests/test_suite/run_test_command.py +++ b/tests/test_suite/run_test_command.py @@ -791,7 +791,7 @@ def otx_build_backbone_testing(root, backbone_args): task_workspace, ] check_run(command_line) - from otx.mpa.utils.config_utils import MPAConfig + from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig model_config = MPAConfig.fromfile(os.path.join(task_workspace, "model.py")) assert os.path.exists(os.path.join(task_workspace, "model.py")) @@ -807,13 +807,13 @@ def otx_build_testing(root, args: Dict[str, str], expected: Dict[str, str]): for option, value in args.items(): command_line.extend([option, value]) check_run(command_line) - from 
otx.mpa.utils.config_utils import MPAConfig
+    from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig

     template_config = MPAConfig.fromfile(os.path.join(workspace_root, "template.yaml"))
     assert template_config.name == expected["model"]
     assert (
         template_config.hyper_parameters.parameter_overrides.algo_backend.train_type.default_value
-        == expected["train_type"].upper()
+        == expected["train_type"]
     )
diff --git a/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_config_utils.py b/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_config_utils.py
index 202cc129767..e94ecad22c0 100644
--- a/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_config_utils.py
+++ b/tests/unit/algorithms/action/adapters/mmaction/utils/test_action_config_utils.py
@@ -4,6 +4,7 @@
 # SPDX-License-Identifier: Apache-2.0
 #
+import tempfile
 from collections import defaultdict

 import pytest
@@ -42,29 +43,28 @@ def test_patch_config() -> None:
     """
     cls_datapipeline_path = "otx/algorithms/action/configs/classification/x3d/data_pipeline.py"
-    work_dir = "OTX-tempdir9104"
-
-    with pytest.raises(NotImplementedError):
-        patch_config(CLS_CONFIG, cls_datapipeline_path, work_dir, TaskType.CLASSIFICATION)
-
-    patch_config(CLS_CONFIG, cls_datapipeline_path, work_dir, TaskType.ACTION_CLASSIFICATION)
-    assert CLS_CONFIG.work_dir == work_dir
-    assert CLS_CONFIG.get("train_pipeline", None)
-    for subset in ("train", "val", "test", "unlabeled"):
-        cfg = CLS_CONFIG.data.get(subset, None)
-        if not cfg:
-            continue
-        assert cfg.type == "OTXActionClsDataset"
-
-    det_datapipeline_path = "otx/algorithms/action/configs/detection/x3d_fast_rcnn/data_pipeline.py"
-    patch_config(DET_CONFIG, det_datapipeline_path, work_dir, TaskType.ACTION_DETECTION)
-    assert DET_CONFIG.work_dir == work_dir
-    assert DET_CONFIG.get("train_pipeline", None)
-    for subset in ("train", "val", "test", "unlabeled"):
-        cfg = DET_CONFIG.data.get(subset, None)
-        if not cfg:
-            continue
-        assert cfg.type == "OTXActionDetDataset"
+    with tempfile.TemporaryDirectory() as work_dir:
+        with pytest.raises(NotImplementedError):
+            patch_config(CLS_CONFIG, cls_datapipeline_path, work_dir, TaskType.CLASSIFICATION)
+
+        patch_config(CLS_CONFIG, cls_datapipeline_path, work_dir, TaskType.ACTION_CLASSIFICATION)
+        assert CLS_CONFIG.work_dir == work_dir
+        assert CLS_CONFIG.get("train_pipeline", None)
+        for subset in ("train", "val", "test", "unlabeled"):
+            cfg = CLS_CONFIG.data.get(subset, None)
+            if not cfg:
+                continue
+            assert cfg.type == "OTXActionClsDataset"
+
+        det_datapipeline_path = "otx/algorithms/action/configs/detection/x3d_fast_rcnn/data_pipeline.py"
+        patch_config(DET_CONFIG, det_datapipeline_path, work_dir, TaskType.ACTION_DETECTION)
+        assert DET_CONFIG.work_dir == work_dir
+        assert DET_CONFIG.get("train_pipeline", None)
+        for subset in ("train", "val", "test", "unlabeled"):
+            cfg = DET_CONFIG.data.get(subset, None)
+            if not cfg:
+                continue
+            assert cfg.type == "OTXActionDetDataset"

 @e2e_pytest_unit
diff --git a/tests/unit/algorithms/action/tools/test_action_sample_classification.py b/tests/unit/algorithms/action/tools/test_action_sample_classification.py
index 472907637dc..f821864bb39 100644
--- a/tests/unit/algorithms/action/tools/test_action_sample_classification.py
+++ b/tests/unit/algorithms/action/tools/test_action_sample_classification.py
@@ -50,7 +50,7 @@ def test_load_test_dataset() -> None:
     class MockTemplate:
         task_type = TaskType.ACTION_CLASSIFICATION
         hyper_parameters = Config(
-            {"parameter_overrides": {"algo_backend": {"train_type": {"default_value": TrainType.INCREMENTAL.value}}}}
+            {"parameter_overrides": {"algo_backend": {"train_type": {"default_value": TrainType.Incremental.value}}}}
         )

     dataset, label_schema = load_test_dataset(MockTemplate())
diff --git a/tests/unit/algorithms/action/tools/test_action_sample_detection.py b/tests/unit/algorithms/action/tools/test_action_sample_detection.py
index c002337526b..b774a6af8b7 100644
--- a/tests/unit/algorithms/action/tools/test_action_sample_detection.py
+++ b/tests/unit/algorithms/action/tools/test_action_sample_detection.py
@@ -51,7 +51,7 @@ def test_load_test_dataset() -> None:
     class MockTemplate:
         task_type = TaskType.ACTION_DETECTION
         hyper_parameters = Config(
-            {"parameter_overrides": {"algo_backend": {"train_type": {"default_value": TrainType.INCREMENTAL.value}}}}
+            {"parameter_overrides": {"algo_backend": {"train_type": {"default_value": TrainType.Incremental.value}}}}
         )

     dataset, label_schema = load_test_dataset(MockTemplate())
diff --git a/tests/unit/algorithms/action/utils/test_action_convert_public_data_to_cvat.py b/tests/unit/algorithms/action/utils/test_action_convert_public_data_to_cvat.py
index a7a19014098..26aa6099275 100644
--- a/tests/unit/algorithms/action/utils/test_action_convert_public_data_to_cvat.py
+++ b/tests/unit/algorithms/action/utils/test_action_convert_public_data_to_cvat.py
@@ -3,6 +3,8 @@
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
+import tempfile
+
 import numpy as np
 import pytest
@@ -95,24 +97,24 @@ def test_convert_action_cls_dataset_to_datumaro(mocker) -> None:
     """Test convert_jester_dataset_to_datumaro function."""
     src_path = "dummy_src_path"
-    dst_path = "dummy_dst_path"
     ann_file = "dummy_ann_file"
-    mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.open", return_value=MockFileObject())
-    mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.pathlib.Path.mkdir", return_value=True)
-    # mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.os.makedirs", return_value=True)
-    mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.shutil.copy", return_value=True)
-    mocker.patch(
-        "otx.algorithms.action.utils.convert_public_data_to_cvat.generate_default_cvat_xml_fields",
-        return_value=([], (256, 256, 3), []),
-    )
-    mocker.patch(
-        "otx.algorithms.action.utils.convert_public_data_to_cvat.os.listdir", return_value=(["frame0", "frame1"])
-    )
-    mocker.patch(
-        "otx.algorithms.action.utils.convert_public_data_to_cvat.etree.ElementTree", return_value=MockElementTree()
-    )
-    convert_action_cls_dataset_to_datumaro(src_path, dst_path, ann_file)
+    with tempfile.TemporaryDirectory() as dst_path:
+        mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.open", return_value=MockFileObject())
+        mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.pathlib.Path.mkdir", return_value=True)
+        # mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.os.makedirs", return_value=True)
+        mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.shutil.copy", return_value=True)
+        mocker.patch(
+            "otx.algorithms.action.utils.convert_public_data_to_cvat.generate_default_cvat_xml_fields",
+            return_value=([], (256, 256, 3), []),
+        )
+        mocker.patch(
+            "otx.algorithms.action.utils.convert_public_data_to_cvat.os.listdir", return_value=(["frame0", "frame1"])
+        )
+        mocker.patch(
+            "otx.algorithms.action.utils.convert_public_data_to_cvat.etree.ElementTree", return_value=MockElementTree()
+        )
+        convert_action_cls_dataset_to_datumaro(src_path, dst_path, ann_file)

 @e2e_pytest_unit
@@ -120,23 +122,23 @@ def test_convert_ava_dataset_to_datumaro(mocker) -> None:
     """Test convert_ava_dataset_to_datumaro function."""
     src_path = "dummy_src_path"
-    dst_path = "dummy_dst_path"
     ann_file = "dummy_ann_file"
-    mocker.patch(
-        "otx.algorithms.action.utils.convert_public_data_to_cvat.read_ava_csv",
-        return_value={"video_0": {"frame_idx": [[0, 0, 1, 1, "action"]]}},
-    )
-    mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.os.listdir", return_value=["video_0"])
-    mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.shutil.copytree", return_value=True)
-    mocker.patch(
-        "otx.algorithms.action.utils.convert_public_data_to_cvat.generate_default_cvat_xml_fields",
-        return_value=([], (256, 256, 3), []),
-    )
-    mocker.patch(
-        "otx.algorithms.action.utils.convert_public_data_to_cvat.etree.ElementTree", return_value=MockElementTree()
-    )
-    convert_ava_dataset_to_datumaro(src_path, dst_path, ann_file)
+    with tempfile.TemporaryDirectory() as dst_path:
+        mocker.patch(
+            "otx.algorithms.action.utils.convert_public_data_to_cvat.read_ava_csv",
+            return_value={"video_0": {"frame_idx": [[0, 0, 1, 1, "action"]]}},
+        )
+        mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.os.listdir", return_value=["video_0"])
+        mocker.patch("otx.algorithms.action.utils.convert_public_data_to_cvat.shutil.copytree", return_value=True)
+        mocker.patch(
+            "otx.algorithms.action.utils.convert_public_data_to_cvat.generate_default_cvat_xml_fields",
+            return_value=([], (256, 256, 3), []),
+        )
+        mocker.patch(
+            "otx.algorithms.action.utils.convert_public_data_to_cvat.etree.ElementTree", return_value=MockElementTree()
+        )
+        convert_ava_dataset_to_datumaro(src_path, dst_path, ann_file)

 @e2e_pytest_unit
diff --git a/tests/unit/algorithms/classification/adapters/mmcls/data/test_datasets.py b/tests/unit/algorithms/classification/adapters/mmcls/data/test_datasets.py
index 3fb7a4d2227..13e28c4e3e6 100644
--- a/tests/unit/algorithms/classification/adapters/mmcls/data/test_datasets.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/data/test_datasets.py
@@ -5,12 +5,12 @@
 import numpy as np
 import pytest

-from otx.algorithms.classification.adapters.mmcls.data import (
+from otx.algorithms.classification.adapters.mmcls.datasets import (
     OTXClsDataset,
     OTXHierarchicalClsDataset,
     OTXMultilabelClsDataset,
+    SelfSLDataset,
 )
-from otx.algorithms.classification.adapters.mmcls.data.datasets import SelfSLDataset
 from otx.algorithms.classification.utils import get_multihead_class_info
 from otx.api.entities.annotation import (
     Annotation,
diff --git a/tests/unit/algorithms/classification/adapters/mmcls/data/test_pipelines.py b/tests/unit/algorithms/classification/adapters/mmcls/data/test_pipelines.py
index be2c4e8eace..caaaab22845 100644
--- a/tests/unit/algorithms/classification/adapters/mmcls/data/test_pipelines.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/data/test_pipelines.py
@@ -2,7 +2,7 @@
 import pytest
 from PIL import Image

-from otx.algorithms.classification.adapters.mmcls.data.pipelines import (
+from otx.algorithms.classification.adapters.mmcls.datasets.pipelines.otx_pipelines import (
     GaussianBlur,
     LoadImageFromOTXDataset,
     OTXColorJitter,
@@ -57,7 +57,10 @@ def test_load_image_from_otx_dataset_call(to_float32):
 @e2e_pytest_unit
 def test_random_applied_transforms(mocker, inputs_np):
     """Test RandomAppliedTrans."""
-    mocker.patch("otx.algorithms.classification.adapters.mmcls.data.pipelines.build_from_cfg", return_value=lambda x: x)
+    mocker.patch(
+        "otx.algorithms.classification.adapters.mmcls.datasets.pipelines.otx_pipelines.build_from_cfg",
+        return_value=lambda x: x,
+    )

     random_applied_transforms = RandomAppliedTrans(transforms=[dict()])

@@ -106,7 +109,10 @@ def test_pil_image_to_nd_array(inputs_PIL) -> None:
 @e2e_pytest_unit
 def test_post_aug(mocker, inputs_np):
     """Test PostAug."""
-    mocker.patch("otx.algorithms.classification.adapters.mmcls.data.pipelines.Compose", return_value=lambda x: x)
+    mocker.patch(
+        "otx.algorithms.classification.adapters.mmcls.datasets.pipelines.otx_pipelines.Compose",
+        return_value=lambda x: x,
+    )

     post_aug = PostAug(keys=dict(orig=lambda x: x))
diff --git a/tests/unit/mpa/modules/models/classifiers/test_sam_classifier.py b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_sam_classifier.py
similarity index 97%
rename from tests/unit/mpa/modules/models/classifiers/test_sam_classifier.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_sam_classifier.py
index 4c2a3b3a73f..77dbf87f2c7 100644
--- a/tests/unit/mpa/modules/models/classifiers/test_sam_classifier.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_sam_classifier.py
@@ -3,7 +3,7 @@
 import pytest
 import torch

-from otx.mpa.modules.models.classifiers.sam_classifier import (
+from otx.algorithms.classification.adapters.mmcls.models.classifiers.sam_classifier import (
     ImageClassifier,
     SAMImageClassifier,
 )
diff --git a/tests/unit/mpa/modules/models/classifiers/test_semisl_classifier.py b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_semisl_classifier.py
similarity index 90%
rename from tests/unit/mpa/modules/models/classifiers/test_semisl_classifier.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_semisl_classifier.py
index 63109bb0b04..9612be1cf74 100644
--- a/tests/unit/mpa/modules/models/classifiers/test_semisl_classifier.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_semisl_classifier.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.models.classifiers.semisl_classifier import (
+from otx.algorithms.classification.adapters.mmcls.models.classifiers.semisl_classifier import (
     SAMImageClassifier,
     SemiSLClassifier,
 )
diff --git a/tests/unit/mpa/modules/models/classifiers/test_semisl_mlc_classifier.py b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_semisl_mlc_classifier.py
similarity index 90%
rename from tests/unit/mpa/modules/models/classifiers/test_semisl_mlc_classifier.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_semisl_mlc_classifier.py
index 3eae403dbe6..1d0d64fcebe 100644
--- a/tests/unit/mpa/modules/models/classifiers/test_semisl_mlc_classifier.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_semisl_mlc_classifier.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.models.classifiers.semisl_multilabel_classifier import (
+from otx.algorithms.classification.adapters.mmcls.models.classifiers.semisl_multilabel_classifier import (
     SAMImageClassifier,
     SemiSLMultilabelClassifier,
 )
diff --git a/tests/unit/mpa/modules/models/classifiers/test_supcon_classifier.py b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_supcon_classifier.py
similarity index 89%
rename from tests/unit/mpa/modules/models/classifiers/test_supcon_classifier.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_supcon_classifier.py
index a87357fb5e2..cff88b03e24 100644
--- a/tests/unit/mpa/modules/models/classifiers/test_supcon_classifier.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/classifiers/test_supcon_classifier.py
@@ -1,7 +1,7 @@
 import pytest
 import torch

-from otx.mpa.modules.models.classifiers.supcon_classifier import (
+from otx.algorithms.classification.adapters.mmcls.models.classifiers.supcon_classifier import (
     ImageClassifier,
     SupConClassifier,
 )
diff --git a/tests/unit/mpa/modules/heads/test_custom_cls_head.py b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_cls_head.py
similarity index 95%
rename from tests/unit/mpa/modules/heads/test_custom_cls_head.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_cls_head.py
index a3ee1db02a3..377966ad505 100644
--- a/tests/unit/mpa/modules/heads/test_custom_cls_head.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_cls_head.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.models.heads.custom_cls_head import (
+from otx.algorithms.classification.adapters.mmcls.models.heads.custom_cls_head import (
     CustomLinearClsHead,
     CustomNonLinearClsHead,
 )
diff --git a/tests/unit/mpa/modules/heads/test_custom_hierarchical_cls_head.py b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_hierarchical_cls_head.py
similarity index 90%
rename from tests/unit/mpa/modules/heads/test_custom_hierarchical_cls_head.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_hierarchical_cls_head.py
index 43c9d73ea2a..11f6e100996 100644
--- a/tests/unit/mpa/modules/heads/test_custom_hierarchical_cls_head.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_hierarchical_cls_head.py
@@ -5,13 +5,13 @@
 import pytest
 import torch

-from otx.mpa.modules.models.heads.custom_hierarchical_linear_cls_head import (
+from otx.algorithms.classification.adapters.mmcls.models.heads.custom_hierarchical_linear_cls_head import (
     CustomHierarchicalLinearClsHead,
 )
-from otx.mpa.modules.models.heads.custom_hierarchical_non_linear_cls_head import (
+from otx.algorithms.classification.adapters.mmcls.models.heads.custom_hierarchical_non_linear_cls_head import (
     CustomHierarchicalNonLinearClsHead,
 )
-from otx.mpa.modules.models.losses.asymmetric_loss_with_ignore import (
+from otx.algorithms.classification.adapters.mmcls.models.losses.asymmetric_loss_with_ignore import (
     AsymmetricLossWithIgnore,
 )
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/heads/test_custom_multilabel_cls_head.py b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_multilabel_cls_head.py
similarity index 87%
rename from tests/unit/mpa/modules/heads/test_custom_multilabel_cls_head.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_multilabel_cls_head.py
index 628051b5c06..c67afc16840 100644
--- a/tests/unit/mpa/modules/heads/test_custom_multilabel_cls_head.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_custom_multilabel_cls_head.py
@@ -5,13 +5,13 @@
 import pytest
 import torch

-from otx.mpa.modules.models.heads.custom_multi_label_linear_cls_head import (
+from otx.algorithms.classification.adapters.mmcls.models.heads.custom_multi_label_linear_cls_head import (
     CustomMultiLabelLinearClsHead,
 )
-from otx.mpa.modules.models.heads.custom_multi_label_non_linear_cls_head import (
+from otx.algorithms.classification.adapters.mmcls.models.heads.custom_multi_label_non_linear_cls_head import (
     CustomMultiLabelNonLinearClsHead,
 )
-from otx.mpa.modules.models.losses.asymmetric_loss_with_ignore import (
+from otx.algorithms.classification.adapters.mmcls.models.losses.asymmetric_loss_with_ignore import (
     AsymmetricLossWithIgnore,
 )
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/heads/test_multilabel_semisl.py b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_multilabel_semisl.py
similarity index 89%
rename from tests/unit/mpa/modules/heads/test_multilabel_semisl.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_multilabel_semisl.py
index 45367bb426b..eef4be46304 100644
--- a/tests/unit/mpa/modules/heads/test_multilabel_semisl.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_multilabel_semisl.py
@@ -5,14 +5,16 @@
 import pytest
 import torch

-from otx.mpa.modules.models.heads.semisl_multilabel_cls_head import (
+from otx.algorithms.classification.adapters.mmcls.models.heads.semisl_multilabel_cls_head import (
     SemiLinearMultilabelClsHead,
     SemiNonLinearMultilabelClsHead,
 )
-from otx.mpa.modules.models.losses.asymmetric_loss_with_ignore import (
+from otx.algorithms.classification.adapters.mmcls.models.losses.asymmetric_loss_with_ignore import (
     AsymmetricLossWithIgnore,
 )
-from otx.mpa.modules.models.losses.barlowtwins_loss import BarlowTwinsLoss
+from otx.algorithms.classification.adapters.mmcls.models.losses.barlowtwins_loss import (
+    BarlowTwinsLoss,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/heads/test_semisl_cls_head.py b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_semisl_cls_head.py
similarity index 97%
rename from tests/unit/mpa/modules/heads/test_semisl_cls_head.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_semisl_cls_head.py
index 180bb5899ee..4f0ddbd0a04 100644
--- a/tests/unit/mpa/modules/heads/test_semisl_cls_head.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/heads/test_semisl_cls_head.py
@@ -2,7 +2,7 @@
 import torch
 from mmcls.models.builder import build_head

-from otx.mpa.modules.models.heads.semisl_cls_head import (
+from otx.algorithms.classification.adapters.mmcls.models.heads.semisl_cls_head import (
     SemiLinearClsHead,
     SemiNonLinearClsHead,
 )
diff --git a/tests/unit/algorithms/classification/adapters/mmcls/models/losses/__init__.py b/tests/unit/algorithms/classification/adapters/mmcls/models/losses/__init__.py
new file mode 100644
index 00000000000..d369804d764
--- /dev/null
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/losses/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.classification.adapters.mmcls.models.losses."""
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/tests/unit/mpa/modules/losses/test_asymmetric_multilabel.py b/tests/unit/algorithms/classification/adapters/mmcls/models/losses/test_asymmetric_multilabel.py
similarity index 91%
rename from tests/unit/mpa/modules/losses/test_asymmetric_multilabel.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/losses/test_asymmetric_multilabel.py
index 14e355401e1..5c9d6311a55 100644
--- a/tests/unit/mpa/modules/losses/test_asymmetric_multilabel.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/losses/test_asymmetric_multilabel.py
@@ -5,10 +5,10 @@
 import pytest
 import torch

-from otx.mpa.modules.models.losses.asymmetric_angular_loss_with_ignore import (
+from otx.algorithms.classification.adapters.mmcls.models.losses.asymmetric_angular_loss_with_ignore import (
     AsymmetricAngularLossWithIgnore,
 )
-from otx.mpa.modules.models.losses.asymmetric_loss_with_ignore import (
+from otx.algorithms.classification.adapters.mmcls.models.losses.asymmetric_loss_with_ignore import (
     AsymmetricLossWithIgnore,
 )
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/losses/test_cross_entropy.py b/tests/unit/algorithms/classification/adapters/mmcls/models/losses/test_cross_entropy.py
similarity index 92%
rename from tests/unit/mpa/modules/losses/test_cross_entropy.py
rename to tests/unit/algorithms/classification/adapters/mmcls/models/losses/test_cross_entropy.py
index 6d20943853d..cadafc007f8 100644
--- a/tests/unit/mpa/modules/losses/test_cross_entropy.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/models/losses/test_cross_entropy.py
@@ -5,7 +5,9 @@
 import pytest
 import torch

-from otx.mpa.modules.models.losses.cross_entropy_loss import CrossEntropyLossWithIgnore
+from otx.algorithms.classification.adapters.mmcls.models.losses.cross_entropy_loss import (
+    CrossEntropyLossWithIgnore,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/algorithms/classification/adapters/mmcls/optimizer/__init__.py b/tests/unit/algorithms/classification/adapters/mmcls/optimizer/__init__.py
new file mode 100644
index 00000000000..5df735fd824
--- /dev/null
+++ b/tests/unit/algorithms/classification/adapters/mmcls/optimizer/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.classification.adapters.mmcls.optimizer"""
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/tests/unit/mpa/modules/optimizer/test_lars.py b/tests/unit/algorithms/classification/adapters/mmcls/optimizer/test_lars.py
similarity index 94%
rename from tests/unit/mpa/modules/optimizer/test_lars.py
rename to tests/unit/algorithms/classification/adapters/mmcls/optimizer/test_lars.py
index 2e31ea58276..f82abf3b1ee 100644
--- a/tests/unit/mpa/modules/optimizer/test_lars.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/optimizer/test_lars.py
@@ -3,9 +3,9 @@
 import torch
 import torch.nn as nn

-from otx.mpa.modules.optimizer.lars import LARS
+from otx.algorithms.classification.adapters.mmcls.optimizer.lars import LARS
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
-from tests.unit.mpa.test_helpers import (
+from tests.unit.algorithms.common.adapters.mmcv.tasks.test_helpers import (
     generate_random_torch_image,
     generate_toy_cnn_model,
 )
diff --git a/tests/unit/mpa/cls/incremental/test_cls_incremental_stage.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/incremental/test_cls_incremental_stage.py
similarity index 77%
rename from tests/unit/mpa/cls/incremental/test_cls_incremental_stage.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/incremental/test_cls_incremental_stage.py
index 4a6e5bbdaca..00d304cad1a 100644
--- a/tests/unit/mpa/cls/incremental/test_cls_incremental_stage.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/incremental/test_cls_incremental_stage.py
@@ -1,6 +1,8 @@
 import pytest

-from otx.mpa.cls.incremental.stage import IncrClsStage
+from otx.algorithms.classification.adapters.mmcls.tasks.incremental.stage import (
+    IncrClsStage,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters

@@ -20,7 +22,10 @@ def test_configure_classes(self, mode, mocker):
         self.stage.cfg.merge_from_dict(self.data_cfg)
         self.stage.cfg.task_adapt.op = mode
         origin_model_classes = ["label_0", "label_3", "label_n"]
-        mocker.patch("otx.mpa.cls.incremental.stage.IncrClsStage.get_model_classes", return_value=origin_model_classes)
+        mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.tasks.incremental.stage.IncrClsStage.get_model_classes",
+            return_value=origin_model_classes,
+        )
         self.stage.data_classes = self.data_cfg.data.train.data_classes  # ["label_0", "label_1"]
         self.stage.configure_classes(self.stage.cfg)
         merge_target = ["label_0", "label_1", "label_3", "label_n"]
@@ -32,7 +37,9 @@ def test_configure_classes(self, mode, mocker):

     @e2e_pytest_unit
     def test_configure_task_modules(self, monkeypatch, mocker):
-        mock_update_hook = mocker.patch("otx.mpa.cls.incremental.stage.update_or_add_custom_hook")
+        mock_update_hook = mocker.patch(
+            "otx.algorithms.classification.adapters.mmcls.tasks.incremental.stage.update_or_add_custom_hook"
+        )
         # some dummy classes
         self.stage.data_classes = [0, 1]
         self.stage.model_classes = [0, 1]
diff --git a/tests/unit/mpa/cls/semisl/test_cls_semisl_stage.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/semisl/test_cls_semisl_stage.py
similarity index 88%
rename from tests/unit/mpa/cls/semisl/test_cls_semisl_stage.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/semisl/test_cls_semisl_stage.py
index bc3b91a58b6..eac1ea7a091 100644
--- a/tests/unit/mpa/cls/semisl/test_cls_semisl_stage.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/semisl/test_cls_semisl_stage.py
@@ -4,8 +4,10 @@

 import pytest

-from otx.mpa.cls.semisl.stage import SemiSLClsStage
-from otx.mpa.cls.stage import ClsStage
+from otx.algorithms.classification.adapters.mmcls.tasks.semisl.stage import (
+    SemiSLClsStage,
+)
+from otx.algorithms.classification.adapters.mmcls.tasks.stage import ClsStage
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters
diff --git a/tests/unit/mpa/cls/test_cls_evaluator.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_evaluator.py
similarity index 85%
rename from tests/unit/mpa/cls/test_cls_evaluator.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_evaluator.py
index 0bd881db7dc..94832285791 100644
--- a/tests/unit/mpa/cls/test_cls_evaluator.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_evaluator.py
@@ -1,8 +1,8 @@
 import pytest

+from otx.algorithms.classification.adapters.mmcls.tasks.evaluator import ClsEvaluator
+from otx.algorithms.classification.adapters.mmcls.tasks.inferrer import ClsInferrer
 from otx.algorithms.classification.adapters.mmcls.utils.builder import build_classifier
-from otx.mpa.cls.evaluator import ClsEvaluator
-from otx.mpa.cls.inferrer import ClsInferrer
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters
diff --git a/tests/unit/mpa/cls/test_cls_explanier.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_explanier.py
similarity index 81%
rename from tests/unit/mpa/cls/test_cls_explanier.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_explanier.py
index f7137791e2d..3e21f3dd9bb 100644
--- a/tests/unit/mpa/cls/test_cls_explanier.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_explanier.py
@@ -2,9 +2,11 @@

 import pytest

-from otx.mpa.cls.explainer import ClsExplainer
-from otx.mpa.cls.stage import ClsStage
-from otx.mpa.modules.hooks.recording_forward_hooks import ActivationMapHook
+from otx.algorithms.classification.adapters.mmcls.tasks.explainer import ClsExplainer
+from otx.algorithms.classification.adapters.mmcls.tasks.stage import ClsStage
+from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import (
+    ActivationMapHook,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import (
     generate_cls_dataset,
@@ -30,7 +32,7 @@ def test_run(self, mocker):

     @e2e_pytest_unit
     def test_explain(self, mocker):
-        mocker.patch("otx.mpa.cls.explainer.build_data_parallel")
+        mocker.patch("otx.algorithms.classification.adapters.mmcls.tasks.explainer.build_data_parallel")
         mock_build_model = mocker.patch.object(ClsStage, "build_model")
         mocker.patch.object(ClsStage, "configure_samples_per_gpu")
         data_cfg = copy.deepcopy(self.data_cfg)
diff --git a/tests/unit/mpa/cls/test_cls_exporter.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_exporter.py
similarity index 83%
rename from tests/unit/mpa/cls/test_cls_exporter.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_exporter.py
index 427c58281e0..68b87e78fff 100644
--- a/tests/unit/mpa/cls/test_cls_exporter.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_exporter.py
@@ -1,9 +1,9 @@
 import pytest

+from otx.algorithms.classification.adapters.mmcls.tasks.exporter import ClsExporter
 from otx.algorithms.classification.adapters.mmcls.utils.builder import build_classifier
-from otx.mpa.cls.exporter import ClsExporter
-from otx.mpa.deploy.apis import NaiveExporter
-from otx.mpa.exporter_mixin import ExporterMixin
+from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin
+from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters
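Most hunks in this patch update `mocker.patch` target strings alongside the `otx.mpa` → `otx.algorithms.*` module moves. As a reminder of why the string must change with the move, here is a minimal, self-contained sketch; the `os.listdir` target mirrors the patching done in the CVAT-conversion tests above, but the test itself is purely illustrative:

```python
import os


def test_patch_target_follows_the_lookup_path(mocker):
    # pytest-mock replaces the attribute at the dotted path where it is
    # looked up at call time, so a module rename silently breaks any old
    # target string that still points at the pre-move location.
    mocker.patch("os.listdir", return_value=["frame0", "frame1"])
    assert os.listdir("any_dir") == ["frame0", "frame1"]
```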
diff --git a/tests/unit/mpa/cls/test_cls_inferrer.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_inferrer.py
similarity index 90%
rename from tests/unit/mpa/cls/test_cls_inferrer.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_inferrer.py
index 6d77050f3fa..2f3bae0d35c 100644
--- a/tests/unit/mpa/cls/test_cls_inferrer.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_inferrer.py
@@ -1,6 +1,6 @@
 import pytest

-from otx.mpa.cls.inferrer import ClsInferrer
+from otx.algorithms.classification.adapters.mmcls.tasks.inferrer import ClsInferrer
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters

@@ -29,7 +29,7 @@ def test_infer(self, mocker):
         mocker.patch.object(ClsInferrer, "configure_samples_per_gpu")
         mocker.patch.object(ClsInferrer, "configure_compat_cfg")
         mock_infer_callback = mocker.patch.object(ClsInferrer, "set_inference_progress_callback")
-        mocker.patch("otx.mpa.cls.inferrer.build_data_parallel")
+        mocker.patch("otx.algorithms.classification.adapters.mmcls.tasks.inferrer.build_data_parallel")
         mock_build_model = mocker.patch.object(ClsInferrer, "build_model")

         returned_value = self.inferrer.infer(cfg)
diff --git a/tests/unit/mpa/cls/test_cls_stage.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_stage.py
similarity index 92%
rename from tests/unit/mpa/cls/test_cls_stage.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_stage.py
index 3fef96cf3e4..a82be0ddbf3 100644
--- a/tests/unit/mpa/cls/test_cls_stage.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_stage.py
@@ -4,8 +4,8 @@

 import pytest

-from otx.mpa import Stage
-from otx.mpa.cls.stage import ClsStage
+from otx.algorithms.classification.adapters.mmcls.tasks.stage import ClsStage
+from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters

@@ -27,24 +27,24 @@ def test_configure(self, mocker):
         fake_arg = {"pretrained": True, "foo": "bar"}

         returned_value = self.stage.configure(self.model_cfg, "", self.data_cfg, True, **fake_arg)
-        mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, True, **fake_arg)
+        mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, **fake_arg)
         mock_cfg_ckpt.assert_called_once_with(self.stage.cfg, "", fake_arg.get("pretrained", None))
-        mock_cfg_data.assert_called_once_with(self.stage.cfg, self.data_cfg, True, **fake_arg)
-        mock_cfg_task.assert_called_once_with(self.stage.cfg, True, **fake_arg)
+        mock_cfg_data.assert_called_once_with(self.stage.cfg, self.data_cfg, True)
+        mock_cfg_task.assert_called_once_with(self.stage.cfg, True)
         assert returned_value == self.stage.cfg

     @e2e_pytest_unit
     def test_configure_model(self):
         fake_arg = {"ir_model_path": {"ir_weight_path": "", "ir_weight_init": ""}}
-        self.stage.configure_model(self.stage.cfg, self.model_cfg, True, **fake_arg)
+        self.stage.configure_model(self.stage.cfg, self.model_cfg, **fake_arg)

         assert self.stage.cfg.model_task

     @e2e_pytest_unit
     def test_configure_data(self, mocker):
         mock_super_cfg_data = mocker.patch.object(Stage, "configure_data")
-        self.stage.configure_data(self.stage.cfg, self.data_cfg, True, pretrained=None)
+        self.stage.configure_data(self.stage.cfg, self.data_cfg, True)

         mock_super_cfg_data.assert_called_once()
         assert self.stage.cfg.data
diff --git a/tests/unit/mpa/cls/test_cls_trainer.py b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_trainer.py
similarity index 82%
rename from tests/unit/mpa/cls/test_cls_trainer.py
rename to tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_trainer.py
index 7f4bf4644a8..69d13602027 100644
--- a/tests/unit/mpa/cls/test_cls_trainer.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/tasks/test_cls_trainer.py
@@ -1,6 +1,6 @@
 import pytest

-from otx.mpa.cls.trainer import ClsTrainer
+from otx.algorithms.classification.adapters.mmcls.tasks.trainer import ClsTrainer
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters

@@ -16,7 +16,7 @@ def test_run(self, mocker):
         mocker.patch.object(ClsTrainer, "configure_samples_per_gpu")
         mocker.patch.object(ClsTrainer, "configure_fp16_optimizer")
         mocker.patch.object(ClsTrainer, "configure_compat_cfg")
-        mock_train_classifier = mocker.patch("otx.mpa.cls.trainer.train_model")
+        mock_train_classifier = mocker.patch("otx.algorithms.classification.adapters.mmcls.tasks.trainer.train_model")
         mocker.patch.object(ClsTrainer, "build_model")
         self.trainer.run(self.model_cfg, "", self.data_cfg)

@@ -30,7 +30,7 @@ def test_run_with_distributed(self, mocker):
         mocker.patch.object(ClsTrainer, "configure_compat_cfg")
         spy_cfg_dist = mocker.spy(ClsTrainer, "_modify_cfg_for_distributed")
         mocker.patch.object(ClsTrainer, "build_model")
-        mock_train_classifier = mocker.patch("otx.mpa.cls.trainer.train_model")
+        mock_train_classifier = mocker.patch("otx.algorithms.classification.adapters.mmcls.tasks.trainer.train_model")
         self.trainer.run(self.model_cfg, "", self.data_cfg)

         spy_cfg_dist.assert_called_once()
diff --git a/tests/unit/algorithms/classification/adapters/mmcls/test_cls_config_builder.py b/tests/unit/algorithms/classification/adapters/mmcls/test_cls_config_builder.py
index 85b6c5b4eba..ec1f98e466f 100644
--- a/tests/unit/algorithms/classification/adapters/mmcls/test_cls_config_builder.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/test_cls_config_builder.py
@@ -5,7 +5,7 @@
 import pytest

 from otx.algorithms.classification.adapters.mmcls.utils import patch_evaluation
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/algorithms/classification/adapters/mmcls/test_mmcls_data_params_validation.py b/tests/unit/algorithms/classification/adapters/mmcls/test_mmcls_data_params_validation.py
index 2e8b9a0bc00..b7d0dd6128d 100644
--- a/tests/unit/algorithms/classification/adapters/mmcls/test_mmcls_data_params_validation.py
+++ b/tests/unit/algorithms/classification/adapters/mmcls/test_mmcls_data_params_validation.py
@@ -4,7 +4,7 @@
 import numpy as np
 import pytest

-from otx.algorithms.classification.adapters.mmcls.data import OTXClsDataset
+from otx.algorithms.classification.adapters.mmcls.datasets import OTXClsDataset
 from otx.api.entities.annotation import (
     Annotation,
     AnnotationSceneEntity,
diff --git a/tests/unit/algorithms/classification/test_helper.py b/tests/unit/algorithms/classification/test_helper.py
index 4a26c87e48b..d64fd06e55c 100644
--- a/tests/unit/algorithms/classification/test_helper.py
+++ b/tests/unit/algorithms/classification/test_helper.py
@@ -8,6 +8,7 @@
 import cv2 as cv
 import numpy as np

+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
 from otx.api.configuration.helper import create
 from otx.api.entities.annotation import (
     Annotation,
@@ -25,7 +26,6 @@
 from otx.api.entities.shapes.rectangle import Rectangle
 from otx.api.entities.subset import Subset
 from otx.api.entities.task_environment import TaskEnvironment
-from otx.mpa.utils.config_utils import MPAConfig

 DEFAULT_CLS_TEMPLATE_DIR = Path("otx") / "algorithms" / "classification" / "configs" / "mobilenet_v3_large_1_cls_incr"
 DEFAULT_CLS_TEMPLATE = DEFAULT_CLS_TEMPLATE_DIR / "template.yaml"
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_adaptive_training_hooks.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_adaptive_training_hooks.py
similarity index 89%
rename from tests/unit/mpa/modules/hooks/test_mpa_adaptive_training_hooks.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_adaptive_training_hooks.py
index 3d3a9a0eba8..51a30756b97 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_adaptive_training_hooks.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_adaptive_training_hooks.py
@@ -1,4 +1,4 @@
-"""Unit test for otx.mpa.modules.hooks.adaptive_training_hooks."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.adaptive_training_hooks."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -8,8 +8,12 @@
 from mmcv.runner.hooks.evaluation import EvalHook
 from mmcv.utils import Config

-from otx.mpa.modules.hooks.adaptive_training_hooks import AdaptiveTrainSchedulingHook
-from otx.mpa.modules.hooks.early_stopping_hook import EarlyStoppingHook
+from otx.algorithms.common.adapters.mmcv.hooks.adaptive_training_hook import (
+    AdaptiveTrainSchedulingHook,
+)
+from otx.algorithms.common.adapters.mmcv.hooks.early_stopping_hook import (
+    EarlyStoppingHook,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_cancel_interface_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_cancel_interface_hook.py
similarity index 89%
rename from tests/unit/mpa/modules/hooks/test_mpa_cancel_interface_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_cancel_interface_hook.py
index e7a4610f2c3..1147fa5a73b 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_cancel_interface_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_cancel_interface_hook.py
@@ -1,11 +1,11 @@
-"""Unit test for otx.mpa.modules.hooks.cancel_interface_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.cancel_interface_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
 from mmcv.runner import EpochBasedRunner

-from otx.mpa.modules.hooks.cancel_interface_hook import CancelInterfaceHook
+from otx.algorithms.common.adapters.mmcv.hooks.cancel_hook import CancelInterfaceHook
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_checkpoint_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_checkpoint_hook.py
similarity index 84%
rename from tests/unit/mpa/modules/hooks/test_mpa_checkpoint_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_checkpoint_hook.py
index 55e7e519a85..4b05a5f7089 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_checkpoint_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_checkpoint_hook.py
@@ -1,11 +1,13 @@
-"""Unit test for otx.mpa.modules.hooks.checkpoint_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.checkpoint_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
 from mmcv.utils import Config

-from otx.mpa.modules.hooks.checkpoint_hook import CheckpointHookWithValResults
+from otx.algorithms.common.adapters.mmcv.hooks.checkpoint_hook import (
+    CheckpointHookWithValResults,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit

@@ -52,7 +54,7 @@ def test_after_train_epoch(self, mocker) -> None:
         """Test after_train_epoch function."""
         mocker.patch.object(CheckpointHookWithValResults, "every_n_epochs", return_value=True)
-        mocker.patch("otx.mpa.modules.hooks.checkpoint_hook.allreduce_params", return_value=True)
+        mocker.patch("otx.algorithms.common.adapters.mmcv.hooks.checkpoint_hook.allreduce_params", return_value=True)
         hook = CheckpointHookWithValResults(sync_buffer=True, out_dir="./tmp_dir/")
         runner = MockRunner()
         hook.after_train_epoch(runner)
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_composed_dataloader_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_composed_dataloader_hook.py
similarity index 65%
rename from tests/unit/mpa/modules/hooks/test_mpa_composed_dataloader_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_composed_dataloader_hook.py
index 97a6b07fd9c..c97204b9aed 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_composed_dataloader_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_composed_dataloader_hook.py
@@ -1,9 +1,11 @@
-"""Unit test for otx.mpa.modules.hooks.composed_dataloaders_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.composed_dataloaders_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.composed_dataloaders_hook import ComposedDataLoadersHook
+from otx.algorithms.common.adapters.mmcv.hooks.composed_dataloaders_hook import (
+    ComposedDataLoadersHook,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_early_stopping_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_early_stopping_hook.py
similarity index 90%
rename from tests/unit/mpa/modules/hooks/test_mpa_early_stopping_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_early_stopping_hook.py
index a27291f2e81..fd952042d27 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_early_stopping_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_early_stopping_hook.py
@@ -1,4 +1,4 @@
-"""Unit test for otx.mpa.modules.hooks.early_stopping_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.early_stopping_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -11,7 +11,7 @@
 from mmcv.runner import BaseRunner, LrUpdaterHook
 from mmcv.utils import Config

-from otx.mpa.modules.hooks.early_stopping_hook import (
+from otx.algorithms.common.adapters.mmcv.hooks.early_stopping_hook import (
     EarlyStoppingHook,
     LazyEarlyStoppingHook,
     ReduceLROnPlateauLrUpdaterHook,
@@ -204,36 +204,32 @@ def test_init_rule(self) -> None:
         assert hook.compare_func(5, 9) is True

     @e2e_pytest_unit
-    def test_should_check_stopping(self) -> None:
+    def test_is_check_timing(self) -> None:
         """Test _should_check_stopping function."""
         hook = ReduceLROnPlateauLrUpdaterHook(interval=5, min_lr=1e-5)
         hook.by_epoch = False
         runner = MockRunner()
-        assert hook._should_check_stopping(runner) is True
-
-        runner._iter = 8
-        assert hook._should_check_stopping(runner) is False
+        assert hook._is_check_timing(runner) is False

     @e2e_pytest_unit
     def test_get_lr(self, mocker) -> None:
         """Test function for get_lr."""
-        mocker.patch.object(ReduceLROnPlateauLrUpdaterHook, "_should_check_stopping", return_value=False)
+        mocker.patch.object(ReduceLROnPlateauLrUpdaterHook, "_is_check_timing", return_value=False)
         hook = ReduceLROnPlateauLrUpdaterHook(interval=5, min_lr=1e-5)
         hook.warmup_iters = 3
         runner = MockRunner()
         assert hook.get_lr(runner, 1e-2) == 1e-2

-        mocker.patch.object(ReduceLROnPlateauLrUpdaterHook, "_should_check_stopping", return_value=True)
+        mocker.patch.object(ReduceLROnPlateauLrUpdaterHook, "_is_check_timing", return_value=True)
         hook = ReduceLROnPlateauLrUpdaterHook(interval=5, min_lr=1e-5)
         hook.warmup_iters = 3
         runner = MockRunner()
         assert hook.get_lr(runner, 1e-2) == 1e-2
         assert hook.bad_count == 0
-        assert hook.last_iter == 9

-        mocker.patch.object(ReduceLROnPlateauLrUpdaterHook, "_should_check_stopping", return_value=True)
+        mocker.patch.object(ReduceLROnPlateauLrUpdaterHook, "_is_check_timing", return_value=True)
         hook = ReduceLROnPlateauLrUpdaterHook(interval=5, min_lr=1e-5)
         hook.best_score = 90
         hook.warmup_iters = 3
@@ -250,9 +246,9 @@ def test_get_lr(self, mocker) -> None:
         hook.iteration_patience = 5
         hook.last_iter = 2
         runner = MockRunner()
-        assert hook.get_lr(runner, 1e-2) == 1e-3
-        assert hook.last_iter == 9
-        assert hook.bad_count == 0
+        assert hook.get_lr(runner, 1e-3) == 1e-3
+        assert hook.last_iter == 2
+        assert hook.bad_count == 2

     @e2e_pytest_unit
     def test_before_run(self) -> None:
@@ -264,7 +260,7 @@ def test_before_run(self) -> None:
         assert hook.base_lr == [1e-4]
         assert hook.bad_count == 0
         assert hook.last_iter == 0
-        assert hook.current_lr is None
+        assert hook.current_lr == -1.0
         assert hook.best_score == -inf
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_ema_v2_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_ema_v2_hook.py
similarity index 75%
rename from tests/unit/mpa/modules/hooks/test_mpa_ema_v2_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_ema_v2_hook.py
index ff46b406906..eeb1c8d3b1a 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_ema_v2_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_ema_v2_hook.py
@@ -1,9 +1,12 @@
-"""Unit test for otx.mpa.modules.hooks.model_ema_v2_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.model_ema_v2_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.model_ema_v2_hook import ModelEmaV2, ModelEmaV2Hook
+from otx.algorithms.common.adapters.mmcv.hooks.model_ema_v2_hook import (
+    ModelEmaV2,
+    ModelEmaV2Hook,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_eval_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_eval_hook.py
similarity index 96%
rename from tests/unit/mpa/modules/hooks/test_mpa_eval_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_eval_hook.py
index 6d1626382e0..960705fd980 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_eval_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_eval_hook.py
@@ -1,4 +1,4 @@
-"""Unit test for otx.mpa.modules.hooks.eval_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.eval_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -8,7 +8,7 @@
 from mmcv.runner import BaseRunner
 from torch.utils.data import DataLoader

-from otx.mpa.modules.hooks.eval_hook import (
+from otx.algorithms.common.adapters.mmcv.hooks.eval_hook import (
     CustomEvalHook,
     DistCustomEvalHook,
     single_gpu_test,
@@ -100,7 +100,7 @@ def test_do_evaluate(self, mocker) -> None:
         hook = CustomEvalHook(metric="accuracy", dataloader=MockDataloader())
         runner = MockRunner()
-        mocker.patch("otx.mpa.modules.hooks.eval_hook.single_gpu_test", return_value=[])
+        mocker.patch("otx.algorithms.common.adapters.mmcv.hooks.eval_hook.single_gpu_test", return_value=[])
         mocker.patch.object(CustomEvalHook, "evaluate", return_value=True)
         hook._do_evaluate(runner, ema=False)
         hook.ema_eval_start_epoch = 3
diff --git a/tests/unit/mpa/modules/hooks/test_mpafp16_sam_optimizer_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_fp16_sam_optimizer_hook.py
similarity index 66%
rename from tests/unit/mpa/modules/hooks/test_mpafp16_sam_optimizer_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_fp16_sam_optimizer_hook.py
index 3464c8dd385..bb1c24217e1 100644
--- a/tests/unit/mpa/modules/hooks/test_mpafp16_sam_optimizer_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_fp16_sam_optimizer_hook.py
@@ -1,9 +1,11 @@
-"""Unit test for otx.mpa.modules.hooks.fp16_sam_optimizer_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.fp16_sam_optimizer_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.fp16_sam_optimizer_hook import Fp16SAMOptimizerHook
+from otx.algorithms.common.adapters.mmcv.hooks.fp16_sam_optimizer_hook import (
+    Fp16SAMOptimizerHook,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_ib_loss_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_ib_loss_hook.py
similarity index 70%
rename from tests/unit/mpa/modules/hooks/test_mpa_ib_loss_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_ib_loss_hook.py
index 6c788ffa82f..e6086c0b1d2 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_ib_loss_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_ib_loss_hook.py
@@ -1,9 +1,9 @@
-"""Unit test for otx.mpa.modules.hooks.ib_loss_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.ib_loss_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.ib_loss_hook import IBLossHook
+from otx.algorithms.common.adapters.mmcv.hooks.ib_loss_hook import IBLossHook
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_logger_replace_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_logger_replace_hook.py
similarity index 72%
rename from tests/unit/mpa/modules/hooks/test_mpa_logger_replace_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_logger_replace_hook.py
index 1a5c5324d72..f0c5b523737 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_logger_replace_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_logger_replace_hook.py
@@ -1,9 +1,9 @@
-"""Unit test for otx.mpa.modules.hooks.logger_replace_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.logger_replace_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.logger_replace_hook import LoggerReplaceHook
+from otx.algorithms.common.adapters.mmcv.hooks import LoggerReplaceHook
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
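The hook tests above lean heavily on `mocker.patch.object` to stub a single method on a class (for example, `_is_check_timing` on `ReduceLROnPlateauLrUpdaterHook`). A minimal, self-contained sketch of that technique; the `Clock` class and test are hypothetical:

```python
class Clock:
    def now(self):
        raise RuntimeError("wall-clock time is not deterministic in CI")


def test_patch_object_stubs_one_method(mocker):
    # mocker.patch.object replaces the method on the class for the duration
    # of the test and restores the original automatically on teardown.
    mocker.patch.object(Clock, "now", return_value=1234)
    assert Clock().now() == 1234
```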
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_model_ema_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_model_ema_hook.py
similarity index 94%
rename from tests/unit/mpa/modules/hooks/test_mpa_model_ema_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_model_ema_hook.py
index b07bd97dd29..f387ae604f0 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_model_ema_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_model_ema_hook.py
@@ -1,4 +1,4 @@
-"""Unit test for otx.mpa.modules.hooks.model_ema_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.model_ema_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -9,7 +9,10 @@
 from mmcv.runner import BaseRunner
 from mmcv.runner.hooks.ema import EMAHook

-from otx.mpa.modules.hooks.model_ema_hook import CustomModelEMAHook, DualModelEMAHook
+from otx.algorithms.common.adapters.mmcv.hooks import (
+    CustomModelEMAHook,
+    DualModelEMAHook,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_no_bias_decay_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_no_bias_decay_hook.py
similarity index 90%
rename from tests/unit/mpa/modules/hooks/test_mpa_no_bias_decay_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_no_bias_decay_hook.py
index 5e28958cb0f..11e95fdfba2 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_no_bias_decay_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_no_bias_decay_hook.py
@@ -1,4 +1,4 @@
-"""Unit test for otx.mpa.modules.hooks.no_bias_decay_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.no_bias_decay_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -6,7 +6,7 @@
 import torch
 from mmcv.utils import Config

-from otx.mpa.modules.hooks.no_bias_decay_hook import NoBiasDecayHook
+from otx.algorithms.common.adapters.mmcv.hooks.no_bias_decay_hook import NoBiasDecayHook
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_recording_forward_hooks.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_recording_forward_hooks.py
similarity index 95%
rename from tests/unit/mpa/modules/hooks/test_mpa_recording_forward_hooks.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_recording_forward_hooks.py
index c1574ef8edb..b994a166bf4 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_recording_forward_hooks.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_recording_forward_hooks.py
@@ -1,4 +1,4 @@
-"""Unit test for otx.mpa.modules.hooks.recording_forward_hooks."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -7,7 +7,7 @@
 import pytest
 import torch

-from otx.mpa.modules.hooks.recording_forward_hooks import (
+from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import (
     ActivationMapHook,
     BaseRecordingForwardHook,
     EigenCamHook,
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_save_initial_weight_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_save_initial_weight_hook.py
similarity index 70%
rename from tests/unit/mpa/modules/hooks/test_mpa_save_initial_weight_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_save_initial_weight_hook.py
index 584cc2aa5aa..e666e75cd2b 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_save_initial_weight_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_save_initial_weight_hook.py
@@ -1,9 +1,9 @@
-"""Unit test for otx.mpa.modules.hooks.save_initial_weight_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.save_initial_weight_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.save_initial_weight_hook import SaveInitialWeightHook
+from otx.algorithms.common.adapters.mmcv.hooks import SaveInitialWeightHook
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_semisl_cls_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_semisl_cls_hook.py
similarity index 69%
rename from tests/unit/mpa/modules/hooks/test_mpa_semisl_cls_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_semisl_cls_hook.py
index 294f376bfed..47086c34391 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_semisl_cls_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_semisl_cls_hook.py
@@ -1,9 +1,9 @@
-"""Unit test for otx.mpa.modules.hooks.semisl_cls_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.semisl_cls_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.semisl_cls_hook import SemiSLClsHook
+from otx.algorithms.common.adapters.mmcv.hooks.semisl_cls_hook import SemiSLClsHook
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_task_adapt_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_task_adapt_hook.py
similarity index 69%
rename from tests/unit/mpa/modules/hooks/test_mpa_task_adapt_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_task_adapt_hook.py
index 5d8155572d8..15fdb8c8cee 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_task_adapt_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_task_adapt_hook.py
@@ -1,9 +1,9 @@
-"""Unit test for otx.mpa.modules.hooks.task_adapt_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.task_adapt_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.task_adapt_hook import TaskAdaptHook
+from otx.algorithms.common.adapters.mmcv.hooks.task_adapt_hook import TaskAdaptHook
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_unbiased_teacher_hook.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_unbiased_teacher_hook.py
similarity index 66%
rename from tests/unit/mpa/modules/hooks/test_mpa_unbiased_teacher_hook.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_unbiased_teacher_hook.py
index 2b845d4c9d5..ea3877683d0 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_unbiased_teacher_hook.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_unbiased_teacher_hook.py
@@ -1,9 +1,11 @@
-"""Unit test for otx.mpa.modules.hooks.unbiased_teacher_hook."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.unbiased_teacher_hook."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.unbiased_teacher_hook import UnbiasedTeacherHook
+from otx.algorithms.common.adapters.mmcv.hooks.unbiased_teacher_hook import (
+    UnbiasedTeacherHook,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_workflow_hooks.py b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_workflow_hooks.py
similarity index 89%
rename from tests/unit/mpa/modules/hooks/test_mpa_workflow_hooks.py
rename to tests/unit/algorithms/common/adapters/mmcv/hooks/test_workflow_hooks.py
index 973f973a0f1..1fe3709436c 100644
--- a/tests/unit/mpa/modules/hooks/test_mpa_workflow_hooks.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/hooks/test_workflow_hooks.py
@@ -1,9 +1,9 @@
-"""Unit test for otx.mpa.modules.hooks.workflow_hooks."""
+"""Unit test for otx.algorithms.common.adapters.mmcv.hooks.workflow_hooks."""
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-from otx.mpa.modules.hooks.workflow_hooks import (
+from otx.algorithms.common.adapters.mmcv.hooks.workflow_hook import (
     AfterStageWFHook,
     SampleLoggingHook,
     WFProfileHook,
diff --git a/tests/unit/algorithms/common/adapters/mmcv/pipelines/__init__.py b/tests/unit/algorithms/common/adapters/mmcv/pipelines/__init__.py
new file mode 100644
index 00000000000..e946fcc52f5
--- /dev/null
+++ b/tests/unit/algorithms/common/adapters/mmcv/pipelines/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.common.adapters.mmcv.pipelines"""
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/__init__.py b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/__init__.py
new file mode 100644
index 00000000000..e046173f05a
--- /dev/null
+++ b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.common.adapters.mmcv.pipelines.transforms"""
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_augments.py b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_augments.py
similarity index 97%
rename from tests/unit/mpa/modules/datasets/pipelines/transforms/test_augments.py
rename to tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_augments.py
index 4611962e10f..26f213dba45 100644
--- a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_augments.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_augments.py
@@ -10,7 +10,7 @@
 import pytest
 from PIL import Image

-from otx.mpa.modules.datasets.pipelines.transforms.augments import (
+from otx.algorithms.common.adapters.mmcv.pipelines.transforms.augments import (
     Augments,
     CythonAugments,
 )
diff --git a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_augmix.py b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_augmix.py
similarity index 91%
rename from tests/unit/mpa/modules/datasets/pipelines/transforms/test_augmix.py
rename to tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_augmix.py
index de9695e07d0..fd078c54167 100644
--- a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_augmix.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_augmix.py
@@ -10,11 +10,13 @@
 import pytest
 from PIL import Image

-from otx.mpa.modules.datasets.pipelines.transforms.augments import CythonAugments
-from otx.mpa.modules.datasets.pipelines.transforms.augmix import (
+from otx.algorithms.classification.adapters.mmcls.datasets.pipelines.transforms.augmix import (
     AugMixAugment,
     OpsFabric,
 )
+from otx.algorithms.common.adapters.mmcv.pipelines.transforms.augments import (
+    CythonAugments,
+)

 @pytest.fixture
@@ -32,10 +34,10 @@ def test_init(self, ops_fabric: OpsFabric) -> None:
             "fillcolor": 128,
             "resample": (Image.BILINEAR, Image.BICUBIC),
         }
-        assert ops_fabric.magnitude == 5
-        assert ops_fabric.magnitude_std == float("inf")
-        assert ops_fabric.level_fn == ops_fabric._rotate_level_to_arg
-        assert ops_fabric.aug_fn == CythonAugments.rotate
+        assert ops_fabric.aug_factory.magnitude == 5
+        assert ops_fabric.aug_factory.magnitude_std == float("inf")
+        assert ops_fabric.aug_factory.level_fn == ops_fabric._rotate_level_to_arg
+        assert ops_fabric.aug_factory.aug_fn == CythonAugments.rotate

     def test_randomly_negate(self) -> None:
         """Test randomly_negate function."""
diff --git a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_ote_transforms.py b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_otx_transforms.py
similarity index 91%
rename from tests/unit/mpa/modules/datasets/pipelines/transforms/test_ote_transforms.py
rename to tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_otx_transforms.py
index 9900ed8a520..2acff8f8e61 100644
--- a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_ote_transforms.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_otx_transforms.py
@@ -1,4 +1,4 @@
-"""Unit Tests for the MPA Dataset Pipelines OTE Transforms."""
+"""Unit Tests for the OTX Dataset Pipelines OTX Transforms."""
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
@@ -11,7 +11,7 @@
 from PIL import Image
 from torchvision.transforms import functional as F

-from otx.mpa.modules.datasets.pipelines.transforms.ote_transforms import (
+from otx.algorithms.classification.adapters.mmcls.datasets.pipelines.transforms.otx_transforms import (
     PILToTensor,
     RandomRotate,
     TensorNormalize,
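The `OTXRandAugment` tests in the next hunks pin `random.random()` below 0.5 so that the stochastic augmentation branch always runs. Here is the same technique in isolation; this is an illustrative test only, not part of the patch:

```python
import random


def test_pinning_random_makes_a_stochastic_branch_deterministic(mocker):
    # With random.random() fixed at 0.1, any "random.random() < 0.5"
    # branch is taken on every run, so the test cannot flake.
    mocker.patch("random.random", return_value=0.1)
    assert random.random() < 0.5
```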
         results = transform(data)
@@ -54,9 +55,10 @@ def test_with_default_arguments(self, sample_np_image: np.ndarray) -> None:
         assert any(item.startswith("rand_mc_") for item in results.keys())
         assert "CutoutAbs" in results
 
-    def test_with_img_fields_argument(self, sample_np_image: np.ndarray) -> None:
+    def test_with_img_fields_argument(self, mocker, sample_np_image: np.ndarray) -> None:
         """Test case with img_fields argument."""
-        transform = MPARandAugment(n=2, m=5, cutout=16)
+        mocker.patch("random.random", return_value=0.1)  # RandAugment is applied only when random.random() < 0.5
+        transform = OTXRandAugment(num_aug=2, magnitude=5, cutout_value=16)
         data = {
             "img1": sample_np_image,
             "img2": sample_np_image,
@@ -69,7 +71,7 @@ def test_with_img_fields_argument(self, sample_np_image: np.ndarray) -> None:
 
     def test_with_pil_image_input(self, sample_pil_image: Image.Image) -> None:
         """Test case with PIL.Image input."""
-        transform = MPARandAugment(n=2, m=5, cutout=16)
+        transform = OTXRandAugment(num_aug=2, magnitude=5, cutout_value=16)
         data = {"img": sample_pil_image}
         results = transform(data)
diff --git a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_twocrop_transform.py b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_twocrop_transform.py
similarity index 92%
rename from tests/unit/mpa/modules/datasets/pipelines/transforms/test_twocrop_transform.py
rename to tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_twocrop_transform.py
index 9e69b779faf..8118b67d0c2 100644
--- a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_twocrop_transform.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/pipelines/transforms/test_twocrop_transform.py
@@ -9,7 +9,7 @@
 from mmcls.datasets.pipelines import Compose
 from mmcv.utils import build_from_cfg
 
-from otx.mpa.modules.datasets.pipelines.transforms.twocrop_transform import (
+from otx.algorithms.classification.adapters.mmcls.datasets.pipelines.transforms.twocrop_transform import (
     TwoCropTransform,
 )
diff --git a/tests/unit/algorithms/common/adapters/mmcv/tasks/test_builder.py b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_builder.py
new file mode 100644
index 00000000000..9ebddb94c01
--- /dev/null
+++ b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_builder.py
@@ -0,0 +1,37 @@
+import mmcv
+
+from otx.algorithms.common.adapters.mmcv.tasks.builder import build
+from tests.test_suite.e2e_test_system import e2e_pytest_unit
+
+
+@e2e_pytest_unit
+def test_build_with_stages(mocker):
+    cfg = mmcv.ConfigDict(
+        stages=[mocker.MagicMock()],
+        type=mocker.MagicMock(),
+        workflow_hooks=[mocker.MagicMock()],
+    )
+    mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.builder.build_workflow_hook")
+    mock_build_from_cfg = mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.builder.build_from_cfg")
+    mock_workflow = mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.builder.Workflow")
+    mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.builder.config_logger")
+    mocker.patch("os.makedirs")
+    mocker.patch("os.unlink")
+    mocker.patch("os.symlink")
+
+    build(cfg)
+
+    mock_build_from_cfg.assert_called()
+    mock_workflow.assert_called_once()
+
+
+@e2e_pytest_unit
+def test_build_without_stages(mocker):
+    cfg = mmcv.ConfigDict()
+
+    mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.builder.get_available_types", return_value="MockStage")
+    mock_build_from_cfg = mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.builder.build_from_cfg")
+
+    build(cfg, None, "MockStage")
+
+    mock_build_from_cfg.assert_called_once()
diff --git a/tests/unit/mpa/test_export_mixin.py b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_export_mixin.py
similarity index 94%
rename from tests/unit/mpa/test_export_mixin.py
rename to tests/unit/algorithms/common/adapters/mmcv/tasks/test_export_mixin.py
index 6a3d045ca0d..3498038d260 100644
--- a/tests/unit/mpa/test_export_mixin.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_export_mixin.py
@@ -1,7 +1,7 @@
 import mmcv
 import pytest
 
-from otx.mpa.exporter_mixin import ExporterMixin
+from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
@@ -62,7 +62,7 @@ def mock_mmdeploy_export(output_dir, model_builder, precision, cfg, deploy_cfg,
 
     @e2e_pytest_unit
     def test_mmdeploy_export(self, mocker):
-        from otx.mpa.deploy.apis import MMdeployExporter
+        from otx.algorithms.common.adapters.mmdeploy.apis import MMdeployExporter
 
         mock_export_openvino = mocker.patch.object(MMdeployExporter, "export2openvino")
diff --git a/tests/unit/mpa/test_helpers.py b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_helpers.py
similarity index 100%
rename from tests/unit/mpa/test_helpers.py
rename to tests/unit/algorithms/common/adapters/mmcv/tasks/test_helpers.py
diff --git a/tests/unit/mpa/test_stage.py b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_stage.py
similarity index 91%
rename from tests/unit/mpa/test_stage.py
rename to tests/unit/algorithms/common/adapters/mmcv/tasks/test_stage.py
index 0f96e6dc81a..4bab8b87057 100644
--- a/tests/unit/mpa/test_stage.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_stage.py
@@ -3,7 +3,7 @@
 import mmcv
 import pytest
 
-from otx.mpa.stage import Stage, get_available_types
+from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage, get_available_types
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
@@ -65,7 +65,10 @@ def test_configure_data(self):
                 ),
             )
         )
-        Stage.configure_data(data_cfg, True)
+        fake_cfg = {"work_dir": "test_init_distributed"}
+        fake_common_cfg = {"output_path": "/tmp/output"}
+        stage = Stage("mpa_test", "", fake_cfg, fake_common_cfg, 0)
+        stage.configure_data(data_cfg, True)
         assert data_cfg.data.train.dataset.pipeline[1].img_scale == (224, 224)
         assert data_cfg.data.train.dataset.pipeline[1].transforms == _transform
@@ -100,7 +103,10 @@ def test_configure_samples_per_gpu(self, mocker):
         mock_otx_dataset = mocker.MagicMock()
         mock_otx_dataset.__len__.return_value = 1
 
-        mocker.patch("otx.mpa.stage.get_data_cfg", return_value=mmcv.ConfigDict(otx_dataset=mock_otx_dataset))
+        mocker.patch(
+            "otx.algorithms.common.adapters.mmcv.tasks.stage.get_data_cfg",
+            return_value=mmcv.ConfigDict(otx_dataset=mock_otx_dataset),
+        )
         Stage.configure_samples_per_gpu(cfg, "train", False)
 
         assert "train_dataloader" in cfg.data
@@ -136,8 +142,8 @@ def test_configure_unlabeled_dataloader(self, mocker):
         )
 
         mocker.patch("importlib.import_module")
-        mock_build_ul_dataset = mocker.patch("otx.mpa.stage.build_dataset")
-        mock_build_ul_dataloader = mocker.patch("otx.mpa.stage.build_dataloader")
+        mock_build_ul_dataset = mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.stage.build_dataset")
+        mock_build_ul_dataloader = mocker.patch("otx.algorithms.common.adapters.mmcv.tasks.stage.build_dataloader")
 
         Stage.configure_unlabeled_dataloader(cfg)
diff --git a/tests/unit/mpa/test_version.py b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_version.py
similarity index 66%
rename from tests/unit/mpa/test_version.py
rename to tests/unit/algorithms/common/adapters/mmcv/tasks/test_version.py
index 3550ae769ea..301a4d48971 100644
--- a/tests/unit/mpa/test_version.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_version.py
@@ -1,4 +1,4 @@
-from otx.mpa.version import __version__, get_version
+from otx.algorithms.common.adapters.mmcv.tasks.version import __version__, get_version
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/test_workflow.py b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_workflow.py
similarity index 77%
rename from tests/unit/mpa/test_workflow.py
rename to tests/unit/algorithms/common/adapters/mmcv/tasks/test_workflow.py
index 7363c19f34b..2d61477f7c5 100644
--- a/tests/unit/mpa/test_workflow.py
+++ b/tests/unit/algorithms/common/adapters/mmcv/tasks/test_workflow.py
@@ -1,10 +1,15 @@
 import mmcv
 
-from otx.mpa.stage import Stage
-from otx.mpa.workflow import Workflow
+from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage
+from otx.algorithms.common.adapters.mmcv.tasks.workflow import Workflow
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
+class MockStage(Stage):
+    def run(*args, **kwargs):
+        pass
+
+
 class TestWorkflow:
     @e2e_pytest_unit
     def test_run(self, mocker):
@@ -14,7 +19,7 @@ def test_run(self, mocker):
         }
         fake_common_cfg = {"output_path": "/path/output"}
         mocker.patch.object(mmcv, "mkdir_or_exist")
-        stage = Stage(
+        stage = MockStage(
             "MockStage",
             "",
             fake_cfg,
diff --git a/tests/unit/algorithms/common/adapters/mmdeploy/__init__.py b/tests/unit/algorithms/common/adapters/mmdeploy/__init__.py
new file mode 100644
index 00000000000..ff847f01203
--- /dev/null
+++ b/tests/unit/algorithms/common/adapters/mmdeploy/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (C) 2023 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/tests/unit/mpa/deploy/test_deploy_apis.py b/tests/unit/algorithms/common/adapters/mmdeploy/test_deploy_apis.py
similarity index 93%
rename from tests/unit/mpa/deploy/test_deploy_apis.py
rename to tests/unit/algorithms/common/adapters/mmdeploy/test_deploy_apis.py
index 3b72c29fd0b..fb4bfd33b52 100644
--- a/tests/unit/mpa/deploy/test_deploy_apis.py
+++ b/tests/unit/algorithms/common/adapters/mmdeploy/test_deploy_apis.py
@@ -9,10 +9,13 @@
 import torch
 from mmcv.utils import Config
 
-from otx.mpa.deploy.apis import NaiveExporter
-from otx.mpa.deploy.utils import is_mmdeploy_enabled
+from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter
+from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
-from tests.unit.mpa.deploy.test_helpers import create_config, create_model
+from tests.unit.algorithms.common.adapters.mmdeploy.test_helpers import (
+    create_config,
+    create_model,
+)
 
 
 class TestNaiveExporter:
@@ -58,7 +61,7 @@ def test_export2openvino(self):
 if is_mmdeploy_enabled():
     from mmdeploy.core import FUNCTION_REWRITER, mark
 
-    from otx.mpa.deploy.apis import MMdeployExporter
+    from otx.algorithms.common.adapters.mmdeploy.apis import MMdeployExporter
 
     class TestMMdeployExporter:
         @e2e_pytest_unit
@@ -197,7 +200,9 @@ def test_partition(self):
             )
             create_model("mmcls")
 
-            @FUNCTION_REWRITER.register_rewriter("tests.unit.mpa.deploy.test_helpers.MockModel.forward")
+            @FUNCTION_REWRITER.register_rewriter(
+                "tests.unit.algorithms.common.adapters.mmdeploy.test_helpers.MockModel.forward"
+            )
             @mark("test", inputs=["input"], outputs=["output"])
             def forward(ctx, self, *args, **kwargs):
                 return ctx.origin_func(self, *args, **kwargs)
diff --git a/tests/unit/mpa/deploy/test_helpers.py b/tests/unit/algorithms/common/adapters/mmdeploy/test_helpers.py
similarity index 100%
rename from tests/unit/mpa/deploy/test_helpers.py
rename to tests/unit/algorithms/common/adapters/mmdeploy/test_helpers.py
diff --git a/tests/unit/mpa/deploy/utils/test_deploy_utils_mmdeploy.py b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_mmdeploy.py
similarity index 83%
rename from tests/unit/mpa/deploy/utils/test_deploy_utils_mmdeploy.py
rename to tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_mmdeploy.py
index 88b86c9a092..b22c8178350 100644
--- a/tests/unit/mpa/deploy/utils/test_deploy_utils_mmdeploy.py
+++ b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_mmdeploy.py
@@ -6,12 +6,15 @@
 
 from mmcv.utils import Config
 
-from otx.mpa.deploy.utils.mmdeploy import (
+from otx.algorithms.common.adapters.mmdeploy.utils.mmdeploy import (
     is_mmdeploy_enabled,
     mmdeploy_init_model_helper,
 )
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
-from tests.unit.mpa.deploy.test_helpers import create_config, create_model
+from tests.unit.algorithms.common.adapters.mmdeploy.test_helpers import (
+    create_config,
+    create_model,
+)
 
 
 @e2e_pytest_unit
diff --git a/tests/unit/mpa/deploy/utils/test_deploy_utils_onnx.py b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_onnx.py
similarity index 85%
rename from tests/unit/mpa/deploy/utils/test_deploy_utils_onnx.py
rename to tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_onnx.py
index 376d9c16664..d6ea85a004c 100644
--- a/tests/unit/mpa/deploy/utils/test_deploy_utils_onnx.py
+++ b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_onnx.py
@@ -8,10 +8,13 @@
 import onnx
 import torch
 
-from otx.mpa.deploy.apis import NaiveExporter
-from otx.mpa.deploy.utils.onnx import prepare_onnx_for_openvino, remove_nodes_by_op_type
+from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter
+from otx.algorithms.common.adapters.mmdeploy.utils.onnx import (
+    prepare_onnx_for_openvino,
+    remove_nodes_by_op_type,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
-from tests.unit.mpa.deploy.test_helpers import create_model
+from tests.unit.algorithms.common.adapters.mmdeploy.test_helpers import create_model
 
 
 @e2e_pytest_unit
diff --git a/tests/unit/mpa/deploy/utils/test_deploy_utils_operations_domain.py b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_operations_domain.py
similarity index 67%
rename from tests/unit/mpa/deploy/utils/test_deploy_utils_operations_domain.py
rename to tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_operations_domain.py
index d6573075619..6a74f2aed8c 100644
--- a/tests/unit/mpa/deploy/utils/test_deploy_utils_operations_domain.py
+++ b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_operations_domain.py
@@ -2,7 +2,10 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-from otx.mpa.deploy.utils.operations_domain import DOMAIN_CUSTOM_OPS_NAME, add_domain
+from otx.algorithms.common.adapters.mmdeploy.utils.operations_domain import (
+    DOMAIN_CUSTOM_OPS_NAME,
+    add_domain,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
diff --git a/tests/unit/mpa/deploy/utils/test_deploy_utils_utils.py b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_utils.py
similarity index 95%
rename from tests/unit/mpa/deploy/utils/test_deploy_utils_utils.py
rename to tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_utils.py
index 6328cc6bbbd..b4e1dd41ee6 100644
--- a/tests/unit/mpa/deploy/utils/test_deploy_utils_utils.py
+++ b/tests/unit/algorithms/common/adapters/mmdeploy/utils/test_deploy_utils_utils.py
@@ -5,7 +5,10 @@
 import numpy as np
 import torch
 
-from otx.mpa.deploy.utils.utils import numpy_2_list, sync_batchnorm_2_batchnorm
+from otx.algorithms.common.adapters.mmdeploy.utils.utils import (
+    numpy_2_list,
+    sync_batchnorm_2_batchnorm,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
diff --git a/tests/unit/algorithms/common/adapters/torch/dataloaders/__init__.py b/tests/unit/algorithms/common/adapters/torch/dataloaders/__init__.py
new file mode 100644
index 00000000000..482cdca4b24
--- /dev/null
+++ b/tests/unit/algorithms/common/adapters/torch/dataloaders/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.common.adapters.torch.dataloaders"""
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
diff --git a/tests/unit/mpa/modules/datasets/samplers/test_balanced_sampler.py b/tests/unit/algorithms/common/adapters/torch/dataloaders/samplers/test_balanced_sampler.py
similarity index 81%
rename from tests/unit/mpa/modules/datasets/samplers/test_balanced_sampler.py
rename to tests/unit/algorithms/common/adapters/torch/dataloaders/samplers/test_balanced_sampler.py
index d6abd9bcd06..8a52e8334ab 100644
--- a/tests/unit/mpa/modules/datasets/samplers/test_balanced_sampler.py
+++ b/tests/unit/algorithms/common/adapters/torch/dataloaders/samplers/test_balanced_sampler.py
@@ -1,7 +1,7 @@
 import pytest
 from torch.utils.data import Dataset
 
-from otx.mpa.modules.datasets.samplers.cls_incr_sampler import ClsIncrSampler
+from otx.algorithms.common.adapters.torch.dataloaders.samplers import ClsIncrSampler
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
@@ -17,7 +17,8 @@ def __len__(self):
         self.mock_dataset = MockDataset()
 
         mocker.patch(
-            "otx.mpa.modules.datasets.samplers.cls_incr_sampler.unwrap_dataset", return_value=(self.mock_dataset, 1)
+            "otx.algorithms.common.adapters.torch.dataloaders.samplers.cls_incr_sampler.unwrap_dataset",
+            return_value=(self.mock_dataset, 1),
         )
 
     @e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/datasets/samplers/test_cls_incr_sampler.py b/tests/unit/algorithms/common/adapters/torch/dataloaders/samplers/test_cls_incr_sampler.py
similarity index 92%
rename from tests/unit/mpa/modules/datasets/samplers/test_cls_incr_sampler.py
rename to tests/unit/algorithms/common/adapters/torch/dataloaders/samplers/test_cls_incr_sampler.py
index 12da66b6321..7111866e969 100644
--- a/tests/unit/mpa/modules/datasets/samplers/test_cls_incr_sampler.py
+++ b/tests/unit/algorithms/common/adapters/torch/dataloaders/samplers/test_cls_incr_sampler.py
@@ -1,7 +1,7 @@
 import pytest
 from torch.utils.data import Dataset
 
-from otx.mpa.modules.datasets.samplers.balanced_sampler import BalancedSampler
+from otx.algorithms.common.adapters.torch.dataloaders.samplers import BalancedSampler
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
diff --git a/tests/unit/mpa/det/incremental/test_det_incremental_stage.py b/tests/unit/algorithms/detection/adapters/mmdet/tasks/incremental/test_det_incremental_stage.py
similarity index 66%
rename from tests/unit/mpa/det/incremental/test_det_incremental_stage.py
rename to tests/unit/algorithms/detection/adapters/mmdet/tasks/incremental/test_det_incremental_stage.py
index 76b355a7907..7979c34c78c 100644
--- a/tests/unit/mpa/det/incremental/test_det_incremental_stage.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/tasks/incremental/test_det_incremental_stage.py
@@ -1,7 +1,9 @@
 import pytest
 
-from otx.mpa.det.incremental.stage import IncrDetectionStage
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.detection.adapters.mmdet.tasks.incremental.stage import (
+    IncrDetectionStage,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
@@ -13,6 +15,6 @@ def test_configure_task(model_classes, mocker):
     stage.task_adapt_type = "mpa"
     stage.org_model_classes = ["red", "green"]
     stage.model_classes = model_classes
-    mock_config_task = mocker.patch("otx.mpa.det.stage.DetectionStage.configure_task")
+    mock_config_task = mocker.patch("otx.algorithms.detection.adapters.mmdet.tasks.stage.DetectionStage.configure_task")
     stage.configure_task(cfg, True)
     mock_config_task.assert_called_once()
diff --git a/tests/unit/mpa/det/semisl/test_det_semisl_stage.py b/tests/unit/algorithms/detection/adapters/mmdet/tasks/semisl/test_det_semisl_stage.py
similarity index 81%
rename from tests/unit/mpa/det/semisl/test_det_semisl_stage.py
rename to tests/unit/algorithms/detection/adapters/mmdet/tasks/semisl/test_det_semisl_stage.py
index 6a5d313e440..90800388757 100644
--- a/tests/unit/mpa/det/semisl/test_det_semisl_stage.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/tasks/semisl/test_det_semisl_stage.py
@@ -1,8 +1,10 @@
 import pytest
 
-from otx.mpa.det.semisl.stage import SemiSLDetectionStage
-from otx.mpa.det.stage import DetectionStage
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.detection.adapters.mmdet.tasks.semisl.stage import (
+    SemiSLDetectionStage,
+)
+from otx.algorithms.detection.adapters.mmdet.tasks.stage import DetectionStage
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 SEMISL_RECIPE_CONFIG_PATH = "otx/recipes/stages/detection/semisl.py"
diff --git a/tests/unit/mpa/det/test_det_exporter.py b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_exporter.py
similarity index 85%
rename from tests/unit/mpa/det/test_det_exporter.py
rename to tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_exporter.py
index 0cc89684254..9914cab8d5d 100644
--- a/tests/unit/mpa/det/test_det_exporter.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_exporter.py
@@ -2,11 +2,11 @@
 
 import pytest
 
+from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter
+from otx.algorithms.detection.adapters.mmdet.tasks.exporter import DetectionExporter
 from otx.algorithms.detection.adapters.mmdet.utils.builder import build_detector
-from otx.mpa.deploy.apis import NaiveExporter
-from otx.mpa.det.exporter import DetectionExporter
-from otx.mpa.exporter_mixin import ExporterMixin
-from otx.mpa.utils.config_utils import MPAConfig
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.detection.test_helpers import (
     DEFAULT_DET_RECIPE_CONFIG_PATH,
diff --git a/tests/unit/mpa/det/test_det_inferrer.py b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_inferrer.py
similarity index 95%
rename from tests/unit/mpa/det/test_det_inferrer.py
rename to tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_inferrer.py
index dc99ae61e22..7d9cb54c984 100644
--- a/tests/unit/mpa/det/test_det_inferrer.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_inferrer.py
@@ -5,8 +5,11 @@
 import numpy as np
 import pytest
 
-from otx.mpa.det.inferrer import DetectionInferrer, replace_ImageToTensor
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.detection.adapters.mmdet.tasks.inferrer import (
+    DetectionInferrer,
+    replace_ImageToTensor,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.detection.test_helpers import (
     DEFAULT_DET_RECIPE_CONFIG_PATH,
diff --git a/tests/unit/mpa/det/test_det_stage.py b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_stage.py
similarity index 92%
rename from tests/unit/mpa/det/test_det_stage.py
rename to tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_stage.py
index c52730bb84f..b71a13d0d31 100644
--- a/tests/unit/mpa/det/test_det_stage.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_stage.py
@@ -3,9 +3,9 @@
 
 import pytest
 
-from otx.mpa import Stage
-from otx.mpa.det.stage import DetectionStage
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.tasks import Stage
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.detection.adapters.mmdet.tasks.stage import DetectionStage
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.detection.test_helpers import (
     DEFAULT_DET_RECIPE_CONFIG_PATH,
@@ -31,17 +31,17 @@ def test_configure(self, mocker):
         fake_arg = {"pretrained": True, "foo": "bar"}
         returned_value = self.stage.configure(self.model_cfg, "", self.data_cfg, True, **fake_arg)
-        mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, True, **fake_arg)
+        mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, **fake_arg)
         mock_cfg_ckpt.assert_called_once_with(self.stage.cfg, "", fake_arg.get("pretrained", None))
         mock_cfg_regularization.assert_called_once_with(self.stage.cfg, True)
-        mock_cfg_task.assert_called_once_with(self.stage.cfg, True, **fake_arg)
+        mock_cfg_task.assert_called_once_with(self.stage.cfg, True)
         mock_cfg_hook.assert_called_once_with(self.stage.cfg)
         assert returned_value == self.stage.cfg
 
     @e2e_pytest_unit
     def test_configure_model(self):
         fake_arg = {"ir_model_path": {"ir_weight_path": "", "ir_weight_init": ""}}
-        self.stage.configure_model(self.stage.cfg, self.model_cfg, True, **fake_arg)
+        self.stage.configure_model(self.stage.cfg, self.model_cfg, **fake_arg)
         assert self.stage.cfg.model_task
 
     @e2e_pytest_unit
@@ -50,7 +50,7 @@ def test_configure_model_without_model(self):
         model_cfg = copy.deepcopy(self.model_cfg)
         model_cfg.pop("model")
         with pytest.raises(ValueError):
-            self.stage.configure_model(self.stage.cfg, model_cfg, True, **fake_arg)
+            self.stage.configure_model(self.stage.cfg, model_cfg, **fake_arg)
 
     @e2e_pytest_unit
     def test_configure_model_not_detection_task(self):
@@ -58,12 +58,12 @@ def test_configure_model_not_detection_task(self):
         stage_cfg = copy.deepcopy(self.stage.cfg)
         stage_cfg.model.task = "classification"
         with pytest.raises(ValueError):
-            self.stage.configure_model(stage_cfg, self.model_cfg, True, **fake_arg)
+            self.stage.configure_model(stage_cfg, self.model_cfg, **fake_arg)
 
     @e2e_pytest_unit
     def test_configure_data(self, mocker):
         mock_super_cfg_data = mocker.patch.object(Stage, "configure_data")
-        self.stage.configure_data(self.stage.cfg, True, self.data_cfg, pretrained=None)
+        self.stage.configure_data(self.stage.cfg, True, self.data_cfg)
         mock_super_cfg_data.assert_called_once()
         assert self.stage.cfg.data
         assert self.stage.cfg.data.train
@@ -90,7 +90,7 @@ def test_configure_regularization(self):
     def test_configure_hyperparams(self):
         stage_cfg = copy.deepcopy(self.stage.cfg)
         stage_cfg.hyperparams = dict()
-        self.stage.configure_hyperparams(stage_cfg, True, hyperparams=dict(bs=2, lr=0.002))
+        self.stage.configure_hyperparams(stage_cfg, hyperparams=dict(bs=2, lr=0.002))
         assert stage_cfg.data.samples_per_gpu == 2
         assert stage_cfg.optimizer.lr == 0.002
@@ -101,7 +101,7 @@ def test_configure_anchor(self):
         stage_cfg.merge_from_dict(
             dict(model=dict(bbox_head=dict(anchor_generator=dict(type="SSDAnchorGeneratorClustered"))))
         )
-        self.stage.configure_anchor(stage_cfg, True)
+        self.stage.configure_anchor(stage_cfg)
 
     @e2e_pytest_unit
     def test_add_yolox_hooks(self):
diff --git a/tests/unit/mpa/det/test_det_trainer.py b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_trainer.py
similarity index 87%
rename from tests/unit/mpa/det/test_det_trainer.py
rename to tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_trainer.py
index 73d9eb350db..cdeebb19ce5 100644
--- a/tests/unit/mpa/det/test_det_trainer.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/tasks/test_det_trainer.py
@@ -3,8 +3,8 @@
 
 import pytest
 
-from otx.mpa.det.trainer import DetectionTrainer
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.detection.adapters.mmdet.tasks.trainer import DetectionTrainer
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.detection.test_helpers import (
     DEFAULT_DET_RECIPE_CONFIG_PATH,
@@ -26,7 +26,7 @@ def test_run(self, mocker):
         mocker.patch.object(DetectionTrainer, "configure_samples_per_gpu")
         mocker.patch.object(DetectionTrainer, "configure_fp16_optimizer")
         mocker.patch.object(DetectionTrainer, "configure_compat_cfg")
-        mock_train_detector = mocker.patch("otx.mpa.det.trainer.train_detector")
+        mock_train_detector = mocker.patch("otx.algorithms.detection.adapters.mmdet.tasks.trainer.train_detector")
         with tempfile.TemporaryDirectory() as tmp_dir:
             fake_json_path = os.path.join(tmp_dir, "fake_data.json")
             create_dummy_coco_json(fake_json_path)
@@ -45,7 +45,7 @@ def test_run_with_distributed(self, mocker):
         mocker.patch.object(DetectionTrainer, "configure_fp16_optimizer")
         mocker.patch.object(DetectionTrainer, "configure_compat_cfg")
         spy_cfg_dist = mocker.spy(DetectionTrainer, "_modify_cfg_for_distributed")
-        mock_train_detector = mocker.patch("otx.mpa.det.trainer.train_detector")
+        mock_train_detector = mocker.patch("otx.algorithms.detection.adapters.mmdet.tasks.trainer.train_detector")
 
         with tempfile.TemporaryDirectory() as tmp_dir:
             fake_json_path = os.path.join(tmp_dir, "fake_data.json")
diff --git a/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_builder.py b/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_builder.py
index a4b9a914490..4b3bd6e2998 100644
--- a/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_builder.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_builder.py
@@ -5,8 +5,8 @@
 #
 
 import pytest
 
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
 from otx.algorithms.detection.adapters.mmdet.utils import build_detector
-from otx.mpa.utils.config_utils import MPAConfig
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.detection.test_helpers import (
     DEFAULT_DET_MODEL_CONFIG_PATH,
diff --git a/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_config_utils.py b/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_config_utils.py
index 31c6e523631..11e2b272fe0 100644
--- a/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_config_utils.py
+++ b/tests/unit/algorithms/detection/adapters/mmdet/utils/test_detection_config_utils.py
@@ -11,6 +11,7 @@
 from mmcv.utils import Config, ConfigDict
 
 from otx.algorithms.common.adapters.mmcv.utils import is_epoch_based_runner
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
 from otx.algorithms.detection.adapters.mmdet.utils.config_utils import (
     cluster_anchors,
     patch_adaptive_repeat_dataset,
@@ -25,7 +26,6 @@
 from otx.algorithms.detection.configs.base import DetectionConfig
 from otx.api.entities.label import Domain
 from otx.api.entities.model_template import TaskType
-from otx.mpa.utils.config_utils import MPAConfig
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.detection.test_helpers import (
     DEFAULT_DET_MODEL_CONFIG_PATH,
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/data/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/data/__init__.py
deleted file mode 100644
index 3bdbe22ef68..00000000000
--- a/tests/unit/algorithms/segmentation/adapters/mmseg/data/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.algorithms.segmentation.adapters.mmseg.data"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/data/test_pipelines.py b/tests/unit/algorithms/segmentation/adapters/mmseg/data/test_pipelines.py
deleted file mode 100644
index 2ff48d5c195..00000000000
--- a/tests/unit/algorithms/segmentation/adapters/mmseg/data/test_pipelines.py
+++ /dev/null
@@ -1,151 +0,0 @@
-from typing import Any, Dict
-
-import numpy as np
-import pytest
-from PIL import Image
-
-from otx.algorithms.segmentation.adapters.mmseg.data.pipelines import (
-    NDArrayToPILImage,
-    PILImageToNDArray,
-    RandomColorJitter,
-    RandomGaussianBlur,
-    RandomGrayscale,
-    RandomResizedCrop,
-    RandomSolarization,
-    TwoCropTransform,
-)
-from tests.test_suite.e2e_test_system import e2e_pytest_unit
-
-
-@pytest.fixture(scope="module")
-def inputs_np():
-    return {
-        "img": np.random.randint(0, 10, (16, 16, 3), dtype=np.uint8),
-        "gt_semantic_seg": np.random.rand(16, 16),
-        "flip": True,
-    }
-
-
-@pytest.fixture(scope="module")
-def inputs_PIL():
-    return {
-        "img": Image.fromarray(np.random.randint(0, 10, (16, 16, 3), dtype=np.uint8)),
-        "gt_semantic_seg": np.random.randint(0, 5, (16, 16), dtype=np.uint8),
-        "seg_fields": ["gt_semantic_seg"],
-        "ori_shape": (16, 16, 3),
-    }
-
-
-class TestTwoCropTransform:
-    @pytest.fixture(autouse=True)
-    def setup(self, mocker) -> None:
-        mocker.patch(
-            "otx.algorithms.segmentation.adapters.mmseg.data.pipelines.build_from_cfg", return_value=lambda x: x
-        )
-        self.two_crop_transform = TwoCropTransform(view0=[], view1=[])
-
-    @e2e_pytest_unit
-    def test_call(self, mocker, inputs_np: Dict[str, Any]) -> None:
-        """Test __call__."""
-        results = self.two_crop_transform(inputs_np)
-
-        assert isinstance(results, dict)
-        assert "img" in results and results["img"].ndim == 4
-        assert "gt_semantic_seg" in results and results["gt_semantic_seg"].ndim == 3
-        assert "flip" in results and isinstance(results["flip"], list)
-
-    @e2e_pytest_unit
-    def test_call_with_single_pipeline(self, mocker, inputs_np: Dict[str, Any]) -> None:
-        """Test __call__ with single pipeline."""
-        self.two_crop_transform.is_both = False
-
-        results = self.two_crop_transform(inputs_np)
-
-        assert isinstance(results, dict)
-        assert "img" in results and results["img"].ndim == 3
-        assert "gt_semantic_seg" in results and results["gt_semantic_seg"].ndim == 2
-        assert "flip" in results and isinstance(results["flip"], bool)
-
-
-@e2e_pytest_unit
-def test_random_resized_crop(inputs_PIL: Dict[str, Any]) -> None:
-    """Test RandomResizedCrop."""
-    random_resized_crop = RandomResizedCrop(size=(8, 8))
-
-    results = random_resized_crop(inputs_PIL)
-
-    assert isinstance(results, dict)
-    assert "img" in results and results["img"].size == (8, 8)
-    assert "gt_semantic_seg" in results and results["gt_semantic_seg"].shape == (8, 8)
-    assert "img_shape" in results
-    assert "ori_shape" in results
-    assert "scale_factor" in results
-
-
-@e2e_pytest_unit
-def test_random_color_jitter(inputs_PIL: Dict[str, Any]) -> None:
-    """Test RandomColorJitter."""
-    random_color_jitter = RandomColorJitter(p=1.0)
-
-    results = random_color_jitter(inputs_PIL)
-
-    assert isinstance(results, dict)
-    assert "img" in results
-
-
-@e2e_pytest_unit
-def test_random_grayscale(inputs_PIL: Dict[str, Any]) -> None:
-    """Test RandomGrayscale."""
-    random_grayscale = RandomGrayscale()
-
-    results = random_grayscale(inputs_PIL)
-
-    assert isinstance(results, dict)
-    assert "img" in results
-
-
-@e2e_pytest_unit
-def test_random_gaussian_blur(inputs_PIL: Dict[str, Any]) -> None:
-    """Test RandomGaussianBlur."""
-    random_gaussian_blur = RandomGaussianBlur(p=1.0, kernel_size=3)
-
-    results = random_gaussian_blur(inputs_PIL)
-
-    assert isinstance(results, dict)
-    assert "img" in results
-
-
-@e2e_pytest_unit
-def test_random_solarization(inputs_np: Dict[str, Any]) -> None:
-    """Test RandomSolarization."""
-    random_solarization = RandomSolarization(p=1.0)
-
-    results = random_solarization(inputs_np)
-
-    assert isinstance(results, dict)
-    assert "img" in results
-    assert repr(random_solarization) == "RandomSolarization"
-
-
-@e2e_pytest_unit
-def test_nd_array_to_pil_image(inputs_np: Dict[str, Any]) -> None:
-    """Test NDArrayToPILImage."""
-    nd_array_to_pil_image = NDArrayToPILImage(keys=["img"])
-
-    results = nd_array_to_pil_image(inputs_np)
-
-    assert "img" in results
-    assert isinstance(results["img"], Image.Image)
-    assert repr(nd_array_to_pil_image) == "NDArrayToPILImage"
-
-
-@e2e_pytest_unit
-def test_pil_image_to_nd_array(inputs_PIL: Dict[str, Any]) -> None:
-    """Test PILImageToNDArray."""
-    pil_image_to_nd_array = PILImageToNDArray(keys=["img"])
-
-    results = pil_image_to_nd_array(inputs_PIL)
-
-    assert "img" in results
-    assert isinstance(results["img"], np.ndarray)
-    assert repr(pil_image_to_nd_array) == "PILImageToNDArray"
diff --git a/otx/mpa/modules/datasets/pipelines/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/__init__.py
similarity index 54%
rename from otx/mpa/modules/datasets/pipelines/__init__.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/datasets/__init__.py
index 4e1701262e2..d671e6bb59c 100644
--- a/otx/mpa/modules/datasets/pipelines/__init__.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/__init__.py
@@ -1,5 +1,4 @@
+"""Test for otx.algorithms.segmentation.adapters.mmseg.datasets"""
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-
-# flake8: noqa
diff --git a/otx/mpa/modules/datasets/samplers/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/__init__.py
similarity index 50%
rename from otx/mpa/modules/datasets/samplers/__init__.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/__init__.py
index 4e1701262e2..e2b1bd6ce7b 100644
--- a/otx/mpa/modules/datasets/samplers/__init__.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/__init__.py
@@ -1,5 +1,4 @@
+"""Test for otx.algorithms.segmentation.adapters.mmseg.datasets.pipelines"""
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
-
-# flake8: noqa
diff --git a/tests/unit/mpa/modules/datasets/pipelines/test_compose.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_compose.py
similarity index 98%
rename from tests/unit/mpa/modules/datasets/pipelines/test_compose.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_compose.py
index 96b9602fd8b..4c777d83a3f 100644
--- a/tests/unit/mpa/modules/datasets/pipelines/test_compose.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_compose.py
@@ -12,7 +12,7 @@
 from mmseg.datasets.builder import PIPELINES
 from mmseg.datasets.pipelines import RandomCrop
 
-from otx.mpa.modules.datasets.pipelines.compose import MaskCompose, ProbCompose
+from otx.algorithms.segmentation.adapters.mmseg.datasets import MaskCompose, ProbCompose
 
 
 class TestProbCompose:
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_loads.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_loads.py
new file mode 100644
index 00000000000..37d5275fb2e
--- /dev/null
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_loads.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import numpy as np
+import pytest
+
+from otx.algorithms.segmentation.adapters.mmseg.datasets.pipelines.loads import (
+    LoadAnnotationFromOTXDataset,
+)
+from otx.api.entities.annotation import (
+    Annotation,
+    AnnotationSceneEntity,
+    AnnotationSceneKind,
+)
+from otx.api.entities.dataset_item import DatasetItemEntity
+from otx.api.entities.image import Image
+from otx.api.entities.label import Domain, LabelEntity
+from otx.api.entities.scored_label import ScoredLabel
+from otx.api.entities.shapes.rectangle import Rectangle
+from tests.test_suite.e2e_test_system import e2e_pytest_unit
+
+
+def label_entity(name="test label") -> LabelEntity:
+    return LabelEntity(name=name, domain=Domain.SEGMENTATION)
+
+
+def dataset_item() -> DatasetItemEntity:
+    image: Image = Image(data=np.random.randint(low=0, high=255, size=(10, 16, 3)))
+    annotation: Annotation = Annotation(shape=Rectangle.generate_full_box(), labels=[ScoredLabel(label_entity())])
+    annotation_scene: AnnotationSceneEntity = AnnotationSceneEntity(
+        annotations=[annotation], kind=AnnotationSceneKind.ANNOTATION
+    )
+    return DatasetItemEntity(media=image, annotation_scene=annotation_scene)
+
+
+class TestLoadAnnotationFromOTXDataset:
+    @pytest.fixture(autouse=True)
+    def setUp(self) -> None:
+
+        self.dataset_item: DatasetItemEntity = dataset_item()
+        self.results: dict = {
+            "dataset_item": self.dataset_item,
+            "ann_info": {"labels": [label_entity("class_1")]},
+            "seg_fields": [],
+        }
+        self.pipeline: LoadAnnotationFromOTXDataset = LoadAnnotationFromOTXDataset()
+
+    @e2e_pytest_unit
+    def test_call(self) -> None:
+        loaded_annotations: dict = self.pipeline(self.results)
+        assert "gt_semantic_seg" in loaded_annotations
+        assert loaded_annotations["dataset_item"] == self.dataset_item
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_pipelines_params_validation.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_pipelines_params_validation.py
similarity index 97%
rename from tests/unit/algorithms/segmentation/adapters/mmseg/test_pipelines_params_validation.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_pipelines_params_validation.py
index 41373df066c..db88ce2a8ad 100644
--- a/tests/unit/algorithms/segmentation/adapters/mmseg/test_pipelines_params_validation.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_pipelines_params_validation.py
@@ -4,7 +4,7 @@
 
 import pytest
 
-from otx.algorithms.segmentation.adapters.mmseg.data.pipelines import (
+from otx.algorithms.segmentation.adapters.mmseg.datasets.pipelines import (
     LoadAnnotationFromOTXDataset,
     LoadImageFromOTXDataset,
 )
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_transforms.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_transforms.py
new file mode 100644
index 00000000000..facded59996
--- /dev/null
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/pipelines/test_transforms.py
@@ -0,0 +1,313 @@
+from typing import Any, Dict
+
+import numpy as np
+import pytest
+import torch
+from mmcv.parallel import DataContainer
+from PIL import Image
+
+from otx.algorithms.segmentation.adapters.mmseg.datasets.pipelines.transforms import (
+    BranchImage,
+    DefaultFormatBundle,
+    NDArrayToPILImage,
+    Normalize,
+    PILImageToNDArray,
+    RandomColorJitter,
+    RandomGaussianBlur,
+    RandomGrayscale,
+    RandomResizedCrop,
+    RandomSolarization,
+    TwoCropTransform,
+)
+from tests.test_suite.e2e_test_system import e2e_pytest_unit
+
+
+@pytest.fixture(scope="module")
+def inputs_np():
+    return {
+        "img": np.random.randint(0, 10, (16, 16, 3), dtype=np.uint8),
+        "gt_semantic_seg": np.random.rand(16, 16),
+        "flip": True,
+    }
+
+
+@pytest.fixture(scope="module")
+def inputs_PIL():
+    return {
+        "img": Image.fromarray(np.random.randint(0, 10, (16, 16, 3), dtype=np.uint8)),
+        "gt_semantic_seg": np.random.randint(0, 5, (16, 16), dtype=np.uint8),
+        "seg_fields": ["gt_semantic_seg"],
+        "ori_shape": (16, 16, 3),
+    }
+
+
+class TestNDArrayToPILImage:
+    @pytest.fixture(autouse=True)
+    def setUp(self) -> None:
+        self.results: dict = {"img": np.random.randint(0, 255, (3, 3, 3), dtype=np.uint8)}
+        self.nd_array_to_pil_image: NDArrayToPILImage = NDArrayToPILImage(keys=["img"])
+
+    @e2e_pytest_unit
+    def test_call(self) -> None:
+        converted_img: dict = self.nd_array_to_pil_image(self.results)
+        assert "img" in converted_img
+        assert isinstance(converted_img["img"], Image.Image)
+
+    @e2e_pytest_unit
+    def test_repr(self) -> None:
+        assert str(self.nd_array_to_pil_image) == "NDArrayToPILImage"
+
+
+class TestPILImageToNDArray:
+    @pytest.fixture(autouse=True)
+    def setUp(self) -> None:
+        self.results: dict = {"img": Image.new("RGB", (3, 3))}
+        self.pil_image_to_nd_array: PILImageToNDArray = PILImageToNDArray(keys=["img"])
+
+    @e2e_pytest_unit
+    def test_call(self) -> None:
+        converted_array: dict = self.pil_image_to_nd_array(self.results)
+        assert "img" in converted_array
+        assert isinstance(converted_array["img"], np.ndarray)
+
+    @e2e_pytest_unit
+    def test_repr(self) -> None:
+        assert str(self.pil_image_to_nd_array) == "PILImageToNDArray"
+
+
+class TestRandomResizedCrop:
+    @pytest.fixture(autouse=True)
+    def setUp(self) -> None:
+        self.results: dict = {"img": Image.new("RGB", (10, 16)), "img_shape": (10, 16), "ori_shape": (10, 16)}
+        self.random_resized_crop: RandomResizedCrop = RandomResizedCrop((5, 5), (0.5, 1.0))
+
+    @e2e_pytest_unit
+    def test_call(self) -> None:
+        cropped_img: dict = self.random_resized_crop(self.results)
+        assert cropped_img["img_shape"] == (5, 5)
+        assert cropped_img["ori_shape"] == (10, 16)
+
+
+class TestRandomSolarization:
+    @pytest.fixture(autouse=True)
+    def setUp(self) -> None:
+        self.results: dict = {"img": np.random.randint(0, 255, (3, 3, 3), dtype=np.uint8)}
+        self.random_solarization: RandomSolarization = RandomSolarization(p=1.0)
+
+    @e2e_pytest_unit
+    def test_call(self) -> None:
+        solarized: dict = self.random_solarization(self.results)
+        assert "img" in solarized
+        assert isinstance(solarized["img"], np.ndarray)
+
+    @e2e_pytest_unit
+    def test_repr(self) -> None:
+        assert str(self.random_solarization) == "RandomSolarization"
+
+
+class TestNormalize:
+    @e2e_pytest_unit
+    @pytest.mark.parametrize(
+        "mean,std,to_rgb,expected",
+        [
+            (1.0, 1.0, True, np.array([[[1.0, 0.0, 0.0]]], dtype=np.float32)),
+            (1.0, 1.0, False, np.array([[[-1.0, 0.0, 0.0]]], dtype=np.float32)),
+        ],
+    )
+    def test_call(self, mean: float, std: float, to_rgb: bool, expected: np.array) -> None:
+        """Test __call__."""
+        normalize = Normalize(mean=mean, std=std, to_rgb=to_rgb)
+        inputs = dict(img=np.arange(3).reshape(1, 1, 3))
+
+        results = normalize(inputs.copy())
+
+        assert "img" in results
+        assert "img_norm_cfg" in results
+        assert np.all(results["img"] == expected)
+
+    @e2e_pytest_unit
+    @pytest.mark.parametrize("mean,std,to_rgb", [(1.0, 1.0, True)])
+    def test_repr(self, mean: float, std: float, to_rgb: bool) -> None:
+        """Test __repr__."""
+        normalize = Normalize(mean=mean, std=std, to_rgb=to_rgb)
+
+        assert repr(normalize) == normalize.__class__.__name__ + f"(mean={mean}, std={std}, to_rgb=" f"{to_rgb})"
+
+
+class TestDefaultFormatBundle:
+    @pytest.fixture(autouse=True)
+    def setup(self) -> None:
+        self.default_format_bundle = DefaultFormatBundle()
+
+    @e2e_pytest_unit
+    @pytest.mark.parametrize("img", [np.ones((1, 1)), np.ones((1, 1, 1)), np.ones((1, 1, 1, 1))])
+    @pytest.mark.parametrize("gt_semantic_seg,pixel_weights", [(np.ones((1, 1)), np.ones((1, 1)))])
+    def test_call(self, img: np.array, gt_semantic_seg: np.array, pixel_weights: np.array) -> None:
+        """Test __call__."""
+        inputs = dict(img=img, gt_semantic_seg=gt_semantic_seg, pixel_weights=pixel_weights)
+
+        results = self.default_format_bundle(inputs.copy())
+
+        assert isinstance(results, dict)
+        assert "img" in results
+        assert isinstance(results["img"], DataContainer)
+        assert len(results["img"].data.shape) >= 3
+        assert results["img"].data.dtype == torch.float32
+        assert "gt_semantic_seg" in results
+        assert len(results["gt_semantic_seg"].data.shape) == len(inputs["gt_semantic_seg"].shape) + 1
+        assert results["gt_semantic_seg"].data.dtype == torch.int64
+        assert "pixel_weights" in results
+        assert len(results["pixel_weights"].data.shape) == len(inputs["pixel_weights"].shape) + 1
+        assert results["pixel_weights"].data.dtype == torch.float32
+
+    @e2e_pytest_unit
+    @pytest.mark.parametrize("img", [np.ones((1,))])
+    def test_call_invalid_shape(self, img: np.array):
+        inputs = dict(img=img)
+
+        with pytest.raises(ValueError):
+            self.default_format_bundle(inputs.copy())
+
+    @e2e_pytest_unit
+    def test_repr(self) -> None:
+        """Test __repr__."""
+        assert repr(self.default_format_bundle) == self.default_format_bundle.__class__.__name__
+
+
+class TestBranchImage:
+    @pytest.fixture(autouse=True)
+    def setup(self) -> None:
+        self.branch_image = BranchImage(key_map={"key1": "key2"})
+
+    @e2e_pytest_unit
+    def test_call(self) -> None:
+        """Test __call__."""
+        inputs = dict(key1="key1", img_fields=["key1"])
+
+        results = self.branch_image(inputs.copy())
+
+        assert isinstance(results, dict)
+        assert "key2" in results
+        assert results["key1"] == results["key2"]
+        assert "key2" in results["img_fields"]
+
+    @e2e_pytest_unit
+    def test_repr(self) -> None:
+        """Test __repr__."""
+        assert repr(self.branch_image) == self.branch_image.__class__.__name__
+
+
+class TestTwoCropTransform:
+    @pytest.fixture(autouse=True)
+    def setup(self, mocker) -> None:
+        mocker.patch(
+            "otx.algorithms.segmentation.adapters.mmseg.datasets.pipelines.transforms.build_from_cfg",
+            return_value=lambda x: x,
+        )
+        self.two_crop_transform = TwoCropTransform(view0=[], view1=[])
+
+    @e2e_pytest_unit
+    def test_call(self, mocker, inputs_np: Dict[str, Any]) -> None:
+        """Test __call__."""
+        results = self.two_crop_transform(inputs_np)
+
+        assert isinstance(results, dict)
+        assert "img" in results and results["img"].ndim == 4
+        assert "gt_semantic_seg" in results and results["gt_semantic_seg"].ndim == 3
+        assert "flip" in results and isinstance(results["flip"], list)
+
+    @e2e_pytest_unit
+    def test_call_with_single_pipeline(self, mocker, inputs_np: Dict[str, Any]) -> None:
+        """Test __call__ with single pipeline."""
+        self.two_crop_transform.is_both = False
+
+        results = self.two_crop_transform(inputs_np)
+
+        assert isinstance(results, dict)
+        assert "img" in results and results["img"].ndim == 3
+        assert "gt_semantic_seg" in results and results["gt_semantic_seg"].ndim == 2
+        assert "flip" in results and isinstance(results["flip"], bool)
+
+
+@e2e_pytest_unit
+def test_random_resized_crop(inputs_PIL: Dict[str, Any]) -> None:
+    """Test RandomResizedCrop."""
+    random_resized_crop = RandomResizedCrop(size=(8, 8))
+
+    results = random_resized_crop(inputs_PIL)
+
+    assert isinstance(results, dict)
+    assert "img" in results and results["img"].size == (8, 8)
+    assert "gt_semantic_seg" in results and results["gt_semantic_seg"].shape == (8, 8)
+    assert "img_shape" in results
+    assert "ori_shape" in results
+    assert "scale_factor" in results
+
+
+@e2e_pytest_unit
+def test_random_color_jitter(inputs_PIL: Dict[str, Any]) -> None:
+    """Test RandomColorJitter."""
+    random_color_jitter = RandomColorJitter(p=1.0)
+
+    results = random_color_jitter(inputs_PIL)
+
+    assert isinstance(results, dict)
+    assert "img" in results
+
+
+@e2e_pytest_unit
+def test_random_grayscale(inputs_PIL: Dict[str, Any]) -> None:
+    """Test RandomGrayscale."""
+    random_grayscale = RandomGrayscale()
+
+    results = random_grayscale(inputs_PIL)
+
+    assert isinstance(results, dict)
+    assert "img" in results
+
+
+@e2e_pytest_unit
+def test_random_gaussian_blur(inputs_PIL: Dict[str, Any]) -> None:
+    """Test RandomGaussianBlur."""
+    random_gaussian_blur = RandomGaussianBlur(p=1.0, kernel_size=3)
+
+    results = random_gaussian_blur(inputs_PIL)
+
+    assert isinstance(results, dict)
+    assert "img" in results
+
+
+@e2e_pytest_unit
+def test_random_solarization(inputs_np: Dict[str, Any]) -> None:
+    """Test RandomSolarization."""
+    random_solarization = RandomSolarization(p=1.0)
+
+    results = random_solarization(inputs_np)
+
+    assert isinstance(results, dict)
+    assert "img" in results
+    assert repr(random_solarization) == "RandomSolarization"
+
+
+@e2e_pytest_unit
+def test_nd_array_to_pil_image(inputs_np: Dict[str, Any]) -> None:
+    """Test NDArrayToPILImage."""
+    nd_array_to_pil_image = NDArrayToPILImage(keys=["img"])
+
+    results = nd_array_to_pil_image(inputs_np)
+
+    assert "img" in results
+    assert isinstance(results["img"], Image.Image)
+    assert repr(nd_array_to_pil_image) == "NDArrayToPILImage"
+
+
+@e2e_pytest_unit
+def test_pil_image_to_nd_array(inputs_PIL: Dict[str, Any]) -> None:
+    """Test PILImageToNDArray."""
+    pil_image_to_nd_array = PILImageToNDArray(keys=["img"])
+
+    results = pil_image_to_nd_array(inputs_PIL)
+
+    assert "img" in results
+    assert isinstance(results["img"], np.ndarray)
+    assert repr(pil_image_to_nd_array) == "PILImageToNDArray"
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_dataset.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/test_dataset.py
similarity index 97%
rename from tests/unit/algorithms/segmentation/adapters/mmseg/test_dataset.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/datasets/test_dataset.py
index 5b797baed65..7327c1f3b35 100644
--- a/tests/unit/algorithms/segmentation/adapters/mmseg/test_dataset.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/test_dataset.py
@@ -5,7 +5,7 @@
 import numpy as np
 import pytest
 
-from otx.algorithms.segmentation.adapters.mmseg.data.dataset import MPASegDataset
+from otx.algorithms.segmentation.adapters.mmseg.datasets import MPASegDataset
 from otx.api.entities.annotation import (
     Annotation,
     AnnotationSceneEntity,
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_dataset_params_validation.py b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/test_dataset_params_validation.py
similarity index 99%
rename from tests/unit/algorithms/segmentation/adapters/mmseg/test_dataset_params_validation.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/datasets/test_dataset_params_validation.py
index ca629d38734..7ad55560b65 100644
--- a/tests/unit/algorithms/segmentation/adapters/mmseg/test_dataset_params_validation.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/datasets/test_dataset_params_validation.py
@@ -4,7 +4,7 @@
 
 import numpy as np
 import pytest
 
-from otx.algorithms.segmentation.adapters.mmseg.data.dataset import (
+from otx.algorithms.segmentation.adapters.mmseg.datasets.dataset import (
     OTXSegDataset,
     get_annotation_mmseg_format,
 )
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/__init__.py
new file mode 100644
index 00000000000..dc3be0d0648
--- /dev/null
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.segmentation.adapters.mmseg.models.backbones."""
+
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
diff --git a/tests/unit/mpa/modules/models/backbones/test_litehrnet.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/test_litehrnet.py
similarity index 97%
rename from tests/unit/mpa/modules/models/backbones/test_litehrnet.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/test_litehrnet.py
index f59878fb309..361e1527f1d 100644
--- a/tests/unit/mpa/modules/models/backbones/test_litehrnet.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/test_litehrnet.py
@@ -6,7 +6,7 @@
 from otx.algorithms.common.adapters.mmcv.configs.backbones.lite_hrnet_18 import (
     model as model_cfg,
 )
-from otx.mpa.modules.models.backbones.litehrnet import (
+from otx.algorithms.segmentation.adapters.mmseg.models.backbones.litehrnet import (
     LiteHRNet,
     NeighbourSupport,
     SpatialWeightingV2,
diff --git a/tests/unit/mpa/modules/ov/models/mmseg/backbones/test_ov_mmseg_mmov_backbone.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/test_mmseg_mmov_backbone.py
similarity index 94%
rename from tests/unit/mpa/modules/ov/models/mmseg/backbones/test_ov_mmseg_mmov_backbone.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/test_mmseg_mmov_backbone.py
index 8796f89ef2a..24aebfcbe81 100644
--- a/tests/unit/mpa/modules/ov/models/mmseg/backbones/test_ov_mmseg_mmov_backbone.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/backbones/test_mmseg_mmov_backbone.py
@@ -7,7 +7,7 @@
 import pytest
 import torch
 
-from otx.mpa.modules.ov.models.mmseg.backbones.mmov_backbone import MMOVBackbone
+from otx.algorithms.segmentation.adapters.mmseg.models.backbones import MMOVBackbone
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/models/heads/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/heads/__init__.py
new file mode 100644
index 00000000000..60b6f78ebef
--- /dev/null
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/heads/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.segmentation.adapters.mmseg.models.heads."""
+
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
diff --git a/tests/unit/mpa/modules/ov/models/mmseg/decode_heads/test_ov_mmseg_mmov_decode_head.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/heads/test_mmseg_mmov_decode_head.py
similarity index 95%
rename from tests/unit/mpa/modules/ov/models/mmseg/decode_heads/test_ov_mmseg_mmov_decode_head.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/models/heads/test_mmseg_mmov_decode_head.py
index c4fccf7ab9b..07f46d0e8ae 100644
--- a/tests/unit/mpa/modules/ov/models/mmseg/decode_heads/test_ov_mmseg_mmov_decode_head.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/heads/test_mmseg_mmov_decode_head.py
@@ -7,7 +7,7 @@
 import pytest
 import torch
 
-from otx.mpa.modules.ov.models.mmseg.decode_heads.mmov_decode_head import MMOVDecodeHead
+from otx.algorithms.segmentation.adapters.mmseg.models import MMOVDecodeHead
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 
diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/models/scalar_schedulers/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/scalar_schedulers/__init__.py
new file mode 100644
index 00000000000..c1b25fc5a37
--- /dev/null
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/scalar_schedulers/__init__.py
@@ -0,0 +1,4 @@
+"""Test for otx.algorithms.segmentation.adapters.mmseg.models.schedulers"""
+
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
diff --git a/tests/unit/mpa/modules/models/scalar_schedulers/test_schedulers.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/scalar_schedulers/test_schedulers.py
similarity index 98%
rename from tests/unit/mpa/modules/models/scalar_schedulers/test_schedulers.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/models/scalar_schedulers/test_schedulers.py
index a79cbdc67e6..1295b8d37eb 100644
--- a/tests/unit/mpa/modules/models/scalar_schedulers/test_schedulers.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/scalar_schedulers/test_schedulers.py
@@ -6,7 +6,7 @@
 
 import pytest
 
-from otx.mpa.modules.models.scalar_schedulers import (
+from otx.algorithms.segmentation.adapters.mmseg.models.schedulers import (
     ConstantScalarScheduler,
     PolyScalarScheduler,
     StepScalarScheduler,
diff --git a/tests/unit/mpa/modules/losses/test_utils.py b/tests/unit/algorithms/segmentation/adapters/mmseg/models/utils/test_utils.py
similarity index 90%
rename from tests/unit/mpa/modules/losses/test_utils.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/models/utils/test_utils.py
index f49b6f01675..7ff9fc63803 100644
--- a/tests/unit/mpa/modules/losses/test_utils.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/models/utils/test_utils.py
@@ -6,7 +6,7 @@
 
 import pytest
 import torch
 
-from otx.mpa.modules.models.losses.utils import LossEqualizer
+from otx.algorithms.segmentation.adapters.mmseg.models.utils import LossEqualizer
 
 
 class TestLossEqualizer:
diff --git a/otx/mpa/modules/ov/models/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/__init__.py
similarity index 100%
rename from otx/mpa/modules/ov/models/__init__.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/__init__.py
diff --git a/tests/unit/mpa/seg/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py
similarity index 100%
rename from tests/unit/mpa/seg/__init__.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py
diff --git a/tests/unit/mpa/seg/incremental/test_seg_incremental_stage.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/test_seg_incremental_stage.py
similarity index 63%
rename from tests/unit/mpa/seg/incremental/test_seg_incremental_stage.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/test_seg_incremental_stage.py
index 59f353bd989..637b11524d1 100644
--- a/tests/unit/mpa/seg/incremental/test_seg_incremental_stage.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/test_seg_incremental_stage.py
@@ -1,7 +1,9 @@
 import pytest
 
-from otx.mpa.seg.incremental.stage import IncrSegStage
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.stage import (
+    IncrSegStage,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.segmentation.test_helpers import DEFAULT_RECIPE_CONFIG_PATH
 
@@ -14,7 +16,9 @@ def setup(self) -> None:
 
     @e2e_pytest_unit
     def test_configure_task(self, mocker):
-        mock_update_hook = mocker.patch("otx.mpa.seg.incremental.stage.update_or_add_custom_hook")
+        mock_update_hook = mocker.patch(
+            "otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.stage.update_or_add_custom_hook"
+        )
         self.stage.configure_task(self.stage.cfg, True)
         mock_update_hook.assert_called_once()
diff --git a/tests/unit/mpa/seg/incremental/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py
similarity index 100%
rename from tests/unit/mpa/seg/incremental/__init__.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py
diff --git a/tests/unit/mpa/seg/semisl/test_seg_semisl_inferrer.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_inferrer.py
similarity index 84%
rename from tests/unit/mpa/seg/semisl/test_seg_semisl_inferrer.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_inferrer.py
index 46ca864567e..48f955d54a8 100644
--- a/tests/unit/mpa/seg/semisl/test_seg_semisl_inferrer.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_inferrer.py
@@ -2,8 +2,10 @@
 
 import pytest
 
-from otx.mpa.seg.semisl.inferrer import SemiSLSegInferrer
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.inferrer import (
+    SemiSLSegInferrer,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.segmentation.test_helpers import DEFAULT_SEG_TEMPLATE_DIR
 
diff --git a/tests/unit/mpa/seg/semisl/test_seg_semisl_stage.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_stage.py
similarity index 74%
rename from tests/unit/mpa/seg/semisl/test_seg_semisl_stage.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_stage.py
index d17921dbf9c..25b8ff7156b 100644
--- a/tests/unit/mpa/seg/semisl/test_seg_semisl_stage.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_stage.py
@@ -1,8 +1,8 @@
 import pytest
 
-from otx.mpa.seg.semisl.stage import SemiSLSegStage
-from otx.mpa.seg.stage import SegStage
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.stage import SemiSLSegStage
+from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 
 SEMISL_RECIPE_CONFIG_PATH = "otx/recipes/stages/segmentation/semisl.py"
@@ -26,7 +26,9 @@ def test_configure_data(self, mocker):
     def test_configure_task(self, mocker):
         fake_model_cfg = {"model": {"type": "", "task_adapt": True}}
         self.stage.cfg.merge_from_dict(fake_model_cfg)
-        mock_remove_hook = mocker.patch("otx.mpa.seg.semisl.stage.remove_custom_hook")
+        mock_remove_hook = mocker.patch(
+            "otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.stage.remove_custom_hook"
+        )
         self.stage.configure_task(self.stage.cfg, True)
         assert "task_adapt" not in self.stage.cfg.model
diff --git a/tests/unit/mpa/seg/test_seg_exporter.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_exporter.py
similarity index 81%
rename from tests/unit/mpa/seg/test_seg_exporter.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_exporter.py
index ccfc595be4f..0d787740b4b 100644
--- a/tests/unit/mpa/seg/test_seg_exporter.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_exporter.py
@@ -2,11 +2,11 @@
 
 import pytest
 
+from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter
+from otx.algorithms.segmentation.adapters.mmseg.tasks.exporter import SegExporter
 from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor
-from otx.mpa.deploy.apis import NaiveExporter
-from otx.mpa.exporter_mixin import ExporterMixin
-from otx.mpa.seg.exporter import SegExporter
-from otx.mpa.utils.config_utils import MPAConfig
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.segmentation.test_helpers import (
     DEFAULT_RECIPE_CONFIG_PATH,
diff --git a/tests/unit/mpa/seg/test_seg_inferrer.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_inferrer.py
similarity index 92%
rename from tests/unit/mpa/seg/test_seg_inferrer.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_inferrer.py
index e58fba0c98c..0d58cb09b87 100644
--- a/tests/unit/mpa/seg/test_seg_inferrer.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_inferrer.py
@@ -2,8 +2,11 @@
 
 import pytest
 
-from otx.mpa.seg.inferrer import SegInferrer, replace_ImageToTensor
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer import (
+    SegInferrer,
+    replace_ImageToTensor,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.segmentation.test_helpers import (
     DEFAULT_RECIPE_CONFIG_PATH,
diff --git a/tests/unit/mpa/seg/test_seg_stage.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_stage.py
similarity index 93%
rename from tests/unit/mpa/seg/test_seg_stage.py
rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_stage.py
index 986c56583db..0b064cfabde 100644
--- a/tests/unit/mpa/seg/test_seg_stage.py
+++ b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_stage.py
@@ -2,9 +2,9 @@
 
 import pytest
 
-from otx.mpa import Stage
-from otx.mpa.seg.stage import SegStage
-from otx.mpa.utils.config_utils import MPAConfig
+from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage
+from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
+from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
 from tests.unit.algorithms.segmentation.test_helpers import (
     DEFAULT_RECIPE_CONFIG_PATH,
@@ -30,10 +30,10 @@ def test_configure(self, mocker):
         fake_arg = {"pretrained": True, "foo": "bar"}
         returned_value = self.stage.configure(self.model_cfg, "", self.data_cfg, True, **fake_arg)
-        mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, True, **fake_arg)
+        mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, **fake_arg)
         mock_cfg_ckpt.assert_called_once_with(self.stage.cfg, "", fake_arg.get("pretrained", None))
         mock_cfg_data.assert_called_once_with(self.stage.cfg, True, self.data_cfg)
-        mock_cfg_task.assert_called_once_with(self.stage.cfg, True, **fake_arg)
+        mock_cfg_task.assert_called_once_with(self.stage.cfg, True)
         mock_cfg_hook.assert_called_once_with(self.stage.cfg)
         assert returned_value == self.stage.cfg
@@ -41,14 +41,14 @@ def test_configure(self, mocker):
     @e2e_pytest_unit
     def test_configure_model(self):
         fake_arg = {"ir_model_path": {"ir_weight_path": "", "ir_weight_init": ""}}
-        self.stage.configure_model(self.stage.cfg, self.model_cfg, True, **fake_arg)
+        self.stage.configure_model(self.stage.cfg, self.model_cfg, **fake_arg)
         assert
self.stage.cfg.model_task @e2e_pytest_unit def test_configure_data(self, mocker): mock_super_cfg_data = mocker.patch.object(Stage, "configure_data") - self.stage.configure_data(self.stage.cfg, True, self.data_cfg, pretrained=None) + self.stage.configure_data(self.stage.cfg, True, self.data_cfg) mock_super_cfg_data.assert_called_once() assert self.stage.cfg.data diff --git a/tests/unit/mpa/seg/test_seg_trainer.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_trainer.py similarity index 79% rename from tests/unit/mpa/seg/test_seg_trainer.py rename to tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_trainer.py index 4ee7b14390f..8bdb6783529 100644 --- a/tests/unit/mpa/seg/test_seg_trainer.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_trainer.py @@ -2,8 +2,8 @@ import pytest -from otx.mpa.seg.trainer import SegTrainer -from otx.mpa.utils.config_utils import MPAConfig +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig +from otx.algorithms.segmentation.adapters.mmseg.tasks.trainer import SegTrainer from tests.test_suite.e2e_test_system import e2e_pytest_unit from tests.unit.algorithms.segmentation.test_helpers import ( DEFAULT_RECIPE_CONFIG_PATH, @@ -24,7 +24,7 @@ def test_run(self, mocker): mocker.patch.object(SegTrainer, "configure_samples_per_gpu") mocker.patch.object(SegTrainer, "configure_fp16_optimizer") mocker.patch.object(SegTrainer, "configure_compat_cfg") - mock_train_segmentor = mocker.patch("otx.mpa.seg.trainer.train_segmentor") + mock_train_segmentor = mocker.patch("otx.algorithms.segmentation.adapters.mmseg.tasks.trainer.train_segmentor") self.trainer.run(self.model_cfg, "", self.data_cfg) mock_train_segmentor.assert_called_once() @@ -36,7 +36,7 @@ def test_run_with_distributed(self, mocker): mocker.patch.object(SegTrainer, "configure_fp16_optimizer") mocker.patch.object(SegTrainer, "configure_compat_cfg") spy_cfg_dist = mocker.spy(SegTrainer, "_modify_cfg_for_distributed") - mock_train_segmentor = mocker.patch("otx.mpa.seg.trainer.train_segmentor") + mock_train_segmentor = mocker.patch("otx.algorithms.segmentation.adapters.mmseg.tasks.trainer.train_segmentor") self.trainer.run(self.model_cfg, "", self.data_cfg) spy_cfg_dist.assert_called_once() diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_pipelines.py b/tests/unit/algorithms/segmentation/adapters/mmseg/test_pipelines.py deleted file mode 100644 index d59987c3820..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/test_pipelines.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import numpy as np -import PIL.Image -import pytest - -from otx.algorithms.segmentation.adapters.mmseg.data.pipelines import ( - LoadAnnotationFromOTXDataset, - NDArrayToPILImage, - PILImageToNDArray, - RandomResizedCrop, - RandomSolarization, -) -from otx.api.entities.annotation import ( - Annotation, - AnnotationSceneEntity, - AnnotationSceneKind, -) -from otx.api.entities.dataset_item import DatasetItemEntity -from otx.api.entities.image import Image -from otx.api.entities.label import Domain, LabelEntity -from otx.api.entities.scored_label import ScoredLabel -from otx.api.entities.shapes.rectangle import Rectangle -from tests.test_suite.e2e_test_system import e2e_pytest_unit - - -def label_entity(name="test label") -> LabelEntity: - return LabelEntity(name=name, domain=Domain.SEGMENTATION) - - -def dataset_item() -> DatasetItemEntity: - image: Image = 
Image(data=np.random.randint(low=0, high=255, size=(10, 16, 3))) - annotation: Annotation = Annotation(shape=Rectangle.generate_full_box(), labels=[ScoredLabel(label_entity())]) - annotation_scene: AnnotationSceneEntity = AnnotationSceneEntity( - annotations=[annotation], kind=AnnotationSceneKind.ANNOTATION - ) - return DatasetItemEntity(media=image, annotation_scene=annotation_scene) - - -class TestLoadAnnotationFromOTXDataset: - @pytest.fixture(autouse=True) - def setUp(self) -> None: - - self.dataset_item: DatasetItemEntity = dataset_item() - self.results: dict = { - "dataset_item": self.dataset_item, - "ann_info": {"labels": [label_entity("class_1")]}, - "seg_fields": [], - } - self.pipeline: LoadAnnotationFromOTXDataset = LoadAnnotationFromOTXDataset() - - @e2e_pytest_unit - def test_call(self) -> None: - loaded_annotations: dict = self.pipeline(self.results) - assert "gt_semantic_seg" in loaded_annotations - assert loaded_annotations["dataset_item"] == self.dataset_item - - -class TestNDArrayToPILImage: - @pytest.fixture(autouse=True) - def setUp(self) -> None: - self.results: dict = {"img": np.random.randint(0, 255, (3, 3, 3), dtype=np.uint8)} - self.nd_array_to_pil_image: NDArrayToPILImage = NDArrayToPILImage(keys=["img"]) - - @e2e_pytest_unit - def test_call(self) -> None: - converted_img: dict = self.nd_array_to_pil_image(self.results) - assert "img" in converted_img - assert isinstance(converted_img["img"], PIL.Image.Image) - - @e2e_pytest_unit - def test_repr(self) -> None: - assert str(self.nd_array_to_pil_image) == "NDArrayToPILImage" - - -class TestPILImageToNDArray: - @pytest.fixture(autouse=True) - def setUp(self) -> None: - self.results: dict = {"img": PIL.Image.new("RGB", (3, 3))} - self.pil_image_to_nd_array: PILImageToNDArray = PILImageToNDArray(keys=["img"]) - - @e2e_pytest_unit - def test_call(self) -> None: - converted_array: dict = self.pil_image_to_nd_array(self.results) - assert "img" in converted_array - assert isinstance(converted_array["img"], np.ndarray) - - @e2e_pytest_unit - def test_repr(self) -> None: - assert str(self.pil_image_to_nd_array) == "PILImageToNDArray" - - -class TestRandomResizedCrop: - @pytest.fixture(autouse=True) - def setUp(self) -> None: - self.results: dict = {"img": PIL.Image.new("RGB", (10, 16)), "img_shape": (10, 16), "ori_shape": (10, 16)} - self.random_resized_crop: RandomResizedCrop = RandomResizedCrop((5, 5), (0.5, 1.0)) - - @e2e_pytest_unit - def test_call(self) -> None: - cropped_img: dict = self.random_resized_crop(self.results) - assert cropped_img["img_shape"] == (5, 5) - assert cropped_img["ori_shape"] == (10, 16) - - -class TestRandomSolarization: - @pytest.fixture(autouse=True) - def setUp(self) -> None: - self.results: dict = {"img": np.random.randint(0, 255, (3, 3, 3), dtype=np.uint8)} - self.random_solarization: RandomSolarization = RandomSolarization(p=1.0) - - @e2e_pytest_unit - def test_call(self) -> None: - solarized: dict = self.random_solarization(self.results) - assert "img" in solarized - assert isinstance(solarized["img"], np.ndarray) - - @e2e_pytest_unit - def test_repr(self) -> None: - assert str(self.random_solarization) == "RandomSolarization" diff --git a/otx/mpa/modules/datasets/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/utils/__init__.py similarity index 55% rename from otx/mpa/modules/datasets/__init__.py rename to tests/unit/algorithms/segmentation/adapters/mmseg/utils/__init__.py index 4e1701262e2..2e7d4985d06 100644 --- a/otx/mpa/modules/datasets/__init__.py +++ 
b/tests/unit/algorithms/segmentation/adapters/mmseg/utils/__init__.py @@ -1,5 +1,4 @@ +"""Test for otx.algorithms.segmentation.adapters.mmseg.utils.""" # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # - -# flake8: noqa diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_config_utils.py b/tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_config_utils.py similarity index 100% rename from tests/unit/algorithms/segmentation/adapters/mmseg/test_config_utils.py rename to tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_config_utils.py diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_config_utils_params_validation.py b/tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_config_utils_params_validation.py similarity index 100% rename from tests/unit/algorithms/segmentation/adapters/mmseg/test_config_utils_params_validation.py rename to tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_config_utils_params_validation.py diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_data_utils.py b/tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_data_utils.py similarity index 100% rename from tests/unit/algorithms/segmentation/adapters/mmseg/test_data_utils.py rename to tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_data_utils.py diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/test_data_utils_params_validation.py b/tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_data_utils_params_validation.py similarity index 100% rename from tests/unit/algorithms/segmentation/adapters/mmseg/test_data_utils_params_validation.py rename to tests/unit/algorithms/segmentation/adapters/mmseg/utils/test_data_utils_params_validation.py diff --git a/tests/unit/cli/builder/test_cli_builder.py b/tests/unit/cli/builder/test_cli_builder.py index 64922a59c5f..74ef4364c2a 100644 --- a/tests/unit/cli/builder/test_cli_builder.py +++ b/tests/unit/cli/builder/test_cli_builder.py @@ -9,6 +9,7 @@ from mmcv.utils import Registry from torch import nn +from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig from otx.cli.builder.builder import ( Builder, get_backbone_out_channels, @@ -16,7 +17,6 @@ update_channels, ) from otx.cli.utils.importing import get_otx_root_path -from otx.mpa.utils.config_utils import MPAConfig from tests.test_suite.e2e_test_system import e2e_pytest_unit diff --git a/tests/unit/cli/manager/test_config_manager.py b/tests/unit/cli/manager/test_config_manager.py index d56c2add368..c53d56f1153 100644 --- a/tests/unit/cli/manager/test_config_manager.py +++ b/tests/unit/cli/manager/test_config_manager.py @@ -10,6 +10,12 @@ set_workspace, ) from otx.cli.registry import Registry +from otx.cli.utils.errors import ( + CliException, + ConfigValueError, + FileNotExistError, + NotSupportedError, +) from tests.test_suite.e2e_test_system import e2e_pytest_unit @@ -132,7 +138,7 @@ def test_export_data_cfg(self, mocker, config_manager): def test_build_workspace(self, mocker): # Setup task_type = "CLASSIFICATION" - train_type = "SEMISUPERVISED" + train_type = "Semisupervised" workspace_path = "./otx-workspace" args = mocker.Mock() args.autosplit = None @@ -319,7 +325,7 @@ def test_data_config_file_path(self, mocker, tmp_dir_path): expected_file_path = tmp_dir_path / "data.yaml" args = parser.parse_args(["--data", str(expected_file_path)]) config_manager.args = args - with pytest.raises(FileNotFoundError): + with pytest.raises(FileNotExistError): 
             config_manager.data_config_file_path

         mock_exists.return_value = True
@@ -339,7 +345,7 @@ def test_configure_template(self, mocker):
             "otx.cli.manager.config_manager.ConfigManager.check_workspace", return_value=True
         )
         mocker.patch("otx.cli.manager.config_manager.ConfigManager._get_template", return_value=mock_template)
-        mocker.patch("otx.cli.manager.config_manager.ConfigManager._get_train_type", return_value="INCREMENTAL")
+        mocker.patch("otx.cli.manager.config_manager.ConfigManager._get_train_type", return_value="Incremental")
         mock_parse_model_template = mocker.patch(
             "otx.cli.manager.config_manager.parse_model_template", return_value=mock_template
         )
@@ -352,7 +358,7 @@ def test_configure_template(self, mocker):
         # Then
         assert config_manager.task_type == "CLASSIFICATION"
         assert config_manager.model == "template_name"
-        assert config_manager.train_type == "INCREMENTAL"
+        assert config_manager.train_type == "Incremental"

         config_manager.mode = "build"
         mocker.patch("otx.cli.manager.config_manager.ConfigManager._check_rebuild", return_value=True)
@@ -360,7 +366,7 @@ def test_configure_template(self, mocker):
         assert config_manager.rebuild
         assert config_manager.task_type == "CLASSIFICATION"
         assert config_manager.model == "template_name"
-        assert config_manager.train_type == "INCREMENTAL"
+        assert config_manager.train_type == "Incremental"

         mock_check_workspace.return_value = False
         mocker.patch("pathlib.Path.exists", return_value=True)
@@ -376,7 +382,7 @@ def test_configure_template(self, mocker):
         config_manager.configure_template()
         assert config_manager.task_type == "CLASSIFICATION"
         assert config_manager.model == "template_name"
-        assert config_manager.train_type == "INCREMENTAL"
+        assert config_manager.train_type == "Incremental"

     @e2e_pytest_unit
     def test__check_rebuild(self, mocker):
@@ -389,7 +395,7 @@ def test__check_rebuild(self, mocker):
         mock_args.template = mock_template
         config_manager = ConfigManager(mock_args)
-        with pytest.raises(NotImplementedError):
+        with pytest.raises(NotSupportedError):
             config_manager._check_rebuild()

         config_manager.template.task_type = "DETECTION"
@@ -399,7 +405,7 @@ def test__check_rebuild(self, mocker):
         config_manager.args.model = "SSD"
         config_manager.template.name = "ATSS"
-        config_manager.args.train_type = "SEMISUPERVISED"
+        config_manager.args.train_type = "Semisupervised"
         assert config_manager._check_rebuild()

     @e2e_pytest_unit
@@ -427,7 +433,7 @@ def test_configure_data_config(self, mocker):
         mock_args.mode = "build"
         config_manager = ConfigManager(mock_args)
-        config_manager.train_type = "INCREMENTAL"
+        config_manager.train_type = "Incremental"
         config_manager.configure_data_config(update_data_yaml=True)
         mock_configure_dataset.assert_called_once()
@@ -440,39 +446,39 @@ def test_configure_data_config(self, mocker):
     @e2e_pytest_unit
     def test__get_train_type(self, mocker):
         mock_args = mocker.MagicMock()
-        mock_params_dict = {"algo_backend": {"train_type": {"value": "SEMISUPERVISED"}}}
+        mock_params_dict = {"algo_backend": {"train_type": {"value": "Semisupervised"}}}
         mock_configure_dataset = mocker.patch(
             "otx.cli.manager.config_manager.gen_params_dict_from_args", return_value=mock_params_dict
         )
         config_manager = ConfigManager(args=mock_args)
         config_manager.mode = "build"
-        assert config_manager._get_train_type() == "SEMISUPERVISED"
+        assert config_manager._get_train_type() == "Semisupervised"

-        config_manager.args.train_type = "INCREMENTAL"
+        config_manager.args.train_type = "Incremental"
         mock_configure_dataset.return_value = {}
-        assert config_manager._get_train_type() == "INCREMENTAL"
+        assert config_manager._get_train_type() == "Incremental"

         mock_template = mocker.MagicMock()
         mock_template.hyper_parameters.parameter_overrides = {
-            "algo_backend": {"train_type": {"default_value": "SELFSUPERVISED"}}
+            "algo_backend": {"train_type": {"default_value": "Selfsupervised"}}
         }
         config_manager.template = mock_template
-        assert config_manager._get_train_type(ignore_args=True) == "SELFSUPERVISED"
+        assert config_manager._get_train_type(ignore_args=True) == "Selfsupervised"

         config_manager.template.hyper_parameters.parameter_overrides = {}
-        assert config_manager._get_train_type(ignore_args=True) == "INCREMENTAL"
+        assert config_manager._get_train_type(ignore_args=True) == "Incremental"

     @e2e_pytest_unit
     def test_auto_task_detection(self, mocker):
         mock_args = mocker.MagicMock()
         config_manager = ConfigManager(args=mock_args)
-        with pytest.raises(ValueError):
+        with pytest.raises(CliException):
             config_manager.auto_task_detection("")

         mock_get_data_format = mocker.patch(
             "otx.cli.manager.config_manager.DatasetManager.get_data_format", return_value="Unexpected"
         )
-        with pytest.raises(ValueError):
+        with pytest.raises(ConfigValueError):
             config_manager.auto_task_detection("data/roots")

         mock_get_data_format.return_value = "coco"
diff --git a/tests/unit/cli/tools/test_build.py b/tests/unit/cli/tools/test_build.py
index d54652c58bf..3ddb60689bc 100644
--- a/tests/unit/cli/tools/test_build.py
+++ b/tests/unit/cli/tools/test_build.py
@@ -16,7 +16,7 @@ def test_get_args(mocker):
         "--unlabeled-data-roots": "unlabeled/data/root",
         "--unlabeled-file-list": "unlabeled/file/list",
         "--task": "detection",
-        "--train-type": "SEMISUPERVISED",
+        "--train-type": "Semisupervised",
         "--work-dir": "work/dir/path",
         "--model": "SSD",
         "--backbone": "torchvision.resnet18",
@@ -37,7 +37,7 @@ def test_get_args(mocker):
     assert parsed_args.unlabeled_file_list == "unlabeled/file/list"
     assert parsed_args.work_dir == "work/dir/path"
     assert parsed_args.task == "detection"
-    assert parsed_args.train_type == "SEMISUPERVISED"
+    assert parsed_args.train_type == "Semisupervised"
     assert parsed_args.model == "SSD"
     assert parsed_args.backbone == "torchvision.resnet18"
diff --git a/tests/unit/core/data/adapter/test_init.py b/tests/unit/core/data/adapter/test_init.py
index 031a8b2ebd6..b647a53715f 100644
--- a/tests/unit/core/data/adapter/test_init.py
+++ b/tests/unit/core/data/adapter/test_init.py
@@ -13,7 +13,7 @@
 @e2e_pytest_unit
 @pytest.mark.parametrize("task_name", TASK_NAME_TO_TASK_TYPE.keys())
-@pytest.mark.parametrize("train_type", [TrainType.INCREMENTAL.value])
+@pytest.mark.parametrize("train_type", [TrainType.Incremental.value])
 def test_get_dataset_adapter_incremental(task_name, train_type):
     root_path = os.getcwd()
     task_type = TASK_NAME_TO_TASK_TYPE[task_name]
@@ -35,7 +35,7 @@
 @e2e_pytest_unit
 @pytest.mark.parametrize("task_name", ["classification"])
-@pytest.mark.parametrize("train_type", [TrainType.SELFSUPERVISED.value])
+@pytest.mark.parametrize("train_type", [TrainType.Selfsupervised.value])
 def test_get_dataset_adapter_selfsl_classification(task_name, train_type):
     root_path = os.getcwd()
     task_type = TASK_NAME_TO_TASK_TYPE[task_name]
@@ -56,7 +56,7 @@
 @e2e_pytest_unit
 @pytest.mark.parametrize("task_name", ["segmentation"])
-@pytest.mark.parametrize("train_type", [TrainType.SELFSUPERVISED.value])
+@pytest.mark.parametrize("train_type", [TrainType.Selfsupervised.value])
 def test_get_dataset_adapter_selfsl_segmentation(task_name, train_type):
     root_path = os.getcwd()
     task_type = TASK_NAME_TO_TASK_TYPE[task_name]
diff --git a/tests/unit/mpa/modules/ov/graph/parsers/test_ov_graph_cls_parser.py b/tests/unit/core/ov/graph/parsers/test_ov_graph_cls_parser.py
similarity index 81%
rename from tests/unit/mpa/modules/ov/graph/parsers/test_ov_graph_cls_parser.py
rename to tests/unit/core/ov/graph/parsers/test_ov_graph_cls_parser.py
index 71eee579996..37e0519a318 100644
--- a/tests/unit/mpa/modules/ov/graph/parsers/test_ov_graph_cls_parser.py
+++ b/tests/unit/core/ov/graph/parsers/test_ov_graph_cls_parser.py
@@ -2,9 +2,9 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-from otx.mpa.modules.ov.graph.graph import Graph
-from otx.mpa.modules.ov.graph.parsers.cls.cls_base_parser import cls_base_parser
-from otx.mpa.modules.ov.utils import load_ov_model
+from otx.core.ov.graph import Graph
+from otx.core.ov.graph.parsers.cls import cls_base_parser
+from otx.core.ov.utils import load_ov_model
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/graph/parsers/test_ov_graph_parser.py b/tests/unit/core/ov/graph/parsers/test_ov_graph_parser.py
similarity index 78%
rename from tests/unit/mpa/modules/ov/graph/parsers/test_ov_graph_parser.py
rename to tests/unit/core/ov/graph/parsers/test_ov_graph_parser.py
index 405f3f37ebc..8d26f8479cb 100644
--- a/tests/unit/mpa/modules/ov/graph/parsers/test_ov_graph_parser.py
+++ b/tests/unit/core/ov/graph/parsers/test_ov_graph_parser.py
@@ -2,9 +2,9 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-from otx.mpa.modules.ov.graph.graph import Graph
-from otx.mpa.modules.ov.graph.parsers.parser import parameter_parser, result_parser
-from otx.mpa.modules.ov.utils import load_ov_model
+from otx.core.ov.graph import Graph
+from otx.core.ov.graph.parsers.parser import parameter_parser, result_parser
+from otx.core.ov.utils import load_ov_model
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/graph/test_ov_graph_grapy.py b/tests/unit/core/ov/graph/test_ov_graph_grapy.py
similarity index 99%
rename from tests/unit/mpa/modules/ov/graph/test_ov_graph_grapy.py
rename to tests/unit/core/ov/graph/test_ov_graph_grapy.py
index afa43e09e55..c00743df343 100644
--- a/tests/unit/mpa/modules/ov/graph/test_ov_graph_grapy.py
+++ b/tests/unit/core/ov/graph/test_ov_graph_grapy.py
@@ -9,7 +9,7 @@
 import openvino.runtime as ov
 import pytest

-from otx.mpa.modules.ov.graph.graph import Graph, SortedDict
+from otx.core.ov.graph.graph import Graph, SortedDict
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/graph/test_ov_graph_utils.py b/tests/unit/core/ov/graph/test_ov_graph_utils.py
similarity index 90%
rename from tests/unit/mpa/modules/ov/graph/test_ov_graph_utils.py
rename to tests/unit/core/ov/graph/test_ov_graph_utils.py
index 5847bc76d98..7133f523da4 100644
--- a/tests/unit/mpa/modules/ov/graph/test_ov_graph_utils.py
+++ b/tests/unit/core/ov/graph/test_ov_graph_utils.py
@@ -2,13 +2,13 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-from otx.mpa.modules.ov.graph.graph import Graph
-from otx.mpa.modules.ov.graph.utils import (
+from otx.core.ov.graph.graph import Graph
+from otx.core.ov.graph.utils import (
     get_constant_input_nodes,
     handle_paired_batchnorm,
     handle_reshape,
 )
-from otx.mpa.modules.ov.utils import load_ov_model
+from otx.core.ov.utils import load_ov_model
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/models/__init__.py b/tests/unit/core/ov/models/__init__.py
similarity index 100%
rename from tests/unit/mpa/modules/ov/models/__init__.py
rename to tests/unit/core/ov/models/__init__.py
diff --git a/tests/unit/mpa/modules/ov/models/mmcls/__init__.py b/tests/unit/core/ov/models/mmcls/__init__.py
similarity index 100%
rename from tests/unit/mpa/modules/ov/models/mmcls/__init__.py
rename to tests/unit/core/ov/models/mmcls/__init__.py
diff --git a/tests/unit/mpa/modules/ov/models/mmcls/backbones/test_ov_mmcls_mmov_backbone.py b/tests/unit/core/ov/models/mmcls/backbones/test_ov_mmcls_mmov_backbone.py
similarity index 86%
rename from tests/unit/mpa/modules/ov/models/mmcls/backbones/test_ov_mmcls_mmov_backbone.py
rename to tests/unit/core/ov/models/mmcls/backbones/test_ov_mmcls_mmov_backbone.py
index 5651d064400..87ed5aae9de 100644
--- a/tests/unit/mpa/modules/ov/models/mmcls/backbones/test_ov_mmcls_mmov_backbone.py
+++ b/tests/unit/core/ov/models/mmcls/backbones/test_ov_mmcls_mmov_backbone.py
@@ -5,9 +5,11 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.models.mmcls.backbones.mmov_backbone import MMOVBackbone
+from otx.algorithms.classification.adapters.mmcls.models.backbones.mmov_backbone import (
+    MMOVBackbone,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
-from tests.unit.mpa.modules.ov.models.mmcls.test_helpers import create_ov_model
+from tests.unit.core.ov.models.mmcls.test_helpers import create_ov_model


 class TestMMOVBackbone:
diff --git a/tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_cls_head.py b/tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_cls_head.py
similarity index 90%
rename from tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_cls_head.py
rename to tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_cls_head.py
index 63676b1a64e..ea25d9794ea 100644
--- a/tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_cls_head.py
+++ b/tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_cls_head.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.models.mmcls.heads.cls_head import ClsHead
+from otx.algorithms.classification.adapters.mmcls.models.heads.cls_head import ClsHead
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_conv_head.py b/tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_conv_head.py
similarity index 91%
rename from tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_conv_head.py
rename to tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_conv_head.py
index 490c1cd560f..a58d46f2d01 100644
--- a/tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_conv_head.py
+++ b/tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_conv_head.py
@@ -6,7 +6,9 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.models.mmcls.heads.conv_head import ConvClsHead
+from otx.algorithms.classification.adapters.mmcls.models.heads.conv_head import (
+    ConvClsHead,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_mmcv_cls_head.py b/tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_mmcv_cls_head.py
similarity index 86%
rename from tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_mmcv_cls_head.py
rename to tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_mmcv_cls_head.py
index b5e5039dc43..c12e05642b6 100644
--- a/tests/unit/mpa/modules/ov/models/mmcls/heads/test_ov_mmcls_mmcv_cls_head.py
+++ b/tests/unit/core/ov/models/mmcls/heads/test_ov_mmcls_mmcv_cls_head.py
@@ -5,9 +5,11 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.models.mmcls.heads.mmov_cls_head import MMOVClsHead
+from otx.algorithms.classification.adapters.mmcls.models.heads.mmov_cls_head import (
+    MMOVClsHead,
+)
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
-from tests.unit.mpa.modules.ov.models.mmcls.test_helpers import create_ov_model
+from tests.unit.core.ov.models.mmcls.test_helpers import create_ov_model


 class TestMMOVClsHead:
diff --git a/tests/unit/mpa/modules/ov/models/mmcls/necks/test_ov_mmcls_mmov_neck.py b/tests/unit/core/ov/models/mmcls/necks/test_ov_mmcls_mmov_neck.py
similarity index 73%
rename from tests/unit/mpa/modules/ov/models/mmcls/necks/test_ov_mmcls_mmov_neck.py
rename to tests/unit/core/ov/models/mmcls/necks/test_ov_mmcls_mmov_neck.py
index 2c2756e9583..d6255891516 100644
--- a/tests/unit/mpa/modules/ov/models/mmcls/necks/test_ov_mmcls_mmov_neck.py
+++ b/tests/unit/core/ov/models/mmcls/necks/test_ov_mmcls_mmov_neck.py
@@ -4,9 +4,9 @@

 import pytest

-from otx.mpa.modules.ov.models.mmcls.necks.mmov_neck import MMOVNeck
+from otx.algorithms.classification.adapters.mmcls.models.necks import MMOVNeck
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
-from tests.unit.mpa.modules.ov.models.mmcls.test_helpers import create_ov_model
+from tests.unit.core.ov.models.mmcls.test_helpers import create_ov_model


 class TestMMOVNeck:
diff --git a/tests/unit/mpa/modules/ov/models/mmcls/test_helpers.py b/tests/unit/core/ov/models/mmcls/test_helpers.py
similarity index 100%
rename from tests/unit/mpa/modules/ov/models/mmcls/test_helpers.py
rename to tests/unit/core/ov/models/mmcls/test_helpers.py
diff --git a/tests/unit/mpa/modules/ov/models/test_ov_models_ov_model.py b/tests/unit/core/ov/models/test_ov_models_ov_model.py
similarity index 96%
rename from tests/unit/mpa/modules/ov/models/test_ov_models_ov_model.py
rename to tests/unit/core/ov/models/test_ov_models_ov_model.py
index dda0f86c4b8..f3bf7fe78ca 100644
--- a/tests/unit/mpa/modules/ov/models/test_ov_models_ov_model.py
+++ b/tests/unit/core/ov/models/test_ov_models_ov_model.py
@@ -6,7 +6,7 @@
 import openvino.runtime as ov
 import torch

-from otx.mpa.modules.ov.models.ov_model import OVModel
+from otx.core.ov.models.ov_model import OVModel
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_activations.py b/tests/unit/core/ov/ops/test_ov_ops_activations.py
similarity index 99%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_activations.py
rename to tests/unit/core/ov/ops/test_ov_ops_activations.py
index 553f002eafe..a2c6e8f08f7 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_activations.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_activations.py
@@ -8,7 +8,7 @@
 import torch
 from torch.nn import functional as F

-from otx.mpa.modules.ov.ops.activations import (
+from otx.core.ov.ops.activations import (
     ClampV0,
     EluV0,
     ExpV0,
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_arithmetics.py b/tests/unit/core/ov/ops/test_ov_ops_arithmetics.py
similarity index 97%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_arithmetics.py
rename to tests/unit/core/ov/ops/test_ov_ops_arithmetics.py
index 046d8f95f0f..e477286114b 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_arithmetics.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_arithmetics.py
@@ -5,13 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.arithmetics import (
-    AddV1,
-    DivideV1,
-    MultiplyV1,
-    SubtractV1,
-    TanV0,
-)
+from otx.core.ov.ops.arithmetics import AddV1, DivideV1, MultiplyV1, SubtractV1, TanV0
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_builder.py b/tests/unit/core/ov/ops/test_ov_ops_builder.py
similarity index 92%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_builder.py
rename to tests/unit/core/ov/ops/test_ov_ops_builder.py
index 84a1f8bf6f2..f175747fc9b 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_builder.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_builder.py
@@ -6,8 +6,8 @@

 import pytest

-from otx.mpa.modules.ov.ops.builder import OperationRegistry
-from otx.mpa.modules.ov.ops.op import Attribute, Operation
+from otx.core.ov.ops.builder import OperationRegistry
+from otx.core.ov.ops.op import Attribute, Operation
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_convolutions.py b/tests/unit/core/ov/ops/test_ov_ops_convolutions.py
similarity index 97%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_convolutions.py
rename to tests/unit/core/ov/ops/test_ov_ops_convolutions.py
index 3cd37523080..ec0a6c41500 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_convolutions.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_convolutions.py
@@ -6,7 +6,7 @@
 import torch
 from torch.nn import functional as F

-from otx.mpa.modules.ov.ops.convolutions import ConvolutionV1, GroupConvolutionV1
+from otx.core.ov.ops.convolutions import ConvolutionV1, GroupConvolutionV1
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_generation.py b/tests/unit/core/ov/ops/test_ov_ops_generation.py
similarity index 93%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_generation.py
rename to tests/unit/core/ov/ops/test_ov_ops_generation.py
index dc0919ceb2c..c8efe5d31de 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_generation.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_generation.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.generation import RangeV4
+from otx.core.ov.ops.generation import RangeV4
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_image_processings.py b/tests/unit/core/ov/ops/test_ov_ops_image_processings.py
similarity index 97%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_image_processings.py
rename to tests/unit/core/ov/ops/test_ov_ops_image_processings.py
index dc1dfa321e5..1c86c54f691 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_image_processings.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_image_processings.py
@@ -6,7 +6,7 @@
 import torch
 from torch.nn import functional as F

-from otx.mpa.modules.ov.ops.image_processings import InterpolateV4
+from otx.core.ov.ops.image_processings import InterpolateV4
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_infrastructures.py b/tests/unit/core/ov/ops/test_ov_ops_infrastructures.py
similarity index 97%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_infrastructures.py
rename to tests/unit/core/ov/ops/test_ov_ops_infrastructures.py
index f688229d693..91df181ac81 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_infrastructures.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_infrastructures.py
@@ -7,7 +7,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.infrastructures import ConstantV0, ParameterV0, ResultV0
+from otx.core.ov.ops.infrastructures import ConstantV0, ParameterV0, ResultV0
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_matmuls.py b/tests/unit/core/ov/ops/test_ov_ops_matmuls.py
similarity index 97%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_matmuls.py
rename to tests/unit/core/ov/ops/test_ov_ops_matmuls.py
index e766472efca..e795aea7536 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_matmuls.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_matmuls.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.matmuls import EinsumV7, MatMulV0
+from otx.core.ov.ops.matmuls import EinsumV7, MatMulV0
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_module.py b/tests/unit/core/ov/ops/test_ov_ops_module.py
similarity index 93%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_module.py
rename to tests/unit/core/ov/ops/test_ov_ops_module.py
index 158bc7e4c9d..56e3942ada8 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_module.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_module.py
@@ -5,8 +5,8 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops import OPS
-from otx.mpa.modules.ov.ops.modules.op_module import OperationModule
+from otx.core.ov.ops.builder import OPS
+from otx.core.ov.ops.modules.op_module import OperationModule
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_movements.py b/tests/unit/core/ov/ops/test_ov_ops_movements.py
similarity index 99%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_movements.py
rename to tests/unit/core/ov/ops/test_ov_ops_movements.py
index 52333569da2..ec5b0faeb04 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_movements.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_movements.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.movements import (
+from otx.core.ov.ops.movements import (
     BroadcastV3,
     ConcatV0,
     GatherV0,
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_normalizations.py b/tests/unit/core/ov/ops/test_ov_ops_normalizations.py
similarity index 98%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_normalizations.py
rename to tests/unit/core/ov/ops/test_ov_ops_normalizations.py
index cba839b572c..268b12aa7c9 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_normalizations.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_normalizations.py
@@ -6,7 +6,7 @@
 import torch
 from torch.nn import functional as F

-from otx.mpa.modules.ov.ops.normalizations import (
+from otx.core.ov.ops.normalizations import (
     MVNV6,
     BatchNormalizationV0,
     LocalResponseNormalizationV0,
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_object_detections.py b/tests/unit/core/ov/ops/test_ov_ops_object_detections.py
similarity index 98%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_object_detections.py
rename to tests/unit/core/ov/ops/test_ov_ops_object_detections.py
index 91aa2e674d8..76315d67d61 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_object_detections.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_object_detections.py
@@ -4,7 +4,7 @@

 import pytest

-from otx.mpa.modules.ov.ops.object_detections import (
+from otx.core.ov.ops.object_detections import (
     DetectionOutputV0,
     PriorBoxClusteredV0,
     PriorBoxV0,
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_op.py b/tests/unit/core/ov/ops/test_ov_ops_op.py
similarity index 92%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_op.py
rename to tests/unit/core/ov/ops/test_ov_ops_op.py
index 275f06ab391..d4e9557503c 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_op.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_op.py
@@ -4,7 +4,7 @@

 import openvino.runtime as ov

-from otx.mpa.modules.ov.ops.arithmetics import MultiplyV1
+from otx.core.ov.ops.arithmetics import MultiplyV1
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_poolings.py b/tests/unit/core/ov/ops/test_ov_ops_poolings.py
similarity index 98%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_poolings.py
rename to tests/unit/core/ov/ops/test_ov_ops_poolings.py
index 315f7e5d65c..192b6218cc5 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_poolings.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_poolings.py
@@ -6,7 +6,7 @@
 import torch
 from torch.nn import functional as F

-from otx.mpa.modules.ov.ops.poolings import AvgPoolV1, MaxPoolV0
+from otx.core.ov.ops.poolings import AvgPoolV1, MaxPoolV0
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_reductions.py b/tests/unit/core/ov/ops/test_ov_ops_reductions.py
similarity index 98%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_reductions.py
rename to tests/unit/core/ov/ops/test_ov_ops_reductions.py
index d6749aa6434..34419a695e0 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_reductions.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_reductions.py
@@ -4,7 +4,7 @@

 import torch

-from otx.mpa.modules.ov.ops.reductions import (
+from otx.core.ov.ops.reductions import (
     ReduceMeanV1,
     ReduceMinV1,
     ReduceProdV1,
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_shape_manipulations.py b/tests/unit/core/ov/ops/test_ov_ops_shape_manipulations.py
similarity index 98%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_shape_manipulations.py
rename to tests/unit/core/ov/ops/test_ov_ops_shape_manipulations.py
index 8777d4ee964..ec449751074 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_shape_manipulations.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_shape_manipulations.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.shape_manipulations import (
+from otx.core.ov.ops.shape_manipulations import (
     ReshapeV1,
     ShapeOfV0,
     ShapeOfV3,
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_sorting_maximization.py b/tests/unit/core/ov/ops/test_ov_ops_sorting_maximization.py
similarity index 95%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_sorting_maximization.py
rename to tests/unit/core/ov/ops/test_ov_ops_sorting_maximization.py
index 7ccdd358b1f..5f0d0a65b56 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_sorting_maximization.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_sorting_maximization.py
@@ -4,7 +4,7 @@

 import pytest

-from otx.mpa.modules.ov.ops.sorting_maximization import (
+from otx.core.ov.ops.sorting_maximization import (
     NonMaxSuppressionV5,
     NonMaxSuppressionV9,
     TopKV3,
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_type_conversions.py b/tests/unit/core/ov/ops/test_ov_ops_type_conversions.py
similarity index 97%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_type_conversions.py
rename to tests/unit/core/ov/ops/test_ov_ops_type_conversions.py
index 6db5128ab91..9df3ef3de88 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_type_conversions.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_type_conversions.py
@@ -5,7 +5,7 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.type_conversions import ConvertV0
+from otx.core.ov.ops.type_conversions import ConvertV0
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/ops/test_ov_ops_utils.py b/tests/unit/core/ov/ops/test_ov_ops_utils.py
similarity index 90%
rename from tests/unit/mpa/modules/ov/ops/test_ov_ops_utils.py
rename to tests/unit/core/ov/ops/test_ov_ops_utils.py
index a35cbe42b14..2c3c9f2d068 100644
--- a/tests/unit/mpa/modules/ov/ops/test_ov_ops_utils.py
+++ b/tests/unit/core/ov/ops/test_ov_ops_utils.py
@@ -6,7 +6,8 @@
 import pytest
 import torch

-from otx.mpa.modules.ov.ops.utils import get_dynamic_shape, get_torch_padding
+from otx.core.ov.ops.movements import get_torch_padding
+from otx.core.ov.ops.utils import get_dynamic_shape
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/test_ov_omz_wrapper.py b/tests/unit/core/ov/test_ov_omz_wrapper.py
similarity index 96%
rename from tests/unit/mpa/modules/ov/test_ov_omz_wrapper.py
rename to tests/unit/core/ov/test_ov_omz_wrapper.py
index 345e19ee592..a04c9edce74 100644
--- a/tests/unit/mpa/modules/ov/test_ov_omz_wrapper.py
+++ b/tests/unit/core/ov/test_ov_omz_wrapper.py
@@ -7,7 +7,7 @@

 from openvino.model_zoo._configuration import Model

-from otx.mpa.modules.ov.omz_wrapper import (
+from otx.core.ov.omz_wrapper import (
     download_model,
     get_model_configuration,
     get_omz_model,
diff --git a/tests/unit/mpa/modules/ov/test_ov_registry.py b/tests/unit/core/ov/test_ov_registry.py
similarity index 94%
rename from tests/unit/mpa/modules/ov/test_ov_registry.py
rename to tests/unit/core/ov/test_ov_registry.py
index cc9f163a4db..25c5e32be7d 100644
--- a/tests/unit/mpa/modules/ov/test_ov_registry.py
+++ b/tests/unit/core/ov/test_ov_registry.py
@@ -4,7 +4,7 @@

 import pytest

-from otx.mpa.modules.ov.registry import Registry
+from otx.core.ov.registry import Registry
 from tests.test_suite.e2e_test_system import e2e_pytest_unit
diff --git a/tests/unit/mpa/modules/ov/test_ov_utils.py b/tests/unit/core/ov/test_ov_utils.py
similarity index 89%
rename from tests/unit/mpa/modules/ov/test_ov_utils.py
rename to tests/unit/core/ov/test_ov_utils.py
index d5432e49ed2..3057de461db 100644
--- a/tests/unit/mpa/modules/ov/test_ov_utils.py
+++ b/tests/unit/core/ov/test_ov_utils.py
@@ -8,11 +8,11 @@
 import openvino.runtime as ov
 import pytest

-from otx.mpa.modules.ov.omz_wrapper import get_omz_model
-from otx.mpa.modules.ov.ops import ParameterV0
-from otx.mpa.modules.ov.utils import (
-    convert_op_to_torch,
-    convert_op_to_torch_module,
+from otx.core.ov.omz_wrapper import get_omz_model
+from otx.core.ov.ops.infrastructures import ParameterV0
+from otx.core.ov.ops.modules.op_module import convert_op_to_torch_module
+from otx.core.ov.ops.utils import convert_op_to_torch
+from otx.core.ov.utils import (
     get_op_name,
     load_ov_model,
     normalize_name,
diff --git a/tests/unit/mpa/deploy/__init__.py b/tests/unit/mpa/deploy/__init__.py
index 2faffbe2b1f..ff847f01203 100644
--- a/tests/unit/mpa/deploy/__init__.py
+++ b/tests/unit/mpa/deploy/__init__.py
@@ -1,13 +1,3 @@
 # Copyright (C) 2023 Intel Corporation
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions
-# and limitations under the License.
+# SPDX-License-Identifier: MIT
diff --git a/tests/unit/mpa/modules/__init__.py b/tests/unit/mpa/modules/__init__.py
deleted file mode 100644
index 2e7b47a93c3..00000000000
--- a/tests/unit/mpa/modules/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.mpa.modules"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/modules/datasets/__init__.py b/tests/unit/mpa/modules/datasets/__init__.py
deleted file mode 100644
index 8b0a8f1de67..00000000000
--- a/tests/unit/mpa/modules/datasets/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.mpa.modules.datasets"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/modules/datasets/pipelines/__init__.py b/tests/unit/mpa/modules/datasets/pipelines/__init__.py
deleted file mode 100644
index 9288b05e850..00000000000
--- a/tests/unit/mpa/modules/datasets/pipelines/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.mpa.modules.datasets.pipelines"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/modules/datasets/pipelines/transforms/__init__.py b/tests/unit/mpa/modules/datasets/pipelines/transforms/__init__.py
deleted file mode 100644
index b9a06469c00..00000000000
--- a/tests/unit/mpa/modules/datasets/pipelines/transforms/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.mpa.modules.datasets.pipelines.transforms"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_seg_custom_pipelines.py b/tests/unit/mpa/modules/datasets/pipelines/transforms/test_seg_custom_pipelines.py
deleted file mode 100644
index c966164a992..00000000000
--- a/tests/unit/mpa/modules/datasets/pipelines/transforms/test_seg_custom_pipelines.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import numpy as np
-import pytest
-import torch
-from mmcv.parallel import DataContainer
-
-from otx.mpa.modules.datasets.pipelines.transforms.seg_custom_pipelines import (
-    BranchImage,
-    DefaultFormatBundle,
-    Normalize,
-)
-from tests.test_suite.e2e_test_system import e2e_pytest_unit
-
-
-class TestNormalize:
-    @e2e_pytest_unit
-    @pytest.mark.parametrize(
-        "mean,std,to_rgb,expected",
-        [
-            (1.0, 1.0, True, np.array([[[1.0, 0.0, 0.0]]], dtype=np.float32)),
-            (1.0, 1.0, False, np.array([[[-1.0, 0.0, 0.0]]], dtype=np.float32)),
-        ],
-    )
-    def test_call(self, mean: float, std: float, to_rgb: bool, expected: np.array) -> None:
-        """Test __call__."""
-        normalize = Normalize(mean=mean, std=std, to_rgb=to_rgb)
-        inputs = dict(img=np.arange(3).reshape(1, 1, 3))
-
-        results = normalize(inputs.copy())
-
-        assert "img" in results
-        assert "img_norm_cfg" in results
-        assert np.all(results["img"] == expected)
-
-    @e2e_pytest_unit
-    @pytest.mark.parametrize("mean,std,to_rgb", [(1.0, 1.0, True)])
-    def test_repr(self, mean: float, std: float, to_rgb: bool) -> None:
-        """Test __repr__."""
-        normalize = Normalize(mean=mean, std=std, to_rgb=to_rgb)
-
-        assert repr(normalize) == normalize.__class__.__name__ + f"(mean={mean}, std={std}, to_rgb=" f"{to_rgb})"
-
-
-class TestDefaultFormatBundle:
-    @pytest.fixture(autouse=True)
-    def setup(self) -> None:
-        self.default_format_bundle = DefaultFormatBundle()
-
-    @e2e_pytest_unit
-    @pytest.mark.parametrize("img", [np.ones((1, 1)), np.ones((1, 1, 1)), np.ones((1, 1, 1, 1))])
-    @pytest.mark.parametrize("gt_semantic_seg,pixel_weights", [(np.ones((1, 1)), np.ones((1, 1)))])
-    def test_call(self, img: np.array, gt_semantic_seg: np.array, pixel_weights: np.array) -> None:
-        """Test __call__."""
-        inputs = dict(img=img, gt_semantic_seg=gt_semantic_seg, pixel_weights=pixel_weights)
-
-        results = self.default_format_bundle(inputs.copy())
-
-        assert isinstance(results, dict)
-        assert "img" in results
-        assert isinstance(results["img"], DataContainer)
-        assert len(results["img"].data.shape) >= 3
-        assert results["img"].data.dtype == torch.float32
-        assert "gt_semantic_seg" in results
-        assert len(results["gt_semantic_seg"].data.shape) == len(inputs["gt_semantic_seg"].shape) + 1
-        assert results["gt_semantic_seg"].data.dtype == torch.int64
-        assert "pixel_weights" in results
-        assert len(results["pixel_weights"].data.shape) == len(inputs["pixel_weights"].shape) + 1
-        assert results["pixel_weights"].data.dtype == torch.float32
-
-    @e2e_pytest_unit
-    @pytest.mark.parametrize("img", [np.ones((1,))])
-    def test_call_invalid_shape(self, img: np.array):
-        inputs = dict(img=img)
-
-        with pytest.raises(ValueError):
-            self.default_format_bundle(inputs.copy())
-
-    @e2e_pytest_unit
-    def test_repr(self) -> None:
-        """Test __repr__."""
-        assert repr(self.default_format_bundle) == self.default_format_bundle.__class__.__name__
-
-
-class TestBranchImage:
-    @pytest.fixture(autouse=True)
-    def setup(self) -> None:
-        self.branch_image = BranchImage(key_map={"key1": "key2"})
-
-    @e2e_pytest_unit
-    def test_call(self) -> None:
-        """Test __call__."""
-        inputs = dict(key1="key1", img_fields=["key1"])
-
-        results = self.branch_image(inputs.copy())
-
-        assert isinstance(results, dict)
-        assert "key2" in results
-        assert results["key1"] == results["key2"]
-        assert "key2" in results["img_fields"]
-
-    @e2e_pytest_unit
-    def test_repr(self) -> None:
-        """Test __repr__."""
-        assert repr(self.branch_image) == self.branch_image.__class__.__name__
diff --git a/tests/unit/mpa/modules/heads/__init__.py b/tests/unit/mpa/modules/heads/__init__.py
deleted file mode 100644
index 2e7b47a93c3..00000000000
--- a/tests/unit/mpa/modules/heads/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.mpa.modules"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/modules/hooks/test_mpa_fp16_sam_optimizer_hook.py b/tests/unit/mpa/modules/hooks/test_mpa_fp16_sam_optimizer_hook.py
deleted file mode 100644
index 3464c8dd385..00000000000
--- a/tests/unit/mpa/modules/hooks/test_mpa_fp16_sam_optimizer_hook.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Unit test for otx.mpa.modules.hooks.fp16_sam_optimizer_hook."""
-# Copyright (C) 2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
-
-from otx.mpa.modules.hooks.fp16_sam_optimizer_hook import Fp16SAMOptimizerHook
-from tests.test_suite.e2e_test_system import e2e_pytest_unit
-
-
-class TestFp16SAMOptimizerHook:
-    @e2e_pytest_unit
-    def test_temp(self) -> None:
-        try:
-            hook = Fp16SAMOptimizerHook()
-            assert hook is None
-        except Exception as e:
-            print(e)
-            pass
diff --git a/tests/unit/mpa/modules/losses/__init__.py b/tests/unit/mpa/modules/losses/__init__.py
deleted file mode 100644
index 2e7b47a93c3..00000000000
--- a/tests/unit/mpa/modules/losses/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.mpa.modules"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/modules/optimizer/__init__.py b/tests/unit/mpa/modules/optimizer/__init__.py
deleted file mode 100644
index 8a6c5467f4e..00000000000
--- a/tests/unit/mpa/modules/optimizer/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Test for otx.mpa.modules.optimizer"""
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/seg/semisl/__init__.py b/tests/unit/mpa/seg/semisl/__init__.py
deleted file mode 100644
index 1e19f1159d9..00000000000
--- a/tests/unit/mpa/seg/semisl/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Copyright (C) 2022 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
diff --git a/tests/unit/mpa/test_augments.py b/tests/unit/mpa/test_augments.py
index 41beaece056..d7c93288745 100644
--- a/tests/unit/mpa/test_augments.py
+++ b/tests/unit/mpa/test_augments.py
@@ -11,7 +11,7 @@
 import pytest
 from PIL import Image

-from otx.mpa.modules.datasets.pipelines.transforms.augments import (
+from otx.algorithms.common.adapters.mmcv.pipelines.transforms.augments import (
     Augments,
     CythonAugments,
 )
diff --git a/tests/unit/mpa/test_builder.py b/tests/unit/mpa/test_builder.py
deleted file mode 100644
index b68bb8f5c49..00000000000
--- a/tests/unit/mpa/test_builder.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import mmcv
-
-from otx.mpa.builder import build
-from tests.test_suite.e2e_test_system import e2e_pytest_unit
-
-
-@e2e_pytest_unit
-def test_build_with_stages(mocker):
-    cfg = mmcv.ConfigDict(
-        stages=[mocker.MagicMock()],
-        type=mocker.MagicMock(),
-        workflow_hooks=[mocker.MagicMock()],
-    )
-    mocker.patch("otx.mpa.builder.build_workflow_hook")
-    mock_build_from_cfg = mocker.patch("otx.mpa.builder.build_from_cfg")
-    mock_workflow = mocker.patch("otx.mpa.builder.Workflow")
-    mocker.patch("otx.mpa.builder.config_logger")
-    mocker.patch("os.makedirs")
-    mocker.patch("os.unlink")
-    mocker.patch("os.symlink")
-
-    build(cfg)
-
-    mock_build_from_cfg.assert_called()
-    mock_workflow.assert_called_once()
-
-
-@e2e_pytest_unit
-def test_build_without_stages(mocker):
-    cfg = mmcv.ConfigDict()
-
-    mocker.patch("otx.mpa.builder.get_available_types", return_value="MockStage")
-    mock_build_from_cfg = mocker.patch("otx.mpa.builder.build_from_cfg")
-
-    build(cfg, None, "MockStage")
-
-    mock_build_from_cfg.assert_called_once()
diff --git a/tox.ini b/tox.ini
index 558965b1394..a57f038f7ef 100644
--- a/tox.ini
+++ b/tox.ini
@@ -147,7 +147,7 @@ deps =
 use_develop = true
 commands =
     coverage erase
-    - coverage run tests/fuzzing/cli_fuzzing.py {posargs:-dict=tests/fuzzing/assets/cli/operations.dict -artifact_prefix={toxworkdir}/ -print_final_stats=1 -atheris_runs=100000}
+    - coverage run tests/fuzzing/cli_fuzzing.py {posargs:-dict=tests/fuzzing/assets/cli/operations.dict -artifact_prefix={toxworkdir}/ -print_final_stats=1 -atheris_runs=500000}
    coverage report --precision=2
    ; coverage html -d {toxworkdir}/htmlcov
@@ -173,11 +173,11 @@ deps =
 skip_install = true
 allowlist_externals =
     rm
+    find
 commands =
     rm -rf ./dist
     python -m build --sdist
-    python -m pip install dist/otx-1.0.0.tar.gz[full]
-    # python -m pip install otx[full]==1.0.0
+    find ./dist -type f -name *.tar.gz -exec pip install {}[full] \;
     pytest {posargs:tests/unit tests/integration/cli}
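
Note on the namespace migration recorded in the test renames above: every `otx.mpa.*` import now resolves to a new home under `otx.algorithms.*` or `otx.core.*`. A minimal before/after sketch of the mapping, using only import paths that appear verbatim in the hunks above (the grouping into one snippet is illustrative, not a file from the repository):

```python
# Old (pre-refactor) locations, removed by this diff:
#   from otx.mpa.utils.config_utils import MPAConfig
#   from otx.mpa.modules.ov.graph.graph import Graph
#   from otx.mpa.modules.ov.ops.activations import ClampV0

# New locations, introduced by this diff:
from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig
from otx.core.ov.graph import Graph
from otx.core.ov.ops.activations import ClampV0
```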