diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 84748796d3b..657584ff1bd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ repos: exclude: "tests/" - repo: https://github.com/psf/black - rev: 21.7b0 + rev: 22.3.0 hooks: - id: black name: "black (ote_sdk|ote_cli)" diff --git a/.pylintrc b/.pylintrc index 7916fb5d2f5..1fdee4488f5 100644 --- a/.pylintrc +++ b/.pylintrc @@ -142,7 +142,8 @@ disable=logging-fstring-interpolation, deprecated-sys-function, exception-escape, comprehension-escape, - import-outside-toplevel + import-outside-toplevel, + arguments-differ # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/external/README.md b/external/README.md index ab82d6565ca..f4681cf2c37 100644 --- a/external/README.md +++ b/external/README.md @@ -47,9 +47,10 @@ ID | Name | Complexity (GFlops) | Model size (MB) | Path Custom_Rotated_Detection_via_Instance_Segmentation_MaskRCNN_EfficientNetB2B | MaskRCNN-EfficientNetB2B | 68.48 | 13.27 | mmdetection/configs/rotated_detection/efficientnetb2b_maskrcnn/template.yaml Custom_Rotated_Detection_via_Instance_Segmentation_MaskRCNN_ResNet50 | MaskRCNN-ResNet50 | 533.8 | 177.9 | mmdetection/configs/rotated_detection/resnet50_maskrcnn/template.yaml -## Semantic Segmentaion +## Semantic Segmentation ID | Name | Complexity (GFlops) | Model size (MB) | Path ------- | ------- | ------- | ------- | ------- +Custom_Semantic_Segmentation_Lite-HRNet-s-mod2_OCR | Lite-HRNet-s-mod2 OCR | 1.82 | 3.5 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-s-mod2/template.yaml Custom_Semantic_Segmentation_Lite-HRNet-18_OCR | Lite-HRNet-18 OCR | 3.45 | 4.5 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-18/template.yaml Custom_Semantic_Segmentation_Lite-HRNet-18-mod2_OCR | Lite-HRNet-18-mod2 OCR | 3.63 | 4.8 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-18-mod2/template.yaml Custom_Semantic_Segmentation_Lite-HRNet-x-mod3_OCR | Lite-HRNet-x-mod3 OCR | 13.97 | 6.4 | mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-x-mod3/template.yaml diff --git a/external/anomaly/anomaly_classification/configs/padim/compression_config.json b/external/anomaly/anomaly_classification/configs/padim/compression_config.json new file mode 100644 index 00000000000..48bd526180f --- /dev/null +++ b/external/anomaly/anomaly_classification/configs/padim/compression_config.json @@ -0,0 +1,42 @@ +{ + "base": { + "find_unused_parameters": true, + "target_metric_name": "image_F1", + "nncf_config": { + "input_info": { + "sample_size": [1, 3, 256, 256] + }, + "compression": [], + "log_dir": "/tmp" + } + }, + "nncf_quantization": { + "nncf_config": { + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 250 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 250 + } + }, + "ignored_scopes": [ + "PadimModel/sqrt_0", + "PadimModel/interpolate_2", + "PadimModel/__truediv___0", + "PadimModel/__truediv___1", + "PadimModel/matmul_1", + "PadimModel/conv2d_0" + ] + } + ] + } + }, + "order_of_parts": [ + "nncf_quantization" + ] +} diff --git a/external/anomaly/anomaly_classification/configs/padim/configuration.yaml b/external/anomaly/anomaly_classification/configs/padim/configuration.yaml index f919c851676..cff368c59e8 100644 --- a/external/anomaly/anomaly_classification/configs/padim/configuration.yaml +++ b/external/anomaly/anomaly_classification/configs/padim/configuration.yaml @@ -83,6 +83,56 @@ pot_parameters: visible_in_ui: true warning: null type: PARAMETER_GROUP + visible_in_ui: true +nncf_optimization: + description: Optimization by NNCF + header: Optimization by NNCF + enable_quantization: + affects_outcome_of: TRAINING + default_value: true + description: Enable quantization algorithm + editable: true + header: Enable quantization algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: true + visible_in_ui: true + warning: null + enable_pruning: + affects_outcome_of: TRAINING + default_value: false + description: Enable filter pruning algorithm + editable: true + header: Enable filter pruning algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: true + warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null + type: PARAMETER_GROUP visible_in_ui: false type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/anomaly_classification/configs/padim/template.yaml b/external/anomaly/anomaly_classification/configs/padim/template.yaml index c5c33e6f0be..a9721a933e7 100644 --- a/external/anomaly/anomaly_classification/configs/padim/template.yaml +++ b/external/anomaly/anomaly_classification/configs/padim/template.yaml @@ -12,8 +12,9 @@ framework: OTEAnomalyClassification v0.1.0 # Task implementations. entrypoints: - base: ote_anomalib.BaseAnomalyTask + base: ote_anomalib.AnomalyTrainingTask openvino: ote_anomalib.OpenVINOAnomalyTask + nncf: ote_anomalib.AnomalyNNCFTask # Hyper Parameters hyper_parameters: diff --git a/external/anomaly/anomaly_classification/configs/stfpm/compression_config.json b/external/anomaly/anomaly_classification/configs/stfpm/compression_config.json new file mode 100644 index 00000000000..9fb1d550f9f --- /dev/null +++ b/external/anomaly/anomaly_classification/configs/stfpm/compression_config.json @@ -0,0 +1,40 @@ +{ + "base": { + "find_unused_parameters": true, + "target_metric_name": "image_F1", + "nncf_config": { + "input_info": { + "sample_size": [1, 3, 256, 256] + }, + "compression": [], + "log_dir": "/tmp" + } + }, + "nncf_quantization": { + "model": { + "lr": 0.004 + }, + "nncf_config": { + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 250 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 250 + } + }, + "ignored_scopes": [ + "{re}.*__pow__.*" + ] + } + ] + } + }, + "order_of_parts": [ + "nncf_quantization" + ] +} diff --git a/external/anomaly/anomaly_classification/configs/stfpm/configuration.yaml b/external/anomaly/anomaly_classification/configs/stfpm/configuration.yaml index d35a707c099..9facd1e1d1c 100644 --- a/external/anomaly/anomaly_classification/configs/stfpm/configuration.yaml +++ b/external/anomaly/anomaly_classification/configs/stfpm/configuration.yaml @@ -132,6 +132,56 @@ pot_parameters: visible_in_ui: true warning: null type: PARAMETER_GROUP + visible_in_ui: true +nncf_optimization: + description: Optimization by NNCF + header: Optimization by NNCF + enable_quantization: + affects_outcome_of: TRAINING + default_value: true + description: Enable quantization algorithm + editable: true + header: Enable quantization algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: true + visible_in_ui: true + warning: null + enable_pruning: + affects_outcome_of: TRAINING + default_value: false + description: Enable filter pruning algorithm + editable: true + header: Enable filter pruning algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: true + warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null + type: PARAMETER_GROUP visible_in_ui: false type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/anomaly_classification/configs/stfpm/template.yaml b/external/anomaly/anomaly_classification/configs/stfpm/template.yaml index ee742321f62..def02567d2d 100644 --- a/external/anomaly/anomaly_classification/configs/stfpm/template.yaml +++ b/external/anomaly/anomaly_classification/configs/stfpm/template.yaml @@ -12,8 +12,9 @@ framework: OTEAnomalyClassification v0.1.0 # Task implementations. entrypoints: - base: ote_anomalib.BaseAnomalyTask + base: ote_anomalib.AnomalyTrainingTask openvino: ote_anomalib.OpenVINOAnomalyTask + nncf: ote_anomalib.AnomalyNNCFTask # Hyper Parameters hyper_parameters: diff --git a/external/anomaly/anomaly_detection/configs/padim/compression_config.json b/external/anomaly/anomaly_detection/configs/padim/compression_config.json new file mode 100644 index 00000000000..48bd526180f --- /dev/null +++ b/external/anomaly/anomaly_detection/configs/padim/compression_config.json @@ -0,0 +1,42 @@ +{ + "base": { + "find_unused_parameters": true, + "target_metric_name": "image_F1", + "nncf_config": { + "input_info": { + "sample_size": [1, 3, 256, 256] + }, + "compression": [], + "log_dir": "/tmp" + } + }, + "nncf_quantization": { + "nncf_config": { + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 250 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 250 + } + }, + "ignored_scopes": [ + "PadimModel/sqrt_0", + "PadimModel/interpolate_2", + "PadimModel/__truediv___0", + "PadimModel/__truediv___1", + "PadimModel/matmul_1", + "PadimModel/conv2d_0" + ] + } + ] + } + }, + "order_of_parts": [ + "nncf_quantization" + ] +} diff --git a/external/anomaly/anomaly_detection/configs/padim/configuration.yaml b/external/anomaly/anomaly_detection/configs/padim/configuration.yaml index be5d120f060..cff368c59e8 100644 --- a/external/anomaly/anomaly_detection/configs/padim/configuration.yaml +++ b/external/anomaly/anomaly_detection/configs/padim/configuration.yaml @@ -84,5 +84,55 @@ pot_parameters: warning: null type: PARAMETER_GROUP visible_in_ui: true +nncf_optimization: + description: Optimization by NNCF + header: Optimization by NNCF + enable_quantization: + affects_outcome_of: TRAINING + default_value: true + description: Enable quantization algorithm + editable: true + header: Enable quantization algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: true + visible_in_ui: true + warning: null + enable_pruning: + affects_outcome_of: TRAINING + default_value: false + description: Enable filter pruning algorithm + editable: true + header: Enable filter pruning algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: true + warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null + type: PARAMETER_GROUP + visible_in_ui: false type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/anomaly_detection/configs/padim/template.yaml b/external/anomaly/anomaly_detection/configs/padim/template.yaml index a866f821d13..3777963f6f4 100644 --- a/external/anomaly/anomaly_detection/configs/padim/template.yaml +++ b/external/anomaly/anomaly_detection/configs/padim/template.yaml @@ -12,8 +12,9 @@ framework: OTEAnomalyClassification v0.1.0 # TODO: update after the name has bee # Task implementations. entrypoints: - base: ote_anomalib.BaseAnomalyTask + base: ote_anomalib.AnomalyTrainingTask openvino: ote_anomalib.OpenVINOAnomalyTask + nncf: ote_anomalib.AnomalyNNCFTask # Hyper Parameters hyper_parameters: diff --git a/external/anomaly/anomaly_detection/configs/stfpm/compression_config.json b/external/anomaly/anomaly_detection/configs/stfpm/compression_config.json new file mode 100644 index 00000000000..9fb1d550f9f --- /dev/null +++ b/external/anomaly/anomaly_detection/configs/stfpm/compression_config.json @@ -0,0 +1,40 @@ +{ + "base": { + "find_unused_parameters": true, + "target_metric_name": "image_F1", + "nncf_config": { + "input_info": { + "sample_size": [1, 3, 256, 256] + }, + "compression": [], + "log_dir": "/tmp" + } + }, + "nncf_quantization": { + "model": { + "lr": 0.004 + }, + "nncf_config": { + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 250 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 250 + } + }, + "ignored_scopes": [ + "{re}.*__pow__.*" + ] + } + ] + } + }, + "order_of_parts": [ + "nncf_quantization" + ] +} diff --git a/external/anomaly/anomaly_detection/configs/stfpm/configuration.yaml b/external/anomaly/anomaly_detection/configs/stfpm/configuration.yaml index f50e8c31acf..9facd1e1d1c 100644 --- a/external/anomaly/anomaly_detection/configs/stfpm/configuration.yaml +++ b/external/anomaly/anomaly_detection/configs/stfpm/configuration.yaml @@ -133,5 +133,55 @@ pot_parameters: warning: null type: PARAMETER_GROUP visible_in_ui: true +nncf_optimization: + description: Optimization by NNCF + header: Optimization by NNCF + enable_quantization: + affects_outcome_of: TRAINING + default_value: true + description: Enable quantization algorithm + editable: true + header: Enable quantization algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: true + visible_in_ui: true + warning: null + enable_pruning: + affects_outcome_of: TRAINING + default_value: false + description: Enable filter pruning algorithm + editable: true + header: Enable filter pruning algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: true + warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null + type: PARAMETER_GROUP + visible_in_ui: false type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/anomaly_detection/configs/stfpm/template.yaml b/external/anomaly/anomaly_detection/configs/stfpm/template.yaml index f70dd918d91..7ebc3085f06 100644 --- a/external/anomaly/anomaly_detection/configs/stfpm/template.yaml +++ b/external/anomaly/anomaly_detection/configs/stfpm/template.yaml @@ -12,8 +12,9 @@ framework: OTEAnomalyClassification v0.1.0 # TODO: update after the name has bee # Task implementations. entrypoints: - base: ote_anomalib.BaseAnomalyTask + base: ote_anomalib.AnomalyTrainingTask openvino: ote_anomalib.OpenVINOAnomalyTask + nncf: ote_anomalib.AnomalyNNCFTask # Hyper Parameters hyper_parameters: diff --git a/external/anomaly/anomaly_segmentation/configs/padim/compression_config.json b/external/anomaly/anomaly_segmentation/configs/padim/compression_config.json new file mode 100644 index 00000000000..48bd526180f --- /dev/null +++ b/external/anomaly/anomaly_segmentation/configs/padim/compression_config.json @@ -0,0 +1,42 @@ +{ + "base": { + "find_unused_parameters": true, + "target_metric_name": "image_F1", + "nncf_config": { + "input_info": { + "sample_size": [1, 3, 256, 256] + }, + "compression": [], + "log_dir": "/tmp" + } + }, + "nncf_quantization": { + "nncf_config": { + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 250 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 250 + } + }, + "ignored_scopes": [ + "PadimModel/sqrt_0", + "PadimModel/interpolate_2", + "PadimModel/__truediv___0", + "PadimModel/__truediv___1", + "PadimModel/matmul_1", + "PadimModel/conv2d_0" + ] + } + ] + } + }, + "order_of_parts": [ + "nncf_quantization" + ] +} diff --git a/external/anomaly/anomaly_segmentation/configs/padim/configuration.yaml b/external/anomaly/anomaly_segmentation/configs/padim/configuration.yaml index f919c851676..cff368c59e8 100644 --- a/external/anomaly/anomaly_segmentation/configs/padim/configuration.yaml +++ b/external/anomaly/anomaly_segmentation/configs/padim/configuration.yaml @@ -83,6 +83,56 @@ pot_parameters: visible_in_ui: true warning: null type: PARAMETER_GROUP + visible_in_ui: true +nncf_optimization: + description: Optimization by NNCF + header: Optimization by NNCF + enable_quantization: + affects_outcome_of: TRAINING + default_value: true + description: Enable quantization algorithm + editable: true + header: Enable quantization algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: true + visible_in_ui: true + warning: null + enable_pruning: + affects_outcome_of: TRAINING + default_value: false + description: Enable filter pruning algorithm + editable: true + header: Enable filter pruning algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: true + warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null + type: PARAMETER_GROUP visible_in_ui: false type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/anomaly_segmentation/configs/padim/template.yaml b/external/anomaly/anomaly_segmentation/configs/padim/template.yaml index 8ebf4e76ad2..7140cd326ba 100644 --- a/external/anomaly/anomaly_segmentation/configs/padim/template.yaml +++ b/external/anomaly/anomaly_segmentation/configs/padim/template.yaml @@ -12,8 +12,9 @@ framework: OTEAnomalyClassification v0.1.0 # TODO: update after the name has bee # Task implementations. entrypoints: - base: ote_anomalib.BaseAnomalyTask + base: ote_anomalib.AnomalyTrainingTask openvino: ote_anomalib.OpenVINOAnomalyTask + nncf: ote_anomalib.AnomalyNNCFTask # Hyper Parameters hyper_parameters: diff --git a/external/anomaly/anomaly_segmentation/configs/stfpm/compression_config.json b/external/anomaly/anomaly_segmentation/configs/stfpm/compression_config.json new file mode 100644 index 00000000000..9fb1d550f9f --- /dev/null +++ b/external/anomaly/anomaly_segmentation/configs/stfpm/compression_config.json @@ -0,0 +1,40 @@ +{ + "base": { + "find_unused_parameters": true, + "target_metric_name": "image_F1", + "nncf_config": { + "input_info": { + "sample_size": [1, 3, 256, 256] + }, + "compression": [], + "log_dir": "/tmp" + } + }, + "nncf_quantization": { + "model": { + "lr": 0.004 + }, + "nncf_config": { + "compression": [ + { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 250 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 250 + } + }, + "ignored_scopes": [ + "{re}.*__pow__.*" + ] + } + ] + } + }, + "order_of_parts": [ + "nncf_quantization" + ] +} diff --git a/external/anomaly/anomaly_segmentation/configs/stfpm/configuration.yaml b/external/anomaly/anomaly_segmentation/configs/stfpm/configuration.yaml index d35a707c099..9facd1e1d1c 100644 --- a/external/anomaly/anomaly_segmentation/configs/stfpm/configuration.yaml +++ b/external/anomaly/anomaly_segmentation/configs/stfpm/configuration.yaml @@ -132,6 +132,56 @@ pot_parameters: visible_in_ui: true warning: null type: PARAMETER_GROUP + visible_in_ui: true +nncf_optimization: + description: Optimization by NNCF + header: Optimization by NNCF + enable_quantization: + affects_outcome_of: TRAINING + default_value: true + description: Enable quantization algorithm + editable: true + header: Enable quantization algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: true + visible_in_ui: true + warning: null + enable_pruning: + affects_outcome_of: TRAINING + default_value: false + description: Enable filter pruning algorithm + editable: true + header: Enable filter pruning algorithm + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: true + warning: null + pruning_supported: + affects_outcome_of: TRAINING + default_value: false + description: Whether filter pruning is supported + editable: false + header: Whether filter pruning is supported + type: BOOLEAN + ui_rules: + action: DISABLE_EDITING + operator: AND + rules: [] + type: UI_RULES + value: false + visible_in_ui: false + warning: null + type: PARAMETER_GROUP visible_in_ui: false type: CONFIGURABLE_PARAMETERS visible_in_ui: true diff --git a/external/anomaly/anomaly_segmentation/configs/stfpm/template.yaml b/external/anomaly/anomaly_segmentation/configs/stfpm/template.yaml index 644d435e2d0..c60f8f2c4ca 100644 --- a/external/anomaly/anomaly_segmentation/configs/stfpm/template.yaml +++ b/external/anomaly/anomaly_segmentation/configs/stfpm/template.yaml @@ -12,8 +12,9 @@ framework: OTEAnomalyClassification v0.1.0 # TODO: update after the name has bee # Task implementations. entrypoints: - base: ote_anomalib.BaseAnomalyTask + base: ote_anomalib.AnomalyTrainingTask openvino: ote_anomalib.OpenVINOAnomalyTask + nncf: ote_anomalib.AnomalyNNCFTask # Hyper Parameters hyper_parameters: diff --git a/external/anomaly/constraints.txt b/external/anomaly/constraints.txt index 30a7f72e227..39f36232791 100644 --- a/external/anomaly/constraints.txt +++ b/external/anomaly/constraints.txt @@ -5,12 +5,12 @@ kornia==0.5.6 lxml==4.6.5 matplotlib==3.4.3 networkx~=2.5 -nncf==2.1.0 +nncf@ git+https://github.com/openvinotoolkit/nncf@37a830a412e60ec2fd2d84d7f00e2524e5f62777#egg=nncf numpy==1.19.5 omegaconf==2.1.1 onnx==1.10.1 opencv-python==4.5.3.56 -openvino-dev==2022.1.0.dev20220316 +openvino-dev==2022.1.0 pillow==9.0.0 pytorch-lightning==1.5.9 requests==2.26.0 diff --git a/external/anomaly/init_venv.sh b/external/anomaly/init_venv.sh index dc5f06b4080..6795f2a87af 100755 --- a/external/anomaly/init_venv.sh +++ b/external/anomaly/init_venv.sh @@ -90,7 +90,8 @@ CONSTRAINTS_FILE=$(tempfile) cat constraints.txt >> ${CONSTRAINTS_FILE} export PIP_CONSTRAINT=${CONSTRAINTS_FILE} -pip install --upgrade pip || exit 1 +# Newer versions of pip have troubles with NNCF installation from the repo commit. +pip install pip==21.2.1 || exit 1 pip install wheel || exit 1 pip install --upgrade setuptools || exit 1 diff --git a/external/anomaly/ote_anomalib/__init__.py b/external/anomaly/ote_anomalib/__init__.py index a90e7dcb2fa..2fdae410031 100644 --- a/external/anomaly/ote_anomalib/__init__.py +++ b/external/anomaly/ote_anomalib/__init__.py @@ -16,7 +16,9 @@ # See the License for the specific language governing permissions # and limitations under the License. +from .inference_task import AnomalyInferenceTask +from .nncf_task import AnomalyNNCFTask from .openvino import OpenVINOAnomalyTask -from .task import BaseAnomalyTask +from .train_task import AnomalyTrainingTask -__all__ = ["BaseAnomalyTask", "OpenVINOAnomalyTask"] +__all__ = ["AnomalyInferenceTask", "AnomalyTrainingTask", "AnomalyNNCFTask", "OpenVINOAnomalyTask"] diff --git a/external/anomaly/ote_anomalib/callbacks/inference.py b/external/anomaly/ote_anomalib/callbacks/inference.py index 6b777cbbcf8..382dcdab7d6 100644 --- a/external/anomaly/ote_anomalib/callbacks/inference.py +++ b/external/anomaly/ote_anomalib/callbacks/inference.py @@ -58,23 +58,27 @@ def on_predict_epoch_end(self, _trainer: pl.Trainer, pl_module: AnomalyModule, o for dataset_item, pred_score, pred_label, anomaly_map, pred_mask in zip( self.ote_dataset, pred_scores, pred_labels, anomaly_maps, pred_masks ): - label = self.anomalous_label if pred_label else self.normal_label - probability = (1 - pred_score) if pred_score < 0.5 else pred_score - dataset_item.append_labels([ScoredLabel(label=label, probability=float(probability))]) + probability = pred_score if pred_label else 1 - pred_score + if self.task_type == TaskType.ANOMALY_CLASSIFICATION: + label = self.anomalous_label if pred_label else self.normal_label if self.task_type == TaskType.ANOMALY_DETECTION: - dataset_item.append_annotations( - annotations=create_detection_annotation_from_anomaly_heatmap( - hard_prediction=pred_mask, - soft_prediction=anomaly_map, - label_map=self.label_map, - ) + annotations = create_detection_annotation_from_anomaly_heatmap( + hard_prediction=pred_mask, + soft_prediction=anomaly_map, + label_map=self.label_map, ) + dataset_item.append_annotations(annotations) + label = self.normal_label if len(annotations) == 0 else self.anomalous_label elif self.task_type == TaskType.ANOMALY_SEGMENTATION: - mask = pred_mask.squeeze().astype(np.uint8) - dataset_item.append_annotations( - create_annotation_from_segmentation_map(mask, anomaly_map.squeeze(), self.label_map) + annotations = create_annotation_from_segmentation_map( + hard_prediction=pred_mask.squeeze().astype(np.uint8), + soft_prediction=anomaly_map.squeeze(), + label_map=self.label_map, ) + dataset_item.append_annotations(annotations) + label = self.normal_label if len(annotations) == 0 else self.anomalous_label + dataset_item.append_labels([ScoredLabel(label=label, probability=float(probability))]) dataset_item.append_metadata_item( ResultMediaEntity( name="Anomaly Map", diff --git a/external/anomaly/ote_anomalib/configs/anomalib_config.py b/external/anomaly/ote_anomalib/configs/anomalib_config.py index 49d0f00907d..db081df9a14 100644 --- a/external/anomaly/ote_anomalib/configs/anomalib_config.py +++ b/external/anomaly/ote_anomalib/configs/anomalib_config.py @@ -55,6 +55,6 @@ def update_anomalib_config(anomalib_config: Union[DictConfig, ListConfig], ote_c sc_value = sc_value.value if hasattr(sc_value, "value") else sc_value anomalib_config[param] = sc_value for group in ote_config.groups: - # Since pot_parameters are specific to OTE - if group != "pot_parameters": + # Since pot_parameters and nncf_optimization are specific to OTE + if group not in ["pot_parameters", "nncf_optimization"]: update_anomalib_config(anomalib_config[group], getattr(ote_config, group)) diff --git a/external/anomaly/ote_anomalib/configs/configuration.py b/external/anomaly/ote_anomalib/configs/configuration.py index 8fd1fe795ec..11858df24bb 100644 --- a/external/anomaly/ote_anomalib/configs/configuration.py +++ b/external/anomaly/ote_anomalib/configs/configuration.py @@ -25,6 +25,7 @@ ParameterGroup, add_parameter_group, boolean_attribute, + configurable_boolean, configurable_integer, selectable, string_attribute, @@ -97,5 +98,34 @@ class POTParameters(ParameterGroup): max_value=maxsize, ) + @attrs + class NNCFOptimization(ParameterGroup): + """ + Parameters for NNCF optimization + """ + + header = string_attribute("Optimization by NNCF") + description = header + + enable_quantization = configurable_boolean( + default_value=True, + header="Enable quantization algorithm", + description="Enable quantization algorithm", + ) + + enable_pruning = configurable_boolean( + default_value=False, + header="Enable filter pruning algorithm", + description="Enable filter pruning algorithm", + ) + + pruning_supported = configurable_boolean( + default_value=False, + header="Whether filter pruning is supported", + description="Whether filter pruning is supported", + affects_outcome_of=ModelLifecycle.TRAINING, + ) + dataset = add_parameter_group(DatasetParameters) pot_parameters = add_parameter_group(POTParameters) + nncf_optimization = add_parameter_group(NNCFOptimization) diff --git a/external/anomaly/ote_anomalib/data/create_mvtec_ad_json_annotations.py b/external/anomaly/ote_anomalib/data/create_mvtec_ad_json_annotations.py new file mode 100644 index 00000000000..5c9c98578e0 --- /dev/null +++ b/external/anomaly/ote_anomalib/data/create_mvtec_ad_json_annotations.py @@ -0,0 +1,271 @@ +# Copyright (C) 2020-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +"""Create MVTec AD (CC BY-NC-SA 4.0) JSON Annotations for OTE CLI. + +Description: + This script converts MVTec AD dataset masks to OTE CLI annotation format for + classification, detection and segmentation tasks. + +License: + MVTec AD dataset is released under the Creative Commons + Attribution-NonCommercial-ShareAlike 4.0 International License + (CC BY-NC-SA 4.0)(https://creativecommons.org/licenses/by-nc-sa/4.0/). + +Reference: + - Paul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger: + The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for + Unsupervised Anomaly Detection; in: International Journal of Computer Vision + 129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4. + + - Paul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD — + A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; + in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), + 9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982. + +Example: + Assume that MVTec AD dataset is located in "./data/anomaly/MVTec/" from the root + directory in training_extensions. JSON annotations could be created by running the + following: + + >>> import os + '~/training_extensions' + >>> os.listdir("./data/anomaly") + ['detection', 'shapes', 'segmentation', 'MVTec', 'classification'] + + The following script will generate the classification, detection and segmentation + JSON annotations to each category in ./data/anomaly/MVTec dataset. + + >>> python external/anomaly/ote_anomalib/data/create_mvtec_ad_json_annotations.py \ + ... --data_path ./data/anomaly/MVTec/ +""" + +import json +import os +from argparse import ArgumentParser, Namespace +from pathlib import Path +from typing import Any, Dict, List, Optional + +import cv2 +import pandas as pd +from anomalib.data.mvtec import make_mvtec_dataset + + +def create_bboxes_from_mask(mask_path: str) -> List[List[float]]: + """Create bounding box from binary mask. + + Args: + mask_path (str): Path to binary mask. + + Returns: + List[List[float]]: Bounding box coordinates. + """ + # pylint: disable-msg=too-many-locals + + mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) + height, width = mask.shape + + bboxes: List[List[float]] = [] + _, _, coordinates, _ = cv2.connectedComponentsWithStats(mask) + for i, coordinate in enumerate(coordinates): + # First row of the coordinates is always backround, + # so should be ignored. + if i == 0: + continue + + # Last column of the coordinates is the area of the connected component. + # It could therefore be ignored. + comp_x, comp_y, comp_w, comp_h, _ = coordinate + x1 = comp_x / width + y1 = comp_y / height + x2 = (comp_x + comp_w) / width + y2 = (comp_y + comp_h) / height + + bboxes.append([x1, y1, x2, y2]) + + return bboxes + + +def create_polygons_from_mask(mask_path: str) -> List[List[List[float]]]: + """Create polygons from binary mask. + + Args: + mask_path (str): Path to binary mask. + + Returns: + List[List[float]]: Polygon coordinates. + """ + mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) + height, width = mask.shape + + polygons = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0] + polygons = [[[point[0][0] / width, point[0][1] / height] for point in polygon] for polygon in polygons] + + return polygons + + +def create_classification_json_items(pd_items: pd.DataFrame) -> Dict[str, Any]: + """Create JSON items for the classification task. + + Args: + pd_items (pd.DataFrame): MVTec AD samples in pandas DataFrame object. + + Returns: + Dict[str, Any]: MVTec AD classification JSON items + """ + json_items: Dict[str, Any] = {"image_path": {}, "label": {}, "masks": {}} + for index, pd_item in pd_items.iterrows(): + json_items["image_path"][str(index)] = pd_item.image_path.replace(pd_item.path, "")[1:] + json_items["label"][str(index)] = pd_item.label + if pd_item.label != "good": + json_items["masks"][str(index)] = pd_item.mask_path.replace(pd_item.path, "")[1:] + + return json_items + + +def create_detection_json_items(pd_items: pd.DataFrame) -> Dict[str, Any]: + """Create JSON items for the detection task. + + Args: + pd_items (pd.DataFrame): MVTec AD samples in pandas DataFrame object. + + Returns: + Dict[str, Any]: MVTec AD detection JSON items + """ + json_items: Dict[str, Any] = {"image_path": {}, "label": {}, "bboxes": {}} + for index, pd_item in pd_items.iterrows(): + json_items["image_path"][str(index)] = pd_item.image_path.replace(pd_item.path, "")[1:] + json_items["label"][str(index)] = pd_item.label + if pd_item.label != "good": + json_items["bboxes"][str(index)] = create_bboxes_from_mask(pd_item.mask_path) + + return json_items + + +def create_segmentation_json_items(pd_items: pd.DataFrame) -> Dict[str, Any]: + """Create JSON items for the segmentation task. + + Args: + pd_items (pd.DataFrame): MVTec AD samples in pandas DataFrame object. + + Returns: + Dict[str, Any]: MVTec AD segmentation JSON items + """ + json_items: Dict[str, Any] = {"image_path": {}, "label": {}, "masks": {}} + for index, pd_item in pd_items.iterrows(): + json_items["image_path"][str(index)] = pd_item.image_path.replace(pd_item.path, "")[1:] + json_items["label"][str(index)] = pd_item.label + if pd_item.label != "good": + json_items["masks"][str(index)] = create_polygons_from_mask(pd_item.mask_path) + + return json_items + + +def save_json_items(json_items: Dict[str, Any], file: str) -> None: + """Save JSON items to file. + + Args: + json_items (Dict[str, Any]): MVTec AD JSON items + file (str): Path to save as a JSON file. + """ + with open(file=file, mode="w", encoding="utf-8") as f: + json.dump(json_items, f) + + +def create_task_annotations(task: str, data_path: str, annotation_path: str) -> None: + """Create MVTec AD categories for a given task. + + Args: + task (str): Task type to save annotations. + data_path (str): Path to MVTec AD category. + annotation_path (str): Path to save MVTec AD category JSON annotation items. + + Raises: + ValueError: When task is not classification, detection or segmentation. + """ + annotation_path = os.path.join(data_path, task) + os.makedirs(annotation_path, exist_ok=True) + + for split in ["train", "val", "test"]: + + if task == "classification": + create_json_items = create_classification_json_items + elif task == "detection": + create_json_items = create_detection_json_items + elif task == "segmentation": + create_json_items = create_segmentation_json_items + else: + raise ValueError(f"Unknown task {task}. Available tasks are classification, detection and segmentation.") + + df_items = make_mvtec_dataset(path=Path(data_path), create_validation_set=True, split=split) + json_items = create_json_items(df_items) + save_json_items(json_items, f"{annotation_path}/{split}.json") + + +def create_mvtec_ad_category_annotations(data_path: str, annotation_path: str) -> None: + """Create MVTec AD category annotations for classification, detection and segmentation tasks. + + Args: + data_path (str): Path to MVTec AD category. + annotation_path (str): Path to save MVTec AD category JSON annotation items. + """ + for task in ["classification", "detection", "segmentation"]: + create_task_annotations(task, data_path, annotation_path) + + +def create_mvtec_ad_annotations(mvtec_data_path: str, mvtec_annotation_path: Optional[str] = None) -> None: + """Create JSON annotations for MVTec AD dataset. + + Args: + mvtec_data_path (str): Path to MVTec AD dataset. + mvtec_annotation_path (Optional[str], optional): Path to save JSON annotations. Defaults to None. + """ + if mvtec_annotation_path is None: + mvtec_annotation_path = mvtec_data_path + + categories = [ + "bottle", + "cable", + "capsule", + "carpet", + "grid", + "hazelnut", + "leather", + "metal_nut", + "pill", + "screw", + "tile", + "toothbrush", + "transistor", + "wood", + "zipper", + ] + + for category in categories: + print(f"Creating annotations for {category}") + category_data_path = os.path.join(mvtec_data_path, category) + category_annotation_path = os.path.join(mvtec_annotation_path, category) + create_mvtec_ad_category_annotations(category_data_path, category_annotation_path) + + +def get_args() -> Namespace: + """Get command line arguments. + + Returns: + Namespace: List of arguments. + """ + parser = ArgumentParser() + parser.add_argument("--data_path", type=str, default="./data/anomaly/MVTec/", help="Path to Mvtec AD dataset.") + parser.add_argument("--annotation_path", type=str, required=False, help="Path to create OTE CLI annotations.") + return parser.parse_args() + + +def main(): + """Create MVTec AD Annotations.""" + args = get_args() + create_mvtec_ad_annotations(mvtec_data_path=args.data_path, mvtec_annotation_path=args.annotation_path) + + +if __name__ == "__main__": + main() diff --git a/external/anomaly/ote_anomalib/data/data.py b/external/anomaly/ote_anomalib/data/data.py index bc579eda710..aafe9408ac5 100644 --- a/external/anomaly/ote_anomalib/data/data.py +++ b/external/anomaly/ote_anomalib/data/data.py @@ -22,15 +22,15 @@ import numpy as np from anomalib.pre_processing import PreProcessor from omegaconf import DictConfig, ListConfig -from ote_anomalib.data.utils import ( - contains_anomalous_images, - split_local_global_dataset, -) from ote_anomalib.logging import get_logger from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.model_template import TaskType from ote_sdk.entities.shapes.polygon import Polygon from ote_sdk.entities.subset import Subset +from ote_sdk.utils.dataset_utils import ( + contains_anomalous_images, + split_local_global_dataset, +) from ote_sdk.utils.segmentation_utils import mask_from_dataset_item from pytorch_lightning.core.datamodule import LightningDataModule from torch import Tensor diff --git a/ote_cli/ote_cli/datasets/anomaly/dataset.py b/external/anomaly/ote_anomalib/data/dataset.py similarity index 81% rename from ote_cli/ote_cli/datasets/anomaly/dataset.py rename to external/anomaly/ote_anomalib/data/dataset.py index 06de81371f2..757c9401a12 100644 --- a/ote_cli/ote_cli/datasets/anomaly/dataset.py +++ b/external/anomaly/ote_anomalib/data/dataset.py @@ -55,9 +55,7 @@ def __init__( and dataset used for testing. Defaults to None. """ items: List[DatasetItemEntity] = [] - self.normal_label = LabelEntity( - id=ID(0), name="Normal", domain=Domain.ANOMALY_CLASSIFICATION - ) + self.normal_label = LabelEntity(id=ID(0), name="Normal", domain=Domain.ANOMALY_CLASSIFICATION) self.abnormal_label = LabelEntity( id=ID(1), name="Anomalous", @@ -101,9 +99,7 @@ def __init__( super().__init__(items=items) @abstractmethod - def get_dataset_items( - self, ann_file_path: Path, data_root_dir: Path, subset: Subset - ) -> List[DatasetItemEntity]: + def get_dataset_items(self, ann_file_path: Path, data_root_dir: Path, subset: Subset) -> List[DatasetItemEntity]: """To be implemented ib subclasses.""" raise NotImplementedError @@ -124,9 +120,7 @@ class AnomalyClassificationDataset(BaseAnomalyDataset): >>> testing_dataset = AnomalyClassificationDataset(test_subset=test_subset) """ - def get_dataset_items( - self, ann_file_path: Path, data_root_dir: Path, subset: Subset - ) -> List[DatasetItemEntity]: + def get_dataset_items(self, ann_file_path: Path, data_root_dir: Path, subset: Subset) -> List[DatasetItemEntity]: """Loads dataset based on the image path in annotation file. Args: @@ -148,19 +142,13 @@ def get_dataset_items( image = Image(file_path=str(data_root_dir / sample.image_path)) # Create annotation shape = Rectangle.generate_full_box() - label: LabelEntity = ( - self.normal_label if sample.label == "good" else self.abnormal_label - ) + label: LabelEntity = self.normal_label if sample.label == "good" else self.abnormal_label labels = [ScoredLabel(label, probability=1.0)] annotations = [Annotation(shape=shape, labels=labels)] - annotation_scene = AnnotationSceneEntity( - annotations=annotations, kind=AnnotationSceneKind.ANNOTATION - ) + annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION) # Create dataset item - dataset_item = DatasetItemEntity( - media=image, annotation_scene=annotation_scene, subset=subset - ) + dataset_item = DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=subset) # Add to dataset items dataset_items.append(dataset_item) @@ -184,9 +172,7 @@ class AnomalySegmentationDataset(BaseAnomalyDataset): """ - def get_dataset_items( - self, ann_file_path: Path, data_root_dir: Path, subset: Subset - ) -> List[DatasetItemEntity]: + def get_dataset_items(self, ann_file_path: Path, data_root_dir: Path, subset: Subset) -> List[DatasetItemEntity]: """Loads dataset based on the image path in annotation file. Args: @@ -207,10 +193,13 @@ def get_dataset_items( # convert path to str as PosixPath is not supported by Image image = Image(file_path=str(data_root_dir / sample.image_path)) # Create annotation - label: LabelEntity = ( - self.normal_label if sample.label == "good" else self.abnormal_label - ) - annotations = [] + label: LabelEntity = self.normal_label if sample.label == "good" else self.abnormal_label + annotations = [ + Annotation( + Rectangle.generate_full_box(), + labels=[ScoredLabel(label=label, probability=1.0)], + ) + ] if isinstance(sample.masks, list) and len(sample.masks) > 0: for contour in sample.masks: points = [Point(x, y) for x, y in contour] @@ -232,23 +221,10 @@ def get_dataset_items( "will be removed.", UserWarning, ) - else: - annotations.append( - Annotation( - Rectangle.generate_full_box(), - labels=[ScoredLabel(label=self.normal_label, probability=1.0)], - ) - ) - annotation_scene = AnnotationSceneEntity( - annotations=annotations, kind=AnnotationSceneKind.ANNOTATION - ) + annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION) # Add to dataset items - dataset_items.append( - DatasetItemEntity( - media=image, annotation_scene=annotation_scene, subset=subset - ) - ) + dataset_items.append(DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=subset)) return dataset_items @@ -270,9 +246,7 @@ class AnomalyDetectionDataset(BaseAnomalyDataset): """ - def get_dataset_items( - self, ann_file_path: Path, data_root_dir: Path, subset: Subset - ) -> List[DatasetItemEntity]: + def get_dataset_items(self, ann_file_path: Path, data_root_dir: Path, subset: Subset) -> List[DatasetItemEntity]: """Loads dataset based on the image path in annotation file. Args: @@ -293,10 +267,13 @@ def get_dataset_items( # convert path to str as PosixPath is not supported by Image image = Image(file_path=str(data_root_dir / sample.image_path)) # Create annotation - label: LabelEntity = ( - self.normal_label if sample.label == "good" else self.abnormal_label - ) - annotations = [] + label: LabelEntity = self.normal_label if sample.label == "good" else self.abnormal_label + annotations = [ + Annotation( + Rectangle.generate_full_box(), + labels=[ScoredLabel(label=label, probability=1.0)], + ) + ] if isinstance(sample.bboxes, list) and len(sample.bboxes) > 0: for bbox in sample.bboxes: box = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3]) @@ -317,22 +294,9 @@ def get_dataset_items( "will be removed.", UserWarning, ) - else: - annotations.append( - Annotation( - Rectangle.generate_full_box(), - labels=[ScoredLabel(label=self.normal_label, probability=1.0)], - ) - ) - annotation_scene = AnnotationSceneEntity( - annotations=annotations, kind=AnnotationSceneKind.ANNOTATION - ) + annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION) # Add to dataset items - dataset_items.append( - DatasetItemEntity( - media=image, annotation_scene=annotation_scene, subset=subset - ) - ) + dataset_items.append(DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=subset)) return dataset_items diff --git a/external/anomaly/ote_anomalib/data/mvtec.py b/external/anomaly/ote_anomalib/data/mvtec.py index 80580d96abb..98e94acd0b2 100644 --- a/external/anomaly/ote_anomalib/data/mvtec.py +++ b/external/anomaly/ote_anomalib/data/mvtec.py @@ -1,4 +1,21 @@ -"""OTE MVTec Dataset facilitate OTE Anomaly Training.""" +"""OTE MVTec Dataset facilitate OTE Anomaly Training. + +License: + MVTec AD dataset is released under the Creative Commons + Attribution-NonCommercial-ShareAlike 4.0 International License + (CC BY-NC-SA 4.0)(https://creativecommons.org/licenses/by-nc-sa/4.0/). + +Reference: + - Paul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, Carsten Steger: + The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for + Unsupervised Anomaly Detection; in: International Journal of Computer Vision + 129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4. + + - Paul Bergmann, Michael Fauser, David Sattlegger, Carsten Steger: MVTec AD — + A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection; + in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), + 9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982. +""" # Copyright (C) 2021 Intel Corporation # diff --git a/external/anomaly/ote_anomalib/data/utils.py b/external/anomaly/ote_anomalib/data/utils.py deleted file mode 100644 index 171a53c5cf6..00000000000 --- a/external/anomaly/ote_anomalib/data/utils.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -Dataset utils for OTE Anomaly -""" - -# Copyright (C) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -from typing import Tuple - -from ote_sdk.entities.annotation import AnnotationSceneEntity, AnnotationSceneKind -from ote_sdk.entities.dataset_item import DatasetItemEntity -from ote_sdk.entities.datasets import DatasetEntity -from ote_sdk.entities.resultset import ResultSetEntity -from ote_sdk.entities.shapes.rectangle import Rectangle - - -def split_local_global_dataset(dataset) -> Tuple[DatasetEntity, DatasetEntity]: - """Split a dataset into globally and locally annotated items.""" - globally_annotated = [] - locally_annotated = [] - for gt_item in dataset: - - annotations = gt_item.get_annotations() - global_annotations = [annotation for annotation in annotations if Rectangle.is_full_box(annotation.shape)] - local_annotations = [annotation for annotation in annotations if not Rectangle.is_full_box(annotation.shape)] - - if not any(label.is_anomalous for label in gt_item.get_shapes_labels()): - # normal images get added to both datasets - globally_annotated.append(gt_item) - locally_annotated.append(gt_item) - else: # image is abnormal - globally_annotated.append( - DatasetItemEntity( - media=gt_item.media, - annotation_scene=AnnotationSceneEntity(global_annotations, kind=AnnotationSceneKind.ANNOTATION), - metadata=gt_item.metadata, - subset=gt_item.subset, - ignored_labels=gt_item.ignored_labels, - ) - ) - # add locally annotated dataset items - if len(local_annotations) > 0: - locally_annotated.append( - DatasetItemEntity( - media=gt_item.media, - annotation_scene=AnnotationSceneEntity(local_annotations, kind=AnnotationSceneKind.ANNOTATION), - metadata=gt_item.metadata, - subset=gt_item.subset, - ignored_labels=gt_item.ignored_labels, - ) - ) - global_gt_dataset = DatasetEntity(globally_annotated, purpose=dataset.purpose) - local_gt_dataset = DatasetEntity(locally_annotated, purpose=dataset.purpose) - return global_gt_dataset, local_gt_dataset - - -def split_local_global_resultset(resultset) -> Tuple[ResultSetEntity, ResultSetEntity]: - """Split resultset based on the type of available annotations.""" - # splits the dataset - globally_annotated = [] - locally_annotated = [] - globally_predicted = [] - locally_predicted = [] - for gt_item, pred_item in zip(resultset.ground_truth_dataset, resultset.prediction_dataset): - - annotations = gt_item.get_annotations() - global_annotations = [annotation for annotation in annotations if Rectangle.is_full_box(annotation.shape)] - local_annotations = [annotation for annotation in annotations if not Rectangle.is_full_box(annotation.shape)] - - predictions = gt_item.get_annotations() - global_predictions = [predictions for predictions in predictions if Rectangle.is_full_box(predictions.shape)] - local_predictions = [predictions for predictions in predictions if not Rectangle.is_full_box(predictions.shape)] - - if not any(label.is_anomalous for label in gt_item.get_shapes_labels()): - # normal images get added to both datasets - globally_annotated.append(gt_item) - locally_annotated.append(gt_item) - globally_predicted.append( - DatasetItemEntity( - media=pred_item.media, - annotation_scene=AnnotationSceneEntity(global_predictions, kind=AnnotationSceneKind.PREDICTION), - metadata=pred_item.metadata, - subset=pred_item.subset, - ignored_labels=pred_item.ignored_labels, - ) - ) - locally_predicted.append( - DatasetItemEntity( - media=pred_item.media, - annotation_scene=AnnotationSceneEntity(local_predictions, kind=AnnotationSceneKind.PREDICTION), - metadata=pred_item.metadata, - subset=pred_item.subset, - ignored_labels=pred_item.ignored_labels, - ) - ) - else: # image is abnormal - globally_annotated.append( - DatasetItemEntity( - media=gt_item.media, - annotation_scene=AnnotationSceneEntity(global_annotations, kind=AnnotationSceneKind.ANNOTATION), - metadata=gt_item.metadata, - subset=gt_item.subset, - ignored_labels=gt_item.ignored_labels, - ) - ) - globally_predicted.append( - DatasetItemEntity( - media=pred_item.media, - annotation_scene=AnnotationSceneEntity(global_predictions, kind=AnnotationSceneKind.PREDICTION), - metadata=pred_item.metadata, - subset=pred_item.subset, - ignored_labels=pred_item.ignored_labels, - ) - ) - # add locally annotated dataset items - if len(local_annotations) > 0: - locally_annotated.append( - DatasetItemEntity( - media=gt_item.media, - annotation_scene=AnnotationSceneEntity(local_annotations, kind=AnnotationSceneKind.ANNOTATION), - metadata=gt_item.metadata, - subset=gt_item.subset, - ignored_labels=gt_item.ignored_labels, - ) - ) - locally_predicted.append( - DatasetItemEntity( - media=pred_item.media, - annotation_scene=AnnotationSceneEntity(local_predictions, kind=AnnotationSceneKind.PREDICTION), - metadata=pred_item.metadata, - subset=pred_item.subset, - ignored_labels=pred_item.ignored_labels, - ) - ) - - global_resultset = ResultSetEntity( - model=resultset.model, - ground_truth_dataset=DatasetEntity(globally_annotated, purpose=resultset.ground_truth_dataset.purpose), - prediction_dataset=DatasetEntity(globally_predicted, purpose=resultset.prediction_dataset.purpose), - purpose=resultset.purpose, - ) - local_resultset = ResultSetEntity( - model=resultset.model, - ground_truth_dataset=DatasetEntity(locally_annotated, purpose=resultset.ground_truth_dataset.purpose), - prediction_dataset=DatasetEntity(locally_predicted, purpose=resultset.prediction_dataset.purpose), - purpose=resultset.purpose, - ) - - return global_resultset, local_resultset - - -def contains_anomalous_images(dataset: DatasetEntity) -> bool: - """Find the number of local annotations in a resultset.""" - for item in dataset: - labels = item.get_shapes_labels() - if any(label.is_anomalous for label in labels): - return True - return False diff --git a/external/anomaly/ote_anomalib/task.py b/external/anomaly/ote_anomalib/inference_task.py similarity index 79% rename from external/anomaly/ote_anomalib/task.py rename to external/anomaly/ote_anomalib/inference_task.py index c35533971e1..c7349a3ace6 100644 --- a/external/anomaly/ote_anomalib/task.py +++ b/external/anomaly/ote_anomalib/inference_task.py @@ -21,7 +21,7 @@ import subprocess # nosec import tempfile from glob import glob -from typing import Optional, Union +from typing import Dict, List, Optional, Union import torch from anomalib.models import AnomalyModule, get_model @@ -30,36 +30,35 @@ from ote_anomalib.callbacks import AnomalyInferenceCallback, ProgressCallback from ote_anomalib.configs import get_anomalib_config from ote_anomalib.data import OTEAnomalyDataModule -from ote_anomalib.data.utils import ( - contains_anomalous_images, - split_local_global_resultset, -) from ote_anomalib.logging import get_logger from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.inference_parameters import InferenceParameters from ote_sdk.entities.metrics import Performance, ScoreMetric -from ote_sdk.entities.model import ModelEntity, ModelPrecision +from ote_sdk.entities.model import ( + ModelEntity, + ModelFormat, + ModelOptimizationType, + ModelPrecision, + OptimizationMethod, +) from ote_sdk.entities.model_template import TaskType from ote_sdk.entities.resultset import ResultSetEntity from ote_sdk.entities.task_environment import TaskEnvironment -from ote_sdk.entities.train_parameters import TrainParameters from ote_sdk.serialization.label_mapper import label_schema_to_bytes -from ote_sdk.usecases.evaluation.averaging import MetricAverageMethod from ote_sdk.usecases.evaluation.metrics_helper import MetricsHelper from ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType, IExportTask from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask -from ote_sdk.usecases.tasks.interfaces.training_interface import ITrainingTask from ote_sdk.usecases.tasks.interfaces.unload_interface import IUnload from pytorch_lightning import Trainer logger = get_logger(__name__) -class BaseAnomalyTask(ITrainingTask, IInferenceTask, IEvaluationTask, IExportTask, IUnload): +# pylint: disable=too-many-instance-attributes +class AnomalyInferenceTask(IInferenceTask, IEvaluationTask, IExportTask, IUnload): """Base Anomaly Task.""" - # pylint: disable=too-many-instance-attributes def __init__(self, task_environment: TaskEnvironment) -> None: """Train, Infer, Export, Optimize and Deploy an Anomaly Classification Task. @@ -73,10 +72,18 @@ def __init__(self, task_environment: TaskEnvironment) -> None: self.model_name = task_environment.model_template.name self.labels = task_environment.get_labels() + template_file_path = task_environment.model_template.model_template_path + self.base_dir = os.path.abspath(os.path.dirname(template_file_path)) + # Hyperparameters. self.project_path: str = tempfile.mkdtemp(prefix="ote-anomalib") self.config = self.get_config() + # Set default model attributes. + self.optimization_methods: List[OptimizationMethod] = [] + self.precision = [ModelPrecision.FP32] + self.optimization_type = ModelOptimizationType.MO + self.model = self.load_model(ote_model=task_environment.model) self.trainer: Trainer @@ -87,8 +94,8 @@ def get_config(self) -> Union[DictConfig, ListConfig]: Returns: Union[DictConfig, ListConfig]: Anomalib config. """ - hyper_parameters = self.task_environment.get_hyper_parameters() - config = get_anomalib_config(task_name=self.model_name, ote_config=hyper_parameters) + self.hyper_parameters = self.task_environment.get_hyper_parameters() + config = get_anomalib_config(task_name=self.model_name, ote_config=self.hyper_parameters) config.project.path = self.project_path config.dataset.task = "classification" @@ -129,57 +136,6 @@ def load_model(self, ote_model: Optional[ModelEntity]) -> AnomalyModule: return model - def train( - self, - dataset: DatasetEntity, - output_model: ModelEntity, - train_parameters: TrainParameters, - ) -> None: - """Train the anomaly classification model. - - Args: - dataset (DatasetEntity): Input dataset. - output_model (ModelEntity): Output model to save the model weights. - train_parameters (TrainParameters): Training parameters - """ - logger.info("Training the model.") - - config = self.get_config() - logger.info("Training Configs '%s'", config) - - datamodule = OTEAnomalyDataModule(config=config, dataset=dataset, task_type=self.task_type) - callbacks = [ProgressCallback(parameters=train_parameters), MinMaxNormalizationCallback()] - - self.trainer = Trainer(**config.trainer, logger=False, callbacks=callbacks) - self.trainer.fit(model=self.model, datamodule=datamodule) - - self.save_model(output_model) - - logger.info("Training completed.") - - def save_model(self, output_model: ModelEntity) -> None: - """Save the model after training is completed. - - Args: - output_model (ModelEntity): Output model onto which the weights are saved. - """ - logger.info("Saving the model weights.") - config = self.get_config() - model_info = { - "model": self.model.state_dict(), - "config": config, - "VERSION": 1, - } - buffer = io.BytesIO() - torch.save(model_info, buffer) - output_model.set_data("weights.pth", buffer.getvalue()) - output_model.set_data("label_schema.json", label_schema_to_bytes(self.task_environment.label_schema)) - self._set_metadata(output_model) - - f1_score = self.model.image_metrics.F1.compute().item() - output_model.performance = Performance(score=ScoreMetric(name="F1 Score", value=f1_score)) - output_model.precision = [ModelPrecision.FP32] - def cancel_training(self) -> None: """Cancel the training `after_batch_end`. @@ -230,20 +186,9 @@ def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optiona if self.task_type == TaskType.ANOMALY_CLASSIFICATION: metric = MetricsHelper.compute_f_measure(output_resultset) elif self.task_type == TaskType.ANOMALY_DETECTION: - global_resultset, local_resultset = split_local_global_resultset(output_resultset) - metric = MetricsHelper.compute_f_measure(local_resultset) + metric = MetricsHelper.compute_anomaly_detection_scores(output_resultset) elif self.task_type == TaskType.ANOMALY_SEGMENTATION: - global_resultset, local_resultset = split_local_global_resultset(output_resultset) - logger.info(f"Global annotations: {len(global_resultset.ground_truth_dataset)}") - logger.info(f"Local annotations: {len(local_resultset.ground_truth_dataset)}") - logger.info(f"Global predictions: {len(global_resultset.prediction_dataset)}") - logger.info(f"Local predictions: {len(local_resultset.prediction_dataset)}") - if contains_anomalous_images(local_resultset.ground_truth_dataset): - logger.info("Dataset contains polygon annotations. Using pixel-level evaluation metric.") - metric = MetricsHelper.compute_dice_averaged_over_pixels(local_resultset, MetricAverageMethod.MICRO) - else: - logger.info("Dataset does not contain polygon annotations. Using image-level evaluation metric.") - metric = MetricsHelper.compute_f_measure(global_resultset) + metric = MetricsHelper.compute_anomaly_segmentation_scores(output_resultset) else: raise ValueError(f"Unknown task type: {self.task_type}") output_resultset.performance = metric.get_performance() @@ -252,6 +197,20 @@ def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optiona accuracy = MetricsHelper.compute_accuracy(output_resultset).get_performance() output_resultset.performance.dashboard_metrics.extend(accuracy.dashboard_metrics) + def _export_to_onnx(self, onnx_path: str): + """Export model to ONNX + + Args: + onnx_path (str): path to save ONNX file + """ + height, width = self.config.model.input_size + torch.onnx.export( + model=self.model.model, + args=torch.zeros((1, 3, height, width)).to(self.model.device), + f=onnx_path, + opset_version=11, + ) + def export(self, export_type: ExportType, output_model: ModelEntity) -> None: """Export model to OpenVINO IR. @@ -264,16 +223,13 @@ def export(self, export_type: ExportType, output_model: ModelEntity) -> None: """ assert export_type == ExportType.OPENVINO + output_model.model_format = ModelFormat.OPENVINO + output_model.optimization_type = self.optimization_type + # pylint: disable=no-member; need to refactor this logger.info("Exporting the OpenVINO model.") - height, width = self.config.model.input_size onnx_path = os.path.join(self.config.project.path, "onnx_model.onnx") - torch.onnx.export( - model=self.model.model, - args=torch.zeros((1, 3, height, width)).to(self.model.device), - f=onnx_path, - opset_version=11, - ) + self._export_to_onnx(onnx_path) optimize_command = "mo --input_model " + onnx_path + " --output_dir " + self.config.project.path subprocess.call(optimize_command, shell=True) bin_file = glob(os.path.join(self.config.project.path, "*.bin"))[0] @@ -282,9 +238,45 @@ def export(self, export_type: ExportType, output_model: ModelEntity) -> None: output_model.set_data("openvino.bin", file.read()) with open(xml_file, "rb") as file: output_model.set_data("openvino.xml", file.read()) + + output_model.precision = self.precision + output_model.optimization_methods = self.optimization_methods + + output_model.set_data("label_schema.json", label_schema_to_bytes(self.task_environment.label_schema)) + self._set_metadata(output_model) + + def _model_info(self) -> Dict: + """Return model info to save the model weights. + + Returns: + Dict: Model info. + """ + + return { + "model": self.model.state_dict(), + "config": self.get_config(), + "VERSION": 1, + } + + def save_model(self, output_model: ModelEntity) -> None: + """Save the model after training is completed. + + Args: + output_model (ModelEntity): Output model onto which the weights are saved. + """ + logger.info("Saving the model weights.") + model_info = self._model_info() + buffer = io.BytesIO() + torch.save(model_info, buffer) + output_model.set_data("weights.pth", buffer.getvalue()) output_model.set_data("label_schema.json", label_schema_to_bytes(self.task_environment.label_schema)) self._set_metadata(output_model) + f1_score = self.model.image_metrics.F1.compute().item() + output_model.performance = Performance(score=ScoreMetric(name="F1 Score", value=f1_score)) + output_model.precision = self.precision + output_model.optimization_methods = self.optimization_methods + def _set_metadata(self, output_model: ModelEntity): output_model.set_data("image_threshold", self.model.image_threshold.value.cpu().numpy().tobytes()) output_model.set_data("pixel_threshold", self.model.pixel_threshold.value.cpu().numpy().tobytes()) diff --git a/external/anomaly/ote_anomalib/nncf_task.py b/external/anomaly/ote_anomalib/nncf_task.py new file mode 100644 index 00000000000..2b4cabcc7d3 --- /dev/null +++ b/external/anomaly/ote_anomalib/nncf_task.py @@ -0,0 +1,214 @@ +"""Anomaly Classification Task.""" + +# Copyright (C) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +import io +import json +import os +import re +from typing import Dict, Optional + +import torch +from anomalib.models import AnomalyModule, get_model +from anomalib.utils.callbacks import MinMaxNormalizationCallback +from anomalib.utils.callbacks.nncf.callback import NNCFCallback +from anomalib.utils.callbacks.nncf.utils import ( + compose_nncf_config, + is_state_nncf, + wrap_nncf_model, +) +from ote_anomalib import AnomalyInferenceTask +from ote_anomalib.callbacks import ProgressCallback +from ote_anomalib.data import OTEAnomalyDataModule +from ote_anomalib.logging import get_logger +from ote_sdk.entities.datasets import DatasetEntity +from ote_sdk.entities.model import ( + ModelEntity, + ModelOptimizationType, + ModelPrecision, + OptimizationMethod, +) +from ote_sdk.entities.optimization_parameters import OptimizationParameters +from ote_sdk.entities.task_environment import TaskEnvironment +from ote_sdk.usecases.tasks.interfaces.optimization_interface import ( + IOptimizationTask, + OptimizationType, +) +from pytorch_lightning import Trainer + +logger = get_logger(__name__) + + +class AnomalyNNCFTask(AnomalyInferenceTask, IOptimizationTask): + """Base Anomaly Task.""" + + def __init__(self, task_environment: TaskEnvironment) -> None: + """Task for compressing models using NNCF. + + Args: + task_environment (TaskEnvironment): OTE Task environment. + """ + self.compression_ctrl = None + self.nncf_preset = "nncf_quantization" + super().__init__(task_environment) + self.optimization_type = ModelOptimizationType.NNCF + + def _set_attributes_by_hyperparams(self): + quantization = self.hyper_parameters.nncf_optimization.enable_quantization + pruning = self.hyper_parameters.nncf_optimization.enable_pruning + if quantization and pruning: + self.nncf_preset = "nncf_quantization_pruning" + self.optimization_methods = [ + OptimizationMethod.QUANTIZATION, + OptimizationMethod.FILTER_PRUNING, + ] + self.precision = [ModelPrecision.INT8] + return + if quantization and not pruning: + self.nncf_preset = "nncf_quantization" + self.optimization_methods = [OptimizationMethod.QUANTIZATION] + self.precision = [ModelPrecision.INT8] + return + if not quantization and pruning: + self.nncf_preset = "nncf_pruning" + self.optimization_methods = [OptimizationMethod.FILTER_PRUNING] + self.precision = [ModelPrecision.FP32] + return + raise RuntimeError("Not selected optimization algorithm") + + def load_model(self, ote_model: Optional[ModelEntity]) -> AnomalyModule: + """Create and Load Anomalib Module from OTE Model. + + This method checks if the task environment has a saved OTE Model, + and creates one. If the OTE model already exists, it returns the + the model with the saved weights. + + Args: + ote_model (Optional[ModelEntity]): OTE Model from the + task environment. + + Returns: + AnomalyModule: Anomalib + classification or segmentation model with/without weights. + """ + nncf_config_path = os.path.join(self.base_dir, "compression_config.json") + + with open(nncf_config_path, encoding="utf8") as nncf_config_file: + common_nncf_config = json.load(nncf_config_file) + + self._set_attributes_by_hyperparams() + self.optimization_config = compose_nncf_config(common_nncf_config, [self.nncf_preset]) + self.config.merge_with(self.optimization_config) + model = get_model(config=self.config) + if ote_model is None: + raise ValueError("No trained model in project. NNCF require pretrained weights to compress the model") + + buffer = io.BytesIO(ote_model.get_data("weights.pth")) # type: ignore + model_data = torch.load(buffer, map_location=torch.device("cpu")) + + if is_state_nncf(model_data): + logger.info("Loaded model weights from Task Environment and wrapped by NNCF") + + # Fix name mismatch for wrapped model by pytorch_lighting + nncf_modules = {} + pl_modules = {} + for key in model_data["model"].keys(): + if key.startswith("model."): + new_key = key.replace("model.", "") + res = re.search(r"nncf_module\.(\w+)_backbone\.(.*)", new_key) + if res: + new_key = f"nncf_module.{res.group(1)}_model.backbone.{res.group(2)}" + nncf_modules[new_key] = model_data["model"][key] + else: + pl_modules[key] = model_data["model"][key] + model_data["model"] = nncf_modules + + self.compression_ctrl, model.model = wrap_nncf_model( + model.model, + self.optimization_config["nncf_config"], + init_state_dict=model_data, + ) + # Load extra parameters of pytorch_lighting model + model.load_state_dict(pl_modules, strict=False) + else: + try: + model.load_state_dict(model_data["model"]) + logger.info("Loaded model weights from Task Environment") + except BaseException as exception: + raise ValueError("Could not load the saved model. The model file structure is invalid.") from exception + + return model + + def optimize( + self, + optimization_type: OptimizationType, + dataset: DatasetEntity, + output_model: ModelEntity, + optimization_parameters: Optional[OptimizationParameters] = None, + ): + """Train the anomaly classification model. + + Args: + optimization_type (OptimizationType): Type of optimization. + dataset (DatasetEntity): Input dataset. + output_model (ModelEntity): Output model to save the model weights. + optimization_parameters (OptimizationParameters): Training parameters + """ + logger.info("Optimization the model.") + + if optimization_type is not OptimizationType.NNCF: + raise RuntimeError("NNCF is the only supported optimization") + + datamodule = OTEAnomalyDataModule(config=self.config, dataset=dataset, task_type=self.task_type) + + nncf_callback = NNCFCallback(nncf_config=self.optimization_config["nncf_config"]) + callbacks = [ + ProgressCallback(parameters=optimization_parameters), + MinMaxNormalizationCallback(), + nncf_callback, + ] + + self.trainer = Trainer(**self.config.trainer, logger=False, callbacks=callbacks) + self.trainer.fit(model=self.model, datamodule=datamodule) + self.compression_ctrl = nncf_callback.nncf_ctrl + self.save_model(output_model) + + logger.info("Training completed.") + + def _model_info(self) -> Dict: + """Return model info to save the model weights. + + Returns: + Dict: Model info. + """ + + return { + "compression_state": self.compression_ctrl.get_compression_state(), # type: ignore + "meta": { + "config": self.config, + "nncf_enable_compression": True, + }, + "model": self.model.state_dict(), + "config": self.get_config(), + "VERSION": 1, + } + + def _export_to_onnx(self, onnx_path: str): + """Export model to ONNX + + Args: + onnx_path (str): path to save ONNX file + """ + self.compression_ctrl.export_model(onnx_path, "onnx_11") # type: ignore diff --git a/external/anomaly/ote_anomalib/openvino.py b/external/anomaly/ote_anomalib/openvino.py index d46530e60cb..15ab6c4f25b 100644 --- a/external/anomaly/ote_anomalib/openvino.py +++ b/external/anomaly/ote_anomalib/openvino.py @@ -16,17 +16,15 @@ # See the License for the specific language governing permissions # and limitations under the License. -import inspect +import io import json import os -import subprocess # nosec -import sys import tempfile -from shutil import copyfile, copytree from typing import Any, Dict, List, Optional from zipfile import ZipFile import numpy as np +import ote_anomalib.exportable_code from addict import Dict as ADDict from anomalib.deploy import OpenVINOInferencer from anomalib.post_processing import anomaly_map_to_color_map @@ -37,16 +35,7 @@ from compression.pipeline.initializer import create_pipeline from omegaconf import OmegaConf from ote_anomalib.configs import get_anomalib_config -from ote_anomalib.data.utils import ( - contains_anomalous_images, - split_local_global_resultset, -) -from ote_anomalib.exportable_code import ( - AnomalyBase, - AnomalyClassification, - AnomalyDetection, - AnomalySegmentation, -) +from ote_anomalib.data import LabelNames from ote_anomalib.logging import get_logger from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.inference_parameters import ( @@ -64,17 +53,11 @@ from ote_sdk.entities.optimization_parameters import OptimizationParameters from ote_sdk.entities.result_media import ResultMediaEntity from ote_sdk.entities.resultset import ResultSetEntity +from ote_sdk.entities.scored_label import ScoredLabel from ote_sdk.entities.task_environment import TaskEnvironment from ote_sdk.serialization.label_mapper import LabelSchemaMapper, label_schema_to_bytes -from ote_sdk.usecases.evaluation.averaging import MetricAverageMethod from ote_sdk.usecases.evaluation.metrics_helper import MetricsHelper from ote_sdk.usecases.exportable_code import demo -from ote_sdk.usecases.exportable_code.prediction_to_annotation_converter import ( - AnomalyClassificationToAnnotationConverter, - AnomalyDetectionToAnnotationConverter, - AnomalySegmentationToAnnotationConverter, - IPredictionToAnnotationConverter, -) from ote_sdk.usecases.tasks.interfaces.deployment_interface import IDeploymentTask from ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask @@ -82,6 +65,8 @@ IOptimizationTask, OptimizationType, ) +from ote_sdk.utils.anomaly_utils import create_detection_annotation_from_anomaly_heatmap +from ote_sdk.utils.segmentation_utils import create_annotation_from_segmentation_map logger = get_logger(__name__) @@ -131,15 +116,9 @@ def __init__(self, task_environment: TaskEnvironment) -> None: self.config = self.get_config() self.inferencer = self.load_inferencer() - self.annotation_converter: IPredictionToAnnotationConverter - if self.task_type == TaskType.ANOMALY_CLASSIFICATION: - self.annotation_converter = AnomalyClassificationToAnnotationConverter(self.task_environment.label_schema) - elif self.task_type == TaskType.ANOMALY_DETECTION: - self.annotation_converter = AnomalyDetectionToAnnotationConverter(self.task_environment.label_schema) - elif self.task_type == TaskType.ANOMALY_SEGMENTATION: - self.annotation_converter = AnomalySegmentationToAnnotationConverter(self.task_environment.label_schema) - else: - raise ValueError(f"Unknown task type: {self.task_type}") + labels = self.task_environment.get_labels() + self.normal_label = [label for label in labels if label.name == LabelNames.normal][0] + self.anomalous_label = [label for label in labels if label.name == LabelNames.anomalous][0] template_file_path = task_environment.model_template.model_template_path self._base_dir = os.path.abspath(os.path.dirname(template_file_path)) @@ -180,16 +159,28 @@ def infer(self, dataset: DatasetEntity, inference_parameters: InferenceParameter anomaly_map, pred_score = self.inferencer.predict( dataset_item.numpy, superimpose=False, meta_data=meta_data ) + # TODO: inferencer should return predicted label and mask + pred_label = pred_score >= 0.5 + pred_mask = (anomaly_map >= 0.5).astype(np.uint8) + probability = pred_score if pred_label else 1 - pred_score if self.task_type == TaskType.ANOMALY_CLASSIFICATION: - annotations_scene = self.annotation_converter.convert_to_annotation(pred_score, meta_data) - elif self.task_type in (TaskType.ANOMALY_DETECTION, TaskType.ANOMALY_SEGMENTATION): - annotations_scene = self.annotation_converter.convert_to_annotation(anomaly_map, meta_data) + label = self.anomalous_label if pred_score >= 0.5 else self.normal_label + elif self.task_type == TaskType.ANOMALY_SEGMENTATION: + annotations = create_annotation_from_segmentation_map( + pred_mask, anomaly_map.squeeze(), {0: self.normal_label, 1: self.anomalous_label} + ) + dataset_item.append_annotations(annotations) + label = self.normal_label if len(annotations) == 0 else self.anomalous_label + elif self.task_type == TaskType.ANOMALY_DETECTION: + annotations = create_detection_annotation_from_anomaly_heatmap( + pred_mask, anomaly_map.squeeze(), {0: self.normal_label, 1: self.anomalous_label} + ) + dataset_item.append_annotations(annotations) + label = self.normal_label if len(annotations) == 0 else self.anomalous_label else: raise ValueError(f"Unknown task type: {self.task_type}") - # pylint: disable=protected-access - dataset_item.append_annotations(annotations_scene.annotations) - + dataset_item.append_labels([ScoredLabel(label=label, probability=float(probability))]) anomaly_map = anomaly_map_to_color_map(anomaly_map, normalize=False) heatmap_media = ResultMediaEntity( name="Anomaly Map", @@ -227,20 +218,9 @@ def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optiona if self.task_type == TaskType.ANOMALY_CLASSIFICATION: metric = MetricsHelper.compute_f_measure(output_resultset) elif self.task_type == TaskType.ANOMALY_DETECTION: - global_resultset, local_resultset = split_local_global_resultset(output_resultset) - metric = MetricsHelper.compute_f_measure(local_resultset) + metric = MetricsHelper.compute_anomaly_detection_scores(output_resultset) elif self.task_type == TaskType.ANOMALY_SEGMENTATION: - global_resultset, local_resultset = split_local_global_resultset(output_resultset) - logger.info(f"Global annotations: {len(global_resultset.ground_truth_dataset)}") - logger.info(f"Local annotations: {len(local_resultset.ground_truth_dataset)}") - logger.info(f"Global predictions: {len(global_resultset.prediction_dataset)}") - logger.info(f"Local predictions: {len(local_resultset.prediction_dataset)}") - if contains_anomalous_images(local_resultset.ground_truth_dataset): - logger.info("Dataset contains polygon annotations. Using pixel-level evaluation metric.") - metric = MetricsHelper.compute_dice_averaged_over_pixels(local_resultset, MetricAverageMethod.MICRO) - else: - logger.info("Dataset does not contain polygon annotations. Using image-level evaluation metric.") - metric = MetricsHelper.compute_f_measure(global_resultset) + metric = MetricsHelper.compute_anomaly_segmentation_scores(output_resultset) else: raise ValueError(f"Unknown task type: {self.task_type}") output_resultset.performance = metric.get_performance() @@ -307,11 +287,17 @@ def optimize( if get_nodes_by_type(model, ["FakeQuantize"]): raise RuntimeError("Model is already optimized by POT") + if optimization_parameters is not None: + optimization_parameters.update_progress(10) + engine = IEEngine(config=ADDict({"device": "CPU"}), data_loader=data_loader, metric=None) pipeline = create_pipeline(algo_config=self._get_optimization_algorithms_configs(), engine=engine) compressed_model = pipeline.run(model) compress_model_weights(compressed_model) + if optimization_parameters is not None: + optimization_parameters.update_progress(90) + with tempfile.TemporaryDirectory() as tempdir: save_model(compressed_model, tempdir, model_name="model") self.__load_weights(path=os.path.join(tempdir, "model.xml"), output_model=output_model, key="openvino.xml") @@ -330,6 +316,10 @@ def optimize( self.task_environment.model = output_model self.inferencer = self.load_inferencer() + if optimization_parameters is not None: + optimization_parameters.update_progress(100) + logger.info("POT optimization completed") + def load_inferencer(self) -> OpenVINOInferencer: """ Create the OpenVINO inferencer object @@ -422,57 +412,26 @@ def deploy(self, output_model: ModelEntity) -> None: task_type = str(self.task_type).lower() - if self.task_type == TaskType.ANOMALY_CLASSIFICATION: - selected_class = AnomalyClassification - elif self.task_type == TaskType.ANOMALY_DETECTION: - selected_class = AnomalyDetection - elif self.task_type == TaskType.ANOMALY_SEGMENTATION: - selected_class = AnomalySegmentation - else: - raise ValueError( - f"{self.task_type} is not supported. " - "Only Anomaly are supported" - ) - parameters["type_of_model"] = task_type parameters["converter_type"] = task_type.upper() parameters["model_parameters"] = self._get_openvino_configuration() - name_of_package = "demo_package" - - with tempfile.TemporaryDirectory() as tempdir: - copyfile(os.path.join(work_dir, "setup.py"), os.path.join(tempdir, "setup.py")) - copyfile(os.path.join(work_dir, "requirements.txt"), os.path.join(tempdir, "requirements.txt")) - copytree(os.path.join(work_dir, name_of_package), os.path.join(tempdir, name_of_package)) - config_path = os.path.join(tempdir, name_of_package, "config.json") - with open(config_path, "w", encoding="utf-8") as file: - json.dump(parameters, file, ensure_ascii=False, indent=4) - - copyfile(inspect.getfile(selected_class), os.path.join(tempdir, name_of_package, "model.py")) - copyfile(inspect.getfile(AnomalyBase), os.path.join(tempdir, name_of_package, "base.py")) - - # create wheel package - subprocess.run( - [ - sys.executable, - os.path.join(tempdir, "setup.py"), - "bdist_wheel", - "--dist-dir", - tempdir, - "clean", - "--all", - ], - check=True, - ) - wheel_file_name = [f for f in os.listdir(tempdir) if f.endswith(".whl")][0] - - with ZipFile(os.path.join(tempdir, "openvino.zip"), "w") as arch: - arch.writestr(os.path.join("model", "model.xml"), self.task_environment.model.get_data("openvino.xml")) - arch.writestr(os.path.join("model", "model.bin"), self.task_environment.model.get_data("openvino.bin")) - arch.write(os.path.join(tempdir, "requirements.txt"), os.path.join("python", "requirements.txt")) - arch.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) - arch.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) - arch.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) - arch.write(os.path.join(tempdir, wheel_file_name), os.path.join("python", wheel_file_name)) - with open(os.path.join(tempdir, "openvino.zip"), "rb") as output_arch: - output_model.exportable_code = output_arch.read() + zip_buffer = io.BytesIO() + with ZipFile(zip_buffer, "w") as arch: + # model files + arch.writestr(os.path.join("model", "model.xml"), self.task_environment.model.get_data("openvino.xml")) + arch.writestr(os.path.join("model", "model.bin"), self.task_environment.model.get_data("openvino.bin")) + arch.writestr(os.path.join("model", "config.json"), json.dumps(parameters, ensure_ascii=False, indent=4)) + # model_wrappers files + for root, _, files in os.walk(os.path.dirname(ote_anomalib.exportable_code.__file__)): + for file in files: + file_path = os.path.join(root, file) + arch.write( + file_path, os.path.join("python", "model_wrappers", file_path.split("exportable_code/")[1]) + ) + # other python files + arch.write(os.path.join(work_dir, "requirements.txt"), os.path.join("python", "requirements.txt")) + arch.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) + arch.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) + arch.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) + output_model.exportable_code = zip_buffer.getvalue() logger.info("Deployment completed.") diff --git a/external/anomaly/ote_anomalib/tools/sample.py b/external/anomaly/ote_anomalib/tools/sample.py index 0769d90f0a7..b79b5cf7ce0 100644 --- a/external/anomaly/ote_anomalib/tools/sample.py +++ b/external/anomaly/ote_anomalib/tools/sample.py @@ -22,21 +22,26 @@ import os import shutil from argparse import Namespace -from typing import Any, cast - -from ote_anomalib import BaseAnomalyTask, OpenVINOAnomalyTask -from ote_anomalib.data.mvtec import OteMvtecDataset +from typing import Any, Dict, Type, Union + +from ote_anomalib import AnomalyNNCFTask, OpenVINOAnomalyTask +from ote_anomalib.data.dataset import ( + AnomalyClassificationDataset, + AnomalyDetectionDataset, + AnomalySegmentationDataset, +) from ote_anomalib.logging import get_logger from ote_sdk.configuration.helper import create as create_hyper_parameters from ote_sdk.entities.inference_parameters import InferenceParameters from ote_sdk.entities.label_schema import LabelSchemaEntity from ote_sdk.entities.model import ModelEntity -from ote_sdk.entities.model_template import parse_model_template +from ote_sdk.entities.model_template import TaskType, parse_model_template from ote_sdk.entities.optimization_parameters import OptimizationParameters from ote_sdk.entities.resultset import ResultSetEntity from ote_sdk.entities.subset import Subset from ote_sdk.entities.task_environment import TaskEnvironment from ote_sdk.entities.train_parameters import TrainParameters +from ote_sdk.usecases.adapters.model_adapter import ModelAdapter from ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask @@ -45,10 +50,18 @@ logger = get_logger(__name__) +# pylint: disable=too-many-instance-attributes class OteAnomalyTask: """OTE Anomaly Classification Task.""" - def __init__(self, dataset_path: str, seed: int, model_template_path: str) -> None: + def __init__( + self, + dataset_path: str, + train_subset: Dict[str, str], + val_subset: Dict[str, str], + test_subset: Dict[str, str], + model_template_path: str, + ) -> None: """Initialize OteAnomalyTask. Args: @@ -82,15 +95,42 @@ def __init__(self, dataset_path: str, seed: int, model_template_path: str) -> No logger.info("Loading MVTec dataset.") self.task_type = self.model_template.task_type - self.dataset = OteMvtecDataset(path=dataset_path, seed=seed, task_type=self.task_type).generate() + + dataclass = self.get_dataclass() + + self.dataset = dataclass(train_subset, val_subset, test_subset) logger.info("Creating the task-environment.") self.task_environment = self.create_task_environment() logger.info("Creating the base Torch and OpenVINO tasks.") self.torch_task = self.create_task(task="base") - self.torch_task = cast(BaseAnomalyTask, self.torch_task) + + self.trained_model: ModelEntity self.openvino_task: OpenVINOAnomalyTask + self.nncf_task: AnomalyNNCFTask + self.results = {"category": dataset_path} + + def get_dataclass( + self, + ) -> Union[Type[AnomalyDetectionDataset], Type[AnomalySegmentationDataset], Type[AnomalyClassificationDataset]]: + """Gets the dataloader based on the task type. + + Raises: + ValueError: Validates task type. + + Returns: + Dataloader + """ + if self.task_type == TaskType.ANOMALY_DETECTION: + dataclass = AnomalyDetectionDataset + elif self.task_type == TaskType.ANOMALY_SEGMENTATION: + dataclass = AnomalySegmentationDataset + elif self.task_type == TaskType.ANOMALY_CLASSIFICATION: + dataclass = AnomalyClassificationDataset + else: + raise ValueError(f"{self.task_type} not a supported task") + return dataclass def create_task_environment(self) -> TaskEnvironment: """Create task environment.""" @@ -146,7 +186,9 @@ def train(self) -> ModelEntity: logger.info("Evaluating the base torch model on the validation set.") self.evaluate(self.torch_task, result_set) - return output_model + self.results["torch_fp32"] = result_set.performance.score.value + self.trained_model = output_model + return self.trained_model def infer(self, task: IInferenceTask, output_model: ModelEntity) -> ResultSetEntity: """Get the predictions using the base Torch or OpenVINO tasks and models. @@ -196,13 +238,14 @@ def export(self) -> ModelEntity: logger.info("Creating the OpenVINO Task.") self.openvino_task = self.create_task(task="openvino") - self.openvino_task = cast(OpenVINOAnomalyTask, self.openvino_task) logger.info("Inferring the exported model on the validation set.") result_set = self.infer(task=self.openvino_task, output_model=exported_model) logger.info("Evaluating the exported model on the validation set.") self.evaluate(task=self.openvino_task, result_set=result_set) + self.results["vino_fp32"] = result_set.performance.score.value + return exported_model def optimize(self) -> None: @@ -225,6 +268,54 @@ def optimize(self) -> None: logger.info("Evaluating the optimized model on the validation set.") self.evaluate(task=self.openvino_task, result_set=result_set) + self.results["pot_int8"] = result_set.performance.score.value + + def optimize_nncf(self) -> None: + """Optimize the model via NNCF.""" + logger.info("Running the NNCF optimization") + init_model = ModelEntity( + self.dataset, + configuration=self.task_environment.get_model_configuration(), + model_adapters={"weights.pth": ModelAdapter(self.trained_model.get_data("weights.pth"))}, + ) + + self.task_environment.model = init_model + self.nncf_task = self.create_task("nncf") + + optimized_model = ModelEntity( + self.dataset, + configuration=self.task_environment.get_model_configuration(), + ) + self.nncf_task.optimize(OptimizationType.NNCF, self.dataset, optimized_model) + + logger.info("Inferring the optimised model on the validation set.") + result_set = self.infer(task=self.nncf_task, output_model=optimized_model) + + logger.info("Evaluating the optimized model on the validation set.") + self.evaluate(task=self.nncf_task, result_set=result_set) + self.results["torch_int8"] = result_set.performance.score.value + + def export_nncf(self) -> ModelEntity: + """Export NNCF model via openvino.""" + logger.info("Exporting the model.") + exported_model = ModelEntity( + train_dataset=self.dataset, + configuration=self.task_environment.get_model_configuration(), + ) + self.nncf_task.export(ExportType.OPENVINO, exported_model) + self.task_environment.model = exported_model + + logger.info("Creating the OpenVINO Task.") + + self.openvino_task = self.create_task(task="openvino") + + logger.info("Inferring the exported model on the validation set.") + result_set = self.infer(task=self.openvino_task, output_model=exported_model) + + logger.info("Evaluating the exported model on the validation set.") + self.evaluate(task=self.openvino_task, result_set=result_set) + self.results["vino_int8"] = result_set.performance.score.value + return exported_model @staticmethod def clean_up() -> None: @@ -244,9 +335,16 @@ def parse_args() -> Namespace: parser = argparse.ArgumentParser( description="Sample showcasing how to run Anomaly Classification Task using OTE SDK" ) - parser.add_argument("--model_template_path", default="./anomaly_classification/configs/padim/template.yaml") + parser.add_argument( + "--model_template_path", + default="./anomaly_classification/configs/padim/template.yaml", + ) parser.add_argument("--dataset_path", default="./datasets/MVTec") parser.add_argument("--category", default="bottle") + parser.add_argument("--train-ann-files", required=True) + parser.add_argument("--val-ann-files", required=True) + parser.add_argument("--test-ann-files", required=True) + parser.add_argument("--optimization", choices=("none", "pot", "nncf"), default="none") parser.add_argument("--seed", default=0) return parser.parse_args() @@ -256,11 +354,28 @@ def main() -> None: args = parse_args() path = os.path.join(args.dataset_path, args.category) - task = OteAnomalyTask(dataset_path=path, seed=args.seed, model_template_path=args.model_template_path) + train_subset = {"ann_file": args.train_ann_files, "data_root": path} + val_subset = {"ann_file": args.val_ann_files, "data_root": path} + test_subset = {"ann_file": args.test_ann_files, "data_root": path} + + task = OteAnomalyTask( + dataset_path=path, + train_subset=train_subset, + val_subset=val_subset, + test_subset=test_subset, + model_template_path=args.model_template_path, + ) task.train() task.export() - task.optimize() + + if args.optimization == "pot": + task.optimize() + + if args.optimization == "nncf": + task.optimize_nncf() + task.export_nncf() + task.clean_up() diff --git a/external/anomaly/ote_anomalib/train_task.py b/external/anomaly/ote_anomalib/train_task.py new file mode 100644 index 00000000000..d2de58ab9dd --- /dev/null +++ b/external/anomaly/ote_anomalib/train_task.py @@ -0,0 +1,60 @@ +"""Anomaly Classification Task.""" + +# Copyright (C) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from anomalib.utils.callbacks import MinMaxNormalizationCallback +from ote_anomalib import AnomalyInferenceTask +from ote_anomalib.callbacks import ProgressCallback +from ote_anomalib.data import OTEAnomalyDataModule +from ote_anomalib.logging import get_logger +from ote_sdk.entities.datasets import DatasetEntity +from ote_sdk.entities.model import ModelEntity +from ote_sdk.entities.train_parameters import TrainParameters +from ote_sdk.usecases.tasks.interfaces.training_interface import ITrainingTask +from pytorch_lightning import Trainer + +logger = get_logger(__name__) + + +class AnomalyTrainingTask(AnomalyInferenceTask, ITrainingTask): + """Base Anomaly Task.""" + + def train( + self, + dataset: DatasetEntity, + output_model: ModelEntity, + train_parameters: TrainParameters, + ) -> None: + """Train the anomaly classification model. + + Args: + dataset (DatasetEntity): Input dataset. + output_model (ModelEntity): Output model to save the model weights. + train_parameters (TrainParameters): Training parameters + """ + logger.info("Training the model.") + + config = self.get_config() + logger.info("Training Configs '%s'", config) + + datamodule = OTEAnomalyDataModule(config=config, dataset=dataset, task_type=self.task_type) + callbacks = [ProgressCallback(parameters=train_parameters), MinMaxNormalizationCallback()] + + self.trainer = Trainer(**config.trainer, logger=False, callbacks=callbacks) + self.trainer.fit(model=self.model, datamodule=datamodule) + + self.save_model(output_model) + + logger.info("Training completed.") diff --git a/external/anomaly/requirements.txt b/external/anomaly/requirements.txt index 73bfb3c1608..d01300118d0 100644 --- a/external/anomaly/requirements.txt +++ b/external/anomaly/requirements.txt @@ -1,5 +1,5 @@ -anomalib @ git+https://github.com/openvinotoolkit/anomalib.git@5f3ee2725d97af8a0a7865b2fcac7280140bfc08 -openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2021/SCMVP#egg=openmodelzoo-modelapi&subdirectory=demos/common/python -openvino==2022.1.0.dev20220316 -openvino-dev==2022.1.0.dev20220316 +anomalib @ git+https://github.com/openvinotoolkit/anomalib.git@834d45ab1761841ba4041eb4472f01fb63d344a6 +openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2022/SCv1.1#egg=openmodelzoo-modelapi&subdirectory=demos/common/python +openvino==2022.1.0 +openvino-dev==2022.1.0 onnx==1.10.1 diff --git a/external/anomaly/tests/ote_cli/test_anomaly_classification.py b/external/anomaly/tests/ote_cli/test_anomaly_classification.py index 76fa1cfbc94..8997efe1dea 100644 --- a/external/anomaly/tests/ote_cli/test_anomaly_classification.py +++ b/external/anomaly/tests/ote_cli/test_anomaly_classification.py @@ -128,11 +128,13 @@ def test_nncf_export(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) + @pytest.mark.xfail(reason="CVS-83124") def test_nncf_eval(self, template): if template.entrypoints.nncf is None: pytest.skip("nncf entrypoint is none") - nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) + # TODO(AlexanderDokuchaev): return threshold=0.0001 after fix loading NNCF model + nncf_eval_testing(template, root, ote_dir, args, threshold=0.3) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) diff --git a/external/anomaly/tests/ote_cli/test_anomaly_detection.py b/external/anomaly/tests/ote_cli/test_anomaly_detection.py index e6412b169db..76ee445539d 100644 --- a/external/anomaly/tests/ote_cli/test_anomaly_detection.py +++ b/external/anomaly/tests/ote_cli/test_anomaly_detection.py @@ -127,11 +127,13 @@ def test_nncf_export(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) + @pytest.mark.xfail(reason="CVS-83124") def test_nncf_eval(self, template): if template.entrypoints.nncf is None: pytest.skip("nncf entrypoint is none") - nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) + # TODO(AlexanderDokuchaev): return threshold=0.0001 after fix loading NNCF model + nncf_eval_testing(template, root, ote_dir, args, threshold=0.3) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) diff --git a/external/anomaly/tests/ote_cli/test_anomaly_segmentation.py b/external/anomaly/tests/ote_cli/test_anomaly_segmentation.py index 0789816fc0b..98eb8aea3a7 100644 --- a/external/anomaly/tests/ote_cli/test_anomaly_segmentation.py +++ b/external/anomaly/tests/ote_cli/test_anomaly_segmentation.py @@ -128,11 +128,13 @@ def test_nncf_export(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) + @pytest.mark.xfail(reason="CVS-83124") def test_nncf_eval(self, template): if template.entrypoints.nncf is None: pytest.skip("nncf entrypoint is none") - nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) + # TODO(AlexanderDokuchaev): return threshold=0.0001 after fix loading NNCF model + nncf_eval_testing(template, root, ote_dir, args, threshold=0.3) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) diff --git a/external/anomaly/tests/test_ote_task.py b/external/anomaly/tests/test_ote_task.py index 5f8366b0d18..06612daf3c0 100644 --- a/external/anomaly/tests/test_ote_task.py +++ b/external/anomaly/tests/test_ote_task.py @@ -43,7 +43,6 @@ class TestAnomalyClassification: Anomaly Classification Task Tests. """ - # _trainer: OTEAnomalyTrainer _trainer: OteAnomalyTask @staticmethod @@ -65,7 +64,11 @@ def test_ote_config(task_path, template_path): @TestDataset(num_train=200, num_test=10, dataset_path="./datasets/MVTec", use_mvtec=False) def test_ote_train_export_and_optimize( - self, task_path, template_path, dataset_path="./datasets/MVTec", category="bottle" + self, + task_path, + template_path, + dataset_path="./datasets/MVTec", + category="bottle", ): """ E2E Train-Export Should Yield Similar Inference Results @@ -90,10 +93,37 @@ def test_ote_train_export_and_optimize( openvino_results = self._trainer.infer(task=self._trainer.openvino_task, output_model=output_model) self._trainer.evaluate(task=self._trainer.openvino_task, result_set=openvino_results) - assert np.allclose(base_results.performance.score.value, openvino_results.performance.score.value, atol=0.1) + assert np.allclose( + base_results.performance.score.value, + openvino_results.performance.score.value, + atol=0.1, + ) + + # NNCF optimization + self._trainer.optimize_nncf() + + base_nncf_results = self._trainer.infer(task=self._trainer.torch_task, output_model=output_model) + self._trainer.evaluate(task=self._trainer.torch_task, result_set=base_nncf_results) + if task_path == "anomaly_classification": # skip this check for anomaly segmentation until we switch metrics + assert base_nncf_results.performance.score.value > 0.5 + + self._trainer.export_nncf() + openvino_results = self._trainer.infer(task=self._trainer.openvino_task, output_model=output_model) + self._trainer.evaluate(task=self._trainer.openvino_task, result_set=openvino_results) + assert np.allclose( + base_nncf_results.performance.score.value, + openvino_results.performance.score.value, + atol=0.2, + ) @TestDataset(num_train=200, num_test=10, dataset_path="./datasets/MVTec", use_mvtec=False) - def test_ote_deploy(self, task_path, template_path, dataset_path="./datasets/MVTec", category="bottle"): + def test_ote_deploy( + self, + task_path, + template_path, + dataset_path="./datasets/MVTec", + category="bottle", + ): """ E2E Test generation of exportable code. """ diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/compression_config.json b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/compression_config.json index 5bf1ff483b2..753aef84907 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/compression_config.json +++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/compression_config.json @@ -1,26 +1,59 @@ { "base": { "nncf_config": { - "compression": [], "log_dir": "." - } - }, - "nncf_quantization": { + }, "lr_finder": { "enable": false }, "train": { "batch_size": 64, - "ema": { - "enable": false - }, + "lr_scheduler": "reduce_on_plateau", "mix_precision": false }, "test": { "batch_size": 64 + } + }, + "nncf_quantization": { + "nncf_config": { + "compression": { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 8192 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 8192 + } + } + }, + "accuracy_aware_training": { + "mode": "early_exit", + "params": { + "maximal_absolute_accuracy_degradation": 0.01, + "maximal_total_epochs": 100 + } + } + } + }, + "nncf_quantization_pruning": { + "nncf": { + "coeff_decrease_lr_for_nncf": 1.0 }, "nncf_config": { "compression": [ + { + "algorithm": "filter_pruning", + "pruning_init": 0.1, + "params": { + "schedule": "baseline", + "pruning_flops_target": 0.1, + "filter_importance": "geometric_median", + "prune_downsample_convs": true + } + }, { "algorithm": "quantization", "preset": "mixed", @@ -35,15 +68,18 @@ } ], "accuracy_aware_training": { - "mode": "early_exit", + "mode": "adaptive_compression_level", "params": { "maximal_absolute_accuracy_degradation": 0.01, - "maximal_total_epochs": 100 + "initial_training_phase_epochs": 100, + "patience_epochs": 100, + "maximal_total_epochs": 200 } } } }, "order_of_parts": [ - "nncf_quantization" + "nncf_quantization", + "nncf_quantization_pruning" ] } diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model.yaml b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model.yaml index c7edd94e279..ee37353a85d 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model.yaml @@ -14,7 +14,8 @@ lr_finder: model: name: 'efficientnet_b0' type: 'classification' - pretrained: True + pretrained: False + load_weights: 'https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_image_classification/efficientnet_b0_imagenet_cls.pth' save_all_chkpts: False custom_datasets: diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multihead.yaml b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multihead.yaml index 253af774206..0d39a7ef8b9 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multihead.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multihead.yaml @@ -13,7 +13,8 @@ lr_finder: model: name: 'efficientnet_b0' type: 'multihead' - pretrained: True + pretrained: False + load_weights: 'https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_image_classification/efficientnet_b0_imagenet_cls.pth' save_all_chkpts: False dropout_cls: p: 0.1 diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multilabel.yaml b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multilabel.yaml index 0f11694a528..5b1914f1896 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multilabel.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/main_model_multilabel.yaml @@ -13,7 +13,8 @@ lr_finder: model: name: 'efficientnet_b0' type: 'multilabel' - pretrained: True + pretrained: False + load_weights: 'https://storage.openvinotoolkit.org/repositories/openvino_training_extensions/models/custom_image_classification/efficientnet_b0_imagenet_cls.pth' save_all_chkpts: False dropout_cls: p: 0.1 diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/template.yaml b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/template.yaml index a472dd07cb3..9c9a3e97010 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/template.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_b0/template.yaml @@ -41,7 +41,7 @@ hyper_parameters: enable_pruning: default_value: false pruning_supported: - default_value: false + default_value: true maximal_accuracy_degradation: default_value: 1.0 diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json index 6cf4654f09c..753aef84907 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json +++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/compression_config.json @@ -1,36 +1,59 @@ { "base": { "nncf_config": { - "compression": [], "log_dir": "." - } - }, - "nncf_quantization": { + }, "lr_finder": { "enable": false }, "train": { "batch_size": 64, - "ema": { - "enable": false - }, + "lr_scheduler": "reduce_on_plateau", "mix_precision": false }, "test": { "batch_size": 64 - }, - "nncf_aux_config_changes": [ - { - "train": { - "batch_size": 64 - }, - "test": { - "batch_size": 64 + } + }, + "nncf_quantization": { + "nncf_config": { + "compression": { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 8192 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 8192 + } + } + }, + "accuracy_aware_training": { + "mode": "early_exit", + "params": { + "maximal_absolute_accuracy_degradation": 0.01, + "maximal_total_epochs": 100 } } - ], + } + }, + "nncf_quantization_pruning": { + "nncf": { + "coeff_decrease_lr_for_nncf": 1.0 + }, "nncf_config": { "compression": [ + { + "algorithm": "filter_pruning", + "pruning_init": 0.1, + "params": { + "schedule": "baseline", + "pruning_flops_target": 0.1, + "filter_importance": "geometric_median", + "prune_downsample_convs": true + } + }, { "algorithm": "quantization", "preset": "mixed", @@ -45,15 +68,18 @@ } ], "accuracy_aware_training": { - "mode": "early_exit", + "mode": "adaptive_compression_level", "params": { "maximal_absolute_accuracy_degradation": 0.01, - "maximal_total_epochs": 100 + "initial_training_phase_epochs": 100, + "patience_epochs": 100, + "maximal_total_epochs": 200 } } } }, "order_of_parts": [ - "nncf_quantization" + "nncf_quantization", + "nncf_quantization_pruning" ] } diff --git a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template.yaml b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template.yaml index b821f20b8fc..637d529676d 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/efficientnet_v2_s/template.yaml @@ -41,7 +41,7 @@ hyper_parameters: enable_pruning: default_value: false pruning_supported: - default_value: false + default_value: true maximal_accuracy_degradation: default_value: 1.0 diff --git a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/compression_config.json b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/compression_config.json index 6cf4654f09c..9d8cc0ebd55 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/compression_config.json +++ b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/compression_config.json @@ -1,19 +1,14 @@ { "base": { "nncf_config": { - "compression": [], "log_dir": "." - } - }, - "nncf_quantization": { + }, "lr_finder": { "enable": false }, "train": { "batch_size": 64, - "ema": { - "enable": false - }, + "lr_scheduler": "reduce_on_plateau", "mix_precision": false }, "test": { @@ -22,15 +17,55 @@ "nncf_aux_config_changes": [ { "train": { - "batch_size": 64 + "batch_size": 64, + "lr_scheduler": "reduce_on_plateau", + "mix_precision": false }, "test": { "batch_size": 64 } } - ], + ] + }, + "nncf_quantization": { + "nncf_config": { + "compression": { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 8192 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 8192 + } + } + }, + "accuracy_aware_training": { + "mode": "early_exit", + "params": { + "maximal_absolute_accuracy_degradation": 0.01, + "maximal_total_epochs": 100 + } + } + } + }, + "nncf_quantization_pruning": { + "nncf": { + "coeff_decrease_lr_for_nncf": 1.0 + }, "nncf_config": { "compression": [ + { + "algorithm": "filter_pruning", + "pruning_init": 0.1, + "params": { + "schedule": "baseline", + "pruning_flops_target": 0.1, + "filter_importance": "geometric_median", + "prune_downsample_convs": true + } + }, { "algorithm": "quantization", "preset": "mixed", @@ -45,15 +80,18 @@ } ], "accuracy_aware_training": { - "mode": "early_exit", + "mode": "adaptive_compression_level", "params": { "maximal_absolute_accuracy_degradation": 0.01, - "maximal_total_epochs": 100 + "initial_training_phase_epochs": 100, + "patience_epochs": 100, + "maximal_total_epochs": 200 } } } }, "order_of_parts": [ - "nncf_quantization" + "nncf_quantization", + "nncf_quantization_pruning" ] } diff --git a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/template_experimental.yaml b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/template_experimental.yaml index f975bf0dbca..519349d6f07 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/template_experimental.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_075/template_experimental.yaml @@ -41,7 +41,7 @@ hyper_parameters: enable_pruning: default_value: false pruning_supported: - default_value: false + default_value: true maximal_accuracy_degradation: default_value: 1.0 diff --git a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/compression_config.json b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/compression_config.json index 6cf4654f09c..9d8cc0ebd55 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/compression_config.json +++ b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/compression_config.json @@ -1,19 +1,14 @@ { "base": { "nncf_config": { - "compression": [], "log_dir": "." - } - }, - "nncf_quantization": { + }, "lr_finder": { "enable": false }, "train": { "batch_size": 64, - "ema": { - "enable": false - }, + "lr_scheduler": "reduce_on_plateau", "mix_precision": false }, "test": { @@ -22,15 +17,55 @@ "nncf_aux_config_changes": [ { "train": { - "batch_size": 64 + "batch_size": 64, + "lr_scheduler": "reduce_on_plateau", + "mix_precision": false }, "test": { "batch_size": 64 } } - ], + ] + }, + "nncf_quantization": { + "nncf_config": { + "compression": { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 8192 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 8192 + } + } + }, + "accuracy_aware_training": { + "mode": "early_exit", + "params": { + "maximal_absolute_accuracy_degradation": 0.01, + "maximal_total_epochs": 100 + } + } + } + }, + "nncf_quantization_pruning": { + "nncf": { + "coeff_decrease_lr_for_nncf": 1.0 + }, "nncf_config": { "compression": [ + { + "algorithm": "filter_pruning", + "pruning_init": 0.1, + "params": { + "schedule": "baseline", + "pruning_flops_target": 0.1, + "filter_importance": "geometric_median", + "prune_downsample_convs": true + } + }, { "algorithm": "quantization", "preset": "mixed", @@ -45,15 +80,18 @@ } ], "accuracy_aware_training": { - "mode": "early_exit", + "mode": "adaptive_compression_level", "params": { "maximal_absolute_accuracy_degradation": 0.01, - "maximal_total_epochs": 100 + "initial_training_phase_epochs": 100, + "patience_epochs": 100, + "maximal_total_epochs": 200 } } } }, "order_of_parts": [ - "nncf_quantization" + "nncf_quantization", + "nncf_quantization_pruning" ] } diff --git a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/template.yaml b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/template.yaml index f1334dc4c41..e2bf7b8a8a6 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/template.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_large_1/template.yaml @@ -41,7 +41,7 @@ hyper_parameters: enable_pruning: default_value: false pruning_supported: - default_value: false + default_value: true maximal_accuracy_degradation: default_value: 1.0 diff --git a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/compression_config.json b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/compression_config.json index 6cf4654f09c..9d8cc0ebd55 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/compression_config.json +++ b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/compression_config.json @@ -1,19 +1,14 @@ { "base": { "nncf_config": { - "compression": [], "log_dir": "." - } - }, - "nncf_quantization": { + }, "lr_finder": { "enable": false }, "train": { "batch_size": 64, - "ema": { - "enable": false - }, + "lr_scheduler": "reduce_on_plateau", "mix_precision": false }, "test": { @@ -22,15 +17,55 @@ "nncf_aux_config_changes": [ { "train": { - "batch_size": 64 + "batch_size": 64, + "lr_scheduler": "reduce_on_plateau", + "mix_precision": false }, "test": { "batch_size": 64 } } - ], + ] + }, + "nncf_quantization": { + "nncf_config": { + "compression": { + "algorithm": "quantization", + "preset": "mixed", + "initializer": { + "range": { + "num_init_samples": 8192 + }, + "batchnorm_adaptation": { + "num_bn_adaptation_samples": 8192 + } + } + }, + "accuracy_aware_training": { + "mode": "early_exit", + "params": { + "maximal_absolute_accuracy_degradation": 0.01, + "maximal_total_epochs": 100 + } + } + } + }, + "nncf_quantization_pruning": { + "nncf": { + "coeff_decrease_lr_for_nncf": 1.0 + }, "nncf_config": { "compression": [ + { + "algorithm": "filter_pruning", + "pruning_init": 0.1, + "params": { + "schedule": "baseline", + "pruning_flops_target": 0.1, + "filter_importance": "geometric_median", + "prune_downsample_convs": true + } + }, { "algorithm": "quantization", "preset": "mixed", @@ -45,15 +80,18 @@ } ], "accuracy_aware_training": { - "mode": "early_exit", + "mode": "adaptive_compression_level", "params": { "maximal_absolute_accuracy_degradation": 0.01, - "maximal_total_epochs": 100 + "initial_training_phase_epochs": 100, + "patience_epochs": 100, + "maximal_total_epochs": 200 } } } }, "order_of_parts": [ - "nncf_quantization" + "nncf_quantization", + "nncf_quantization_pruning" ] } diff --git a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/template_experimental.yaml b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/template_experimental.yaml index dfe2334179e..d824ffb8cbc 100644 --- a/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/template_experimental.yaml +++ b/external/deep-object-reid/configs/ote_custom_classification/mobilenet_v3_small/template_experimental.yaml @@ -41,7 +41,7 @@ hyper_parameters: enable_pruning: default_value: false pruning_supported: - default_value: false + default_value: true maximal_accuracy_degradation: default_value: 1.0 diff --git a/external/deep-object-reid/requirements.txt b/external/deep-object-reid/requirements.txt index c02da9949e3..71df3f1c415 100644 --- a/external/deep-object-reid/requirements.txt +++ b/external/deep-object-reid/requirements.txt @@ -1,4 +1,4 @@ -nncf @ git+https://github.com/openvinotoolkit/nncf@464244204fc2c5e80c8164c17d8d266ccae50062#egg=nncf -openvino==2022.1.0.dev20220316 -openvino-dev==2022.1.0.dev20220316 -openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2021/SCMVP#egg=openmodelzoo-modelapi&subdirectory=demos/common/python \ No newline at end of file +nncf @ git+https://github.com/openvinotoolkit/nncf@ed552bee19b1e40eaa2c06627acb928c1d6c2360#egg=nncf +openvino==2022.1.0 +openvino-dev==2022.1.0 +openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2022/SCv1.1#egg=openmodelzoo-modelapi&subdirectory=demos/common/python diff --git a/external/deep-object-reid/submodule b/external/deep-object-reid/submodule index 6e5a870c394..8376d3ec0d3 160000 --- a/external/deep-object-reid/submodule +++ b/external/deep-object-reid/submodule @@ -1 +1 @@ -Subproject commit 6e5a870c39499b2139e0659037c0eae0e1aedd9a +Subproject commit 8376d3ec0d3ceaf540020a070661739cddd9110b diff --git a/external/deep-object-reid/tests/expected_metrics/metrics_test_ote_training.yml b/external/deep-object-reid/tests/expected_metrics/metrics_test_ote_training.yml index 3d14b044554..ac04910e6e3 100644 --- a/external/deep-object-reid/tests/expected_metrics/metrics_test_ote_training.yml +++ b/external/deep-object-reid/tests/expected_metrics/metrics_test_ote_training.yml @@ -86,3 +86,47 @@ 'metrics.accuracy.Accuracy': 'base': 'nncf_evaluation.metrics.accuracy.Accuracy' 'max_diff': 0.01 + +'ACTION-training_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'target_value': 0.98 + 'max_diff_if_less_threshold': 0.005 + 'max_diff_if_greater_threshold': 0.03 +'ACTION-export_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'training_evaluation.metrics.accuracy.Accuracy' + 'max_diff': 0.01 +'ACTION-pot_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'export_evaluation.metrics.accuracy.Accuracy' + 'max_diff': 0.01 +'ACTION-nncf_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'training_evaluation.metrics.accuracy.Accuracy' + 'max_diff_if_less_threshold': 0.01 +'ACTION-nncf_export_evaluation,model-Custom_Image_Classification_EfficientNet-V2-S,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'nncf_evaluation.metrics.accuracy.Accuracy' + 'max_diff': 0.01 + +'ACTION-training_evaluation,model-Custom_Image_Classification_MobileNet-V3-large-1x,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'target_value': 0.98 + 'max_diff_if_less_threshold': 0.005 + 'max_diff_if_greater_threshold': 0.03 +'ACTION-export_evaluation,model-Custom_Image_Classification_MobileNet-V3-large-1x,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'training_evaluation.metrics.accuracy.Accuracy' + 'max_diff': 0.01 +'ACTION-pot_evaluation,model-Custom_Image_Classification_MobileNet-V3-large-1x,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'export_evaluation.metrics.accuracy.Accuracy' + 'max_diff': 0.01 +'ACTION-nncf_evaluation,model-Custom_Image_Classification_MobileNet-V3-large-1x,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'training_evaluation.metrics.accuracy.Accuracy' + 'max_diff_if_less_threshold': 0.01 +'ACTION-nncf_export_evaluation,model-Custom_Image_Classification_MobileNet-V3-large-1x,dataset-mlc_voc,num_epochs-CONFIG,batch-CONFIG,usecase-reallife': + 'metrics.accuracy.Accuracy': + 'base': 'nncf_evaluation.metrics.accuracy.Accuracy' + 'max_diff': 0.01 diff --git a/external/deep-object-reid/tests/ote_cli/test_classification.py b/external/deep-object-reid/tests/ote_cli/test_classification.py index bce27f1bbc1..9310904c578 100644 --- a/external/deep-object-reid/tests/ote_cli/test_classification.py +++ b/external/deep-object-reid/tests/ote_cli/test_classification.py @@ -39,6 +39,7 @@ nncf_export_testing, nncf_eval_testing, nncf_eval_openvino_testing, + xfail_templates, ) @@ -140,12 +141,13 @@ def test_nncf_export(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) + @pytest.mark.xfail(reason="CVS-82892") def test_nncf_eval(self, template): if template.entrypoints.nncf is None: pytest.skip("nncf entrypoint is none") nncf_eval_testing(template, root, ote_dir, args, threshold=0.001) - + @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_nncf_eval_openvino(self, template): diff --git a/external/deep-object-reid/tests/test_ote_training.py b/external/deep-object-reid/tests/test_ote_training.py index d6beac03968..a42e01e7a06 100644 --- a/external/deep-object-reid/tests/test_ote_training.py +++ b/external/deep-object-reid/tests/test_ote_training.py @@ -133,7 +133,7 @@ def test_bunches(self) -> List[Dict[str, Any]]: 'Custom_Image_Classification_EfficientNet-V2-S', 'Custom_Image_Classification_MobileNet-V3-large-1x', ], - dataset_name='lg_chem_short', + dataset_name=['lg_chem_short','mlc_voc_short'], usecase='precommit', ), dict( @@ -141,7 +141,7 @@ def test_bunches(self) -> List[Dict[str, Any]]: 'Custom_Image_Classification_EfficientNet-V2-S', 'Custom_Image_Classification_MobileNet-V3-large-1x', ], - dataset_name=['lg_chem','cifar100'], + dataset_name=['lg_chem','cifar100','mlc_voc'], max_num_epochs=KEEP_CONFIG_FIELD_VALUE, batch_size=KEEP_CONFIG_FIELD_VALUE, usecase=REALLIFE_USECASE_CONSTANT, @@ -173,7 +173,7 @@ def test_parameters_defining_test_case_behavior(self) -> List[str]: def default_test_parameters(self) -> Dict[str, Any]: DEFAULT_TEST_PARAMETERS = { - "max_num_epochs": 1, + "max_num_epochs": 3, "batch_size": 2, } return deepcopy(DEFAULT_TEST_PARAMETERS) @@ -464,6 +464,9 @@ def test(self, if "nncf_graph" in test_parameters["test_stage"]: pytest.xfail("The models has no a reference NNCF graph yet") - + if "mlc_voc" in test_parameters["dataset_name"] \ + and "MobileNet" in test_parameters["model_name"] \ + and "nncf_evaluation" in test_parameters["test_stage"]: + pytest.xfail("Known issue CVS-83261") test_case_fx.run_stage(test_parameters['test_stage'], data_collector_fx, cur_test_expected_metrics_callback_fx) diff --git a/external/deep-object-reid/tools/ote_sample.py b/external/deep-object-reid/tools/ote_sample.py index 4d8f7be3476..fdf23b37999 100644 --- a/external/deep-object-reid/tools/ote_sample.py +++ b/external/deep-object-reid/tools/ote_sample.py @@ -20,7 +20,7 @@ from ote_sdk.configuration.helper import create from ote_sdk.entities.datasets import Subset from ote_sdk.entities.inference_parameters import InferenceParameters -from ote_sdk.entities.model import ModelEntity, ModelPrecision +from ote_sdk.entities.model import ModelEntity, ModelPrecision, ModelOptimizationType from ote_sdk.entities.model_template import parse_model_template from ote_sdk.entities.optimization_parameters import OptimizationParameters from ote_sdk.entities.resultset import ResultSetEntity @@ -31,7 +31,7 @@ from torchreid.integration.nncf.compression import is_nncf_checkpoint from torchreid_tasks.utils import (ClassificationDatasetAdapter, - get_task_class) + get_task_class) def parse_args(): parser = argparse.ArgumentParser(description='Sample showcasing the new API') @@ -44,6 +44,8 @@ def parse_args(): help='path to the pre-trained aux model weights', default=None) parser.add_argument('--optimize', choices=['nncf', 'pot', 'none'], default='pot') + parser.add_argument('--enable_quantization', action='store_true') + parser.add_argument('--enable_pruning', action='store_true') parser.add_argument('--export', action='store_true') parser.add_argument('--debug-dump-folder', default='') args = parser.parse_args() @@ -92,8 +94,10 @@ def main(args): model_template = parse_model_template(args.template_file_path) print('Set hyperparameters') - params = create(model_template.hyper_parameters.data) + params.nncf_optimization.enable_quantization = args.enable_quantization + params.nncf_optimization.enable_pruning = args.enable_pruning + print('Setup environment') environment = TaskEnvironment(model=None, hyper_parameters=params, @@ -119,8 +123,13 @@ def main(args): validate(task, validation_dataset, trained_model) else: print('Load pre-trained weights') - task_impl_path = model_template.entrypoints.nncf if is_nncf_checkpoint(args.weights) \ - else model_template.entrypoints.base + if is_nncf_checkpoint(args.weights): + task_impl_path = model_template.entrypoints.nncf + optimization_type = ModelOptimizationType.NNCF + else: + task_impl_path = model_template.entrypoints.base + optimization_type = ModelOptimizationType.NONE + weights = load_weights(args.weights) model_adapters = {'weights.pth': ModelAdapter(weights)} if args.aux_weights is not None: @@ -131,6 +140,7 @@ def main(args): configuration=environment.get_model_configuration(), model_adapters=model_adapters, precision = [ModelPrecision.FP32], + optimization_type=optimization_type ) environment.model = trained_model diff --git a/external/deep-object-reid/torchreid_tasks/configuration.yaml b/external/deep-object-reid/torchreid_tasks/configuration.yaml index 2242083ad7e..e7751d0bc5a 100644 --- a/external/deep-object-reid/torchreid_tasks/configuration.yaml +++ b/external/deep-object-reid/torchreid_tasks/configuration.yaml @@ -20,6 +20,7 @@ learning_parameters: visible_in_ui: true warning: Increasing this value may cause the system to use more memory than available, potentially causing out of memory errors, please update with caution. + auto_hpo_state: POSSIBLE description: Learning Parameters header: Learning Parameters learning_rate: @@ -39,6 +40,7 @@ learning_parameters: type: UI_RULES visible_in_ui: true warning: null + auto_hpo_state: POSSIBLE max_num_epochs: affects_outcome_of: TRAINING default_value: 200 diff --git a/external/deep-object-reid/torchreid_tasks/inference_task.py b/external/deep-object-reid/torchreid_tasks/inference_task.py index 540749f19e0..f648870ab6e 100644 --- a/external/deep-object-reid/torchreid_tasks/inference_task.py +++ b/external/deep-object-reid/torchreid_tasks/inference_task.py @@ -30,6 +30,7 @@ from ote_sdk.entities.metadata import FloatMetadata, FloatType from ote_sdk.entities.model import (ModelEntity, ModelFormat, ModelOptimizationType, ModelPrecision) +from ote_sdk.entities.model import OptimizationMethod from ote_sdk.entities.result_media import ResultMediaEntity from ote_sdk.entities.resultset import ResultSetEntity from ote_sdk.entities.scored_label import ScoredLabel @@ -164,6 +165,8 @@ def _patch_config(self, base_dir: str): merge_from_files_with_base(self._cfg, config_file_path) self._cfg.use_gpu = torch.cuda.device_count() > 0 self.num_devices = 1 if self._cfg.use_gpu else 0 + if not self._cfg.use_gpu: + self._cfg.train.mix_precision = False self._cfg.custom_datasets.types = ['external_classification_wrapper', 'external_classification_wrapper'] self._cfg.custom_datasets.roots = ['']*2 @@ -287,8 +290,9 @@ def export(self, export_type: ExportType, output_model: ModelEntity): opset=self._cfg.model.export_onnx_opset, output_names=['logits', 'features', 'vector']) self._model.forward = self._model.old_forward del self._model.old_forward + pruning_transformation = OptimizationMethod.FILTER_PRUNING in self._optimization_methods export_ir(onnx_model_path, self._cfg.data.norm_mean, self._cfg.data.norm_std, - optimized_model_dir=optimized_model_dir) + optimized_model_dir=optimized_model_dir, pruning_transformation=pruning_transformation) bin_file = [f for f in os.listdir(optimized_model_dir) if f.endswith('.bin')][0] xml_file = [f for f in os.listdir(optimized_model_dir) if f.endswith('.xml')][0] diff --git a/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py b/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py index a64cbdfa9c6..3abe6e0a8cc 100644 --- a/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py +++ b/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py @@ -45,6 +45,9 @@ def _check_io_number(self, inp, outp): def _get_outputs(self): layer_name = 'logits' + for name, meta in self.outputs.items(): + if 'logits' in meta.names: + layer_name = name layer_shape = self.outputs[layer_name].shape if len(layer_shape) != 2 and len(layer_shape) != 4: diff --git a/external/deep-object-reid/torchreid_tasks/nncf_task.py b/external/deep-object-reid/torchreid_tasks/nncf_task.py index cd012e31c39..fc51d02c6c6 100644 --- a/external/deep-object-reid/torchreid_tasks/nncf_task.py +++ b/external/deep-object-reid/torchreid_tasks/nncf_task.py @@ -38,7 +38,7 @@ patch_config) from torchreid_tasks.inference_task import OTEClassificationInferenceTask from torchreid_tasks.monitors import DefaultMetricsMonitor -from torchreid_tasks.utils import OTEClassificationDataset, TrainingProgressCallback +from torchreid_tasks.utils import OTEClassificationDataset, OptimizationProgressCallback from torchreid.ops import DataParallel from torchreid.utils import set_random_seed, set_model_attr @@ -165,8 +165,6 @@ def optimize( raise RuntimeError('NNCF is the only supported optimization') if self._compression_ctrl: raise RuntimeError('The model is already optimized. NNCF requires the original model for optimization.') - if self._cfg.train.ema.enable: - raise RuntimeError('EMA model could not be used together with NNCF compression') if self._cfg.lr_finder.enable: raise RuntimeError('LR finder could not be used together with NNCF compression') @@ -180,16 +178,21 @@ def optimize( update_progress_callback = optimization_parameters.update_progress else: update_progress_callback = default_progress_callback - time_monitor = TrainingProgressCallback(update_progress_callback, num_epoch=self._cfg.train.max_epoch, - num_train_steps=math.ceil(len(dataset.get_subset(Subset.TRAINING)) / - self._cfg.train.batch_size), - num_val_steps=0, num_test_steps=0) + + num_epoch = self._cfg.nncf_config['accuracy_aware_training']['params']['maximal_total_epochs'] + train_subset = dataset.get_subset(Subset.TRAINING) + time_monitor = OptimizationProgressCallback(update_progress_callback, + num_epoch=num_epoch, + num_train_steps=max(1, math.floor(len(train_subset) / + self._cfg.train.batch_size)), + num_val_steps=0, num_test_steps=0, + loading_stage_progress_percentage=5, + initialization_stage_progress_percentage=5) self.metrics_monitor = DefaultMetricsMonitor() self.stop_callback.reset() set_random_seed(self._cfg.train.seed) - train_subset = dataset.get_subset(Subset.TRAINING) val_subset = dataset.get_subset(Subset.VALIDATION) self._cfg.custom_datasets.roots = [OTEClassificationDataset(train_subset, self._labels, self._multilabel, self._hierarchical, self._multihead_class_info, @@ -202,6 +205,8 @@ def optimize( self._compression_ctrl, self._model, self._nncf_metainfo = \ wrap_nncf_model(self._model, self._cfg, datamanager_for_init=datamanager) + time_monitor.on_initialization_end() + self._cfg.train.lr = calculate_lr_for_nncf_training(self._cfg, self._initial_lr, False) train_model = self._model @@ -219,6 +224,7 @@ def optimize( **lr_scheduler_kwargs(self._cfg)) logger.info('Start training') + time_monitor.on_train_begin() run_training(self._cfg, datamanager, train_model, optimizer, scheduler, extra_device_ids, self._cfg.train.lr, should_freeze_aux_models=True, @@ -228,6 +234,7 @@ def optimize( stop_callback=self.stop_callback, nncf_metainfo=self._nncf_metainfo, compression_ctrl=self._compression_ctrl) + time_monitor.on_train_end() self.metrics_monitor.close() if self.stop_callback.check_stop(): diff --git a/external/deep-object-reid/torchreid_tasks/openvino_task.py b/external/deep-object-reid/torchreid_tasks/openvino_task.py index f4566f3fa8c..8687fbaff64 100644 --- a/external/deep-object-reid/torchreid_tasks/openvino_task.py +++ b/external/deep-object-reid/torchreid_tasks/openvino_task.py @@ -12,20 +12,17 @@ # See the License for the specific language governing permissions # and limitations under the License. -import inspect +import io import json import logging import os -import subprocess # nosec -import sys import tempfile -from shutil import copyfile, copytree from typing import Any, Dict, Optional, Tuple, Union from addict import Dict as ADDict import numpy as np - +import torchreid_tasks.model_wrappers as model_wrappers from ote_sdk.usecases.exportable_code import demo from ote_sdk.entities.annotation import AnnotationSceneEntity from ote_sdk.entities.datasets import DatasetEntity @@ -72,7 +69,6 @@ from torchreid_tasks.utils import get_multihead_class_info from zipfile import ZipFile -from . import model_wrappers logger = logging.getLogger(__name__) @@ -204,39 +200,31 @@ def deploy(self, logger.info('Deploying the model') work_dir = os.path.dirname(demo.__file__) - model_file = inspect.getfile(type(self.inferencer.model)) parameters = {} parameters['type_of_model'] = 'ote_classification' parameters['converter_type'] = 'CLASSIFICATION' parameters['model_parameters'] = self.inferencer.configuration parameters['model_parameters']['labels'] = LabelSchemaMapper.forward(self.task_environment.label_schema) - name_of_package = "demo_package" - with tempfile.TemporaryDirectory() as tempdir: - copyfile(os.path.join(work_dir, "setup.py"), os.path.join(tempdir, "setup.py")) - copyfile(os.path.join(work_dir, "requirements.txt"), os.path.join(tempdir, "requirements.txt")) - copytree(os.path.join(work_dir, name_of_package), os.path.join(tempdir, name_of_package)) - config_path = os.path.join(tempdir, name_of_package, "config.json") - with open(config_path, "w", encoding='utf-8') as f: - json.dump(parameters, f, ensure_ascii=False, indent=4) - # generate model.py - if (inspect.getmodule(self.inferencer.model) in - [module[1] for module in inspect.getmembers(model_wrappers, inspect.ismodule)]): - copyfile(model_file, os.path.join(tempdir, name_of_package, "model.py")) - # create wheel package - subprocess.run([sys.executable, os.path.join(tempdir, "setup.py"), 'bdist_wheel', - '--dist-dir', tempdir, 'clean', '--all'], check=True) - wheel_file_name = [f for f in os.listdir(tempdir) if f.endswith('.whl')][0] - - with ZipFile(os.path.join(tempdir, "openvino.zip"), 'w') as zip_f: - zip_f.writestr(os.path.join("model", "model.xml"), self.model.get_data("openvino.xml")) - zip_f.writestr(os.path.join("model", "model.bin"), self.model.get_data("openvino.bin")) - zip_f.write(os.path.join(tempdir, "requirements.txt"), os.path.join("python", "requirements.txt")) - zip_f.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) - zip_f.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) - zip_f.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) - zip_f.write(os.path.join(tempdir, wheel_file_name), os.path.join("python", wheel_file_name)) - with open(os.path.join(tempdir, "openvino.zip"), "rb") as file: - output_model.exportable_code = file.read() + + zip_buffer = io.BytesIO() + with ZipFile(zip_buffer, 'w') as arch: + # model files + arch.writestr(os.path.join("model", "model.xml"), self.model.get_data("openvino.xml")) + arch.writestr(os.path.join("model", "model.bin"), self.model.get_data("openvino.bin")) + arch.writestr( + os.path.join("model", "config.json"), json.dumps(parameters, ensure_ascii=False, indent=4) + ) + # model_wrappers files + for root, dirs, files in os.walk(os.path.dirname(model_wrappers.__file__)): + for file in files: + file_path = os.path.join(root, file) + arch.write(file_path, os.path.join("python", "model_wrappers", file_path.split("model_wrappers/")[1])) + # other python files + arch.write(os.path.join(work_dir, "requirements.txt"), os.path.join("python", "requirements.txt")) + arch.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) + arch.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) + arch.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) + output_model.exportable_code = zip_buffer.getvalue() logger.info('Deploying completed') def optimize(self, @@ -269,6 +257,9 @@ def optimize(self, if get_nodes_by_type(model, ["FakeQuantize"]): raise RuntimeError("Model is already optimized by POT") + if optimization_parameters is not None: + optimization_parameters.update_progress(10) + engine_config = ADDict({ 'device': 'CPU' }) @@ -296,6 +287,9 @@ def optimize(self, compress_model_weights(compressed_model) + if optimization_parameters is not None: + optimization_parameters.update_progress(90) + with tempfile.TemporaryDirectory() as tempdir: save_model(compressed_model, tempdir, model_name="model") with open(os.path.join(tempdir, "model.xml"), "rb") as f: @@ -313,3 +307,7 @@ def optimize(self, self.model = output_model self.inferencer = self.load_inferencer() + + if optimization_parameters is not None: + optimization_parameters.update_progress(100) + logger.info('POT optimization completed') diff --git a/external/deep-object-reid/torchreid_tasks/parameters.py b/external/deep-object-reid/torchreid_tasks/parameters.py index f0f0a4595df..91f8897dc95 100644 --- a/external/deep-object-reid/torchreid_tasks/parameters.py +++ b/external/deep-object-reid/torchreid_tasks/parameters.py @@ -25,7 +25,7 @@ string_attribute, ) from ote_sdk.configuration.configurable_parameters import ConfigurableParameters -from ote_sdk.configuration.model_lifecycle import ModelLifecycle +from ote_sdk.configuration.enums import ModelLifecycle, AutoHPOState from .parameters_enums import POTQuantizationPreset @@ -49,7 +49,8 @@ class __LearningParameters(ParameterGroup): "memory requirements.", warning="Increasing this value may cause the system to use more memory than available, " "potentially causing out of memory errors, please update with caution.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + auto_hpo_state=AutoHPOState.POSSIBLE ) max_num_epochs = configurable_integer( @@ -69,7 +70,8 @@ class __LearningParameters(ParameterGroup): header="Learning rate", description="Increasing this value will speed up training \ convergence but might make it unstable.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + auto_hpo_state=AutoHPOState.POSSIBLE ) enable_lr_finder = configurable_boolean( diff --git a/external/deep-object-reid/torchreid_tasks/utils.py b/external/deep-object-reid/torchreid_tasks/utils.py index 0ac6feaa4f5..cec3a974abe 100644 --- a/external/deep-object-reid/torchreid_tasks/utils.py +++ b/external/deep-object-reid/torchreid_tasks/utils.py @@ -400,6 +400,41 @@ def on_test_batch_end(self, batch=None, logs=None): self.update_progress_callback(self.get_progress()) +class OptimizationProgressCallback(TimeMonitorCallback): + """ Progress callback used for optimization using NNCF + There are three stages to the progress bar: + - 5 % model is loaded + - 10 % compressed model is initialized + - 10-100 % compressed model is being fine-tuned + """ + def __init__(self, update_progress_callback: UpdateProgressCallback, loading_stage_progress_percentage: int = 5, + initialization_stage_progress_percentage: int = 5, **kwargs): + super().__init__(update_progress_callback=update_progress_callback, **kwargs) + if loading_stage_progress_percentage + initialization_stage_progress_percentage >= 100: + raise RuntimeError('Total optimization progress percentage is more than 100%') + + train_percentage = 100 - loading_stage_progress_percentage - initialization_stage_progress_percentage + self.loading_stage_steps = self.total_steps * loading_stage_progress_percentage / train_percentage + self.initialization_stage_steps = self.total_steps * initialization_stage_progress_percentage / train_percentage + self.total_steps += self.loading_stage_steps + self.initialization_stage_steps + + # set loading_stage_steps from the start as the model is already loaded at this point + self.current_step = self.loading_stage_steps + self.update_progress_callback(self.get_progress()) + + def on_train_batch_end(self, batch, logs=None): + super().on_train_batch_end(batch, logs) + self.update_progress_callback(self.get_progress(), score=logs) + + def on_train_end(self, logs=None): + super(OptimizationProgressCallback, self).on_train_end(logs) + self.update_progress_callback(self.get_progress(), score=logs) + + def on_initialization_end(self): + self.current_step += self.initialization_stage_steps + self.update_progress_callback(self.get_progress()) + + def preprocess_features_for_actmap(features): features = np.mean(features, axis=1) b, h, w = features.shape diff --git a/external/mmdetection/configs/custom-object-detection/cspdarknet_YOLOX/coco_data_pipeline.py b/external/mmdetection/configs/custom-object-detection/cspdarknet_YOLOX/coco_data_pipeline.py index 300850adb09..6937d5b993d 100644 --- a/external/mmdetection/configs/custom-object-detection/cspdarknet_YOLOX/coco_data_pipeline.py +++ b/external/mmdetection/configs/custom-object-detection/cspdarknet_YOLOX/coco_data_pipeline.py @@ -30,7 +30,7 @@ img_scale=(416, 416), flip=False, transforms=[ - dict(type='Resize', keep_ratio=True), + dict(type='Resize', keep_ratio=False), dict(type='RandomFlip'), dict(type='Pad', size=(416, 416), pad_val=114.0), dict(type='Normalize', **img_norm_cfg), @@ -45,17 +45,21 @@ train=dict( type='MultiImageMixDataset', dataset=dict( - type=dataset_type, - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017', - pipeline=[ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True) - ], - # filter_empty_gt=False, - ), - pipeline=train_pipeline, - dynamic_scale=img_scale), + type='RepeatDataset', + adaptive_repeat_times=True, + times=1, + dataset=dict( + type=dataset_type, + ann_file='data/coco/annotations/instances_train2017.json', + img_prefix='data/coco/train2017', + pipeline=[ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True) + ], + )), + pipeline=train_pipeline, + dynamic_scale=img_scale + ), val=dict( type=dataset_type, ann_file='data/coco/annotations/instances_val2017.json', diff --git a/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_ATSS/compression_config.json b/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_ATSS/compression_config.json index 07210929e3c..666fe36b6e5 100644 --- a/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_ATSS/compression_config.json +++ b/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_ATSS/compression_config.json @@ -43,6 +43,7 @@ "mode": "adaptive_compression_level", "params": { "initial_training_phase_epochs": 5, + "maximal_total_epochs": 100, "patience_epochs": 5 } }, diff --git a/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_SSD/compression_config.json b/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_SSD/compression_config.json index f8974f3afc1..359526bb04e 100644 --- a/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_SSD/compression_config.json +++ b/external/mmdetection/configs/custom-object-detection/gen3_mobilenetV2_SSD/compression_config.json @@ -43,6 +43,7 @@ "mode": "adaptive_compression_level", "params": { "initial_training_phase_epochs": 5, + "maximal_total_epochs": 100, "patience_epochs": 5 } }, diff --git a/external/mmdetection/constraints.txt b/external/mmdetection/constraints.txt index 7b71ad9404a..5b9abf55199 100644 --- a/external/mmdetection/constraints.txt +++ b/external/mmdetection/constraints.txt @@ -54,8 +54,8 @@ onnx==1.10.1 onnxoptimizer==0.2.6 onnxruntime==1.9.0 opencv-python==4.5.3.56 -openvino==2022.1.0.dev20220316 -openvino-dev==2022.1.0.dev20220316 +openvino==2022.1.0 +openvino-dev==2022.1.0 ordered-set==4.0.2 packaging==21.0 pandas==1.1.5 diff --git a/external/mmdetection/detection_tasks/apis/detection/config_utils.py b/external/mmdetection/detection_tasks/apis/detection/config_utils.py index 88cbad2c12f..0151b68a856 100644 --- a/external/mmdetection/detection_tasks/apis/detection/config_utils.py +++ b/external/mmdetection/detection_tasks/apis/detection/config_utils.py @@ -20,6 +20,7 @@ from collections import defaultdict from typing import List, Optional, Union +import torch from mmcv import Config, ConfigDict from ote_sdk.entities.datasets import DatasetEntity from ote_sdk.entities.label import LabelEntity, Domain @@ -90,6 +91,12 @@ def patch_config(config: Config, work_dir: str, labels: List[LabelEntity], domai # Patch data pipeline, making it OTE-compatible. patch_datasets(config, domain) + # Remove FP16 config if running on CPU device and revert to FP32 + # https://github.com/pytorch/pytorch/issues/23377 + if not torch.cuda.is_available() and 'fp16' in config: + logger.info(f'Revert FP16 to FP32 on CPU device') + remove_from_config(config, 'fp16') + if 'log_config' not in config: config.log_config = ConfigDict() # config.log_config.hooks = [] @@ -143,8 +150,10 @@ def patch_adaptive_repeat_dataset(config: Union[Config, ConfigDict], num_samples :param decay: decaying rate :param factor: base repeat factor """ - if config.data.train.type == 'RepeatDataset' and getattr( - config.data.train, 'adaptive_repeat_times', False): + data_train = config.data.train + if data_train.type == 'MultiImageMixDataset': + data_train = data_train.dataset + if data_train.type == 'RepeatDataset' and getattr(data_train, 'adaptive_repeat_times', False): if is_epoch_based_runner(config.runner): cur_epoch = config.runner.max_epochs new_repeat = max(round(math.exp(decay * num_samples) * factor), 1) @@ -152,7 +161,7 @@ def patch_adaptive_repeat_dataset(config: Union[Config, ConfigDict], num_samples if new_epoch == 1: return config.runner.max_epochs = new_epoch - config.data.train.times = new_repeat + data_train.times = new_repeat @check_input_parameters_type({"dataset": DatasetParamTypeCheck}) @@ -169,11 +178,9 @@ def prepare_for_training(config: Config, train_dataset: DatasetEntity, val_datas time_monitor: TimeMonitorCallback, learning_curves: defaultdict) -> Config: config = copy.deepcopy(config) prepare_work_dir(config) + data_train = get_data_cfg(config) + data_train.ote_dataset = train_dataset config.data.val.ote_dataset = val_dataset - if 'ote_dataset' in config.data.train: - config.data.train.ote_dataset = train_dataset - else: - config.data.train.dataset.ote_dataset = train_dataset patch_adaptive_repeat_dataset(config, len(train_dataset)) config.custom_hooks.append({'type': 'OTEProgressHook', 'time_monitor': time_monitor, 'verbose': True}) config.log_config.hooks.append({'type': 'OTELoggerHook', 'curves': learning_curves}) @@ -194,12 +201,9 @@ def config_to_string(config: Union[Config, ConfigDict]) -> str: config_copy.data.test.labels = None config_copy.data.val.ote_dataset = None config_copy.data.val.labels = None - if 'ote_dataset' in config_copy.data.train: - config_copy.data.train.ote_dataset = None - config_copy.data.train.labels = None - else: - config_copy.data.train.dataset.ote_dataset = None - config_copy.data.train.dataset.labels = None + data_train = get_data_cfg(config_copy) + data_train.ote_dataset = None + data_train.labels = None return Config(config_copy).pretty_text @@ -246,11 +250,8 @@ def prepare_work_dir(config: Union[Config, ConfigDict]) -> str: def set_data_classes(config: Config, labels: List[LabelEntity]): # Save labels in data configs. for subset in ('train', 'val', 'test'): - cfg = config.data[subset] - if cfg.type == 'RepeatDataset' or cfg.type == 'MultiImageMixDataset': - cfg.dataset.labels = labels - else: - cfg.labels = labels + cfg = get_data_cfg(config, subset) + cfg.labels = labels config.data[subset].labels = labels # Set proper number of classes in model's detection heads. @@ -289,9 +290,7 @@ def patch_color_conversion(pipeline): assert 'data' in config for subset in ('train', 'val', 'test'): - cfg = config.data[subset] - if cfg.type == 'RepeatDataset' or cfg.type == 'MultiImageMixDataset': - cfg = cfg.dataset + cfg = get_data_cfg(config, subset) cfg.type = 'OTEDataset' cfg.domain = domain cfg.ote_dataset = None @@ -352,3 +351,10 @@ def cluster_anchors(config: Config, dataset: DatasetEntity, model: BaseDetector) config.model.bbox_head.anchor_generator = config_generator model.bbox_head.anchor_generator = model_generator return config, model + + +def get_data_cfg(config: Config, subset: str = 'train') -> Config: + data_cfg = config.data[subset] + while 'dataset' in data_cfg: + data_cfg = data_cfg.dataset + return data_cfg diff --git a/external/mmdetection/detection_tasks/apis/detection/configuration.py b/external/mmdetection/detection_tasks/apis/detection/configuration.py index 57ff7965133..de8ba9f8eb5 100644 --- a/external/mmdetection/detection_tasks/apis/detection/configuration.py +++ b/external/mmdetection/detection_tasks/apis/detection/configuration.py @@ -24,7 +24,7 @@ selectable, string_attribute) from ote_sdk.configuration import ConfigurableParameters -from ote_sdk.configuration.model_lifecycle import ModelLifecycle +from ote_sdk.configuration.enums import ModelLifecycle, AutoHPOState from .configuration_enums import POTQuantizationPreset @@ -49,7 +49,8 @@ class __LearningParameters(ParameterGroup): "memory requirements.", warning="Increasing this value may cause the system to use more memory than available, " "potentially causing out of memory errors, please update with caution.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + auto_hpo_state=AutoHPOState.POSSIBLE ) num_iters = configurable_integer( @@ -67,7 +68,8 @@ class __LearningParameters(ParameterGroup): max_value=1e-01, header="Learning rate", description="Increasing this value will speed up training convergence but might make it unstable.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + auto_hpo_state=AutoHPOState.POSSIBLE ) learning_rate_warmup_iters = configurable_integer( diff --git a/external/mmdetection/detection_tasks/apis/detection/configuration.yaml b/external/mmdetection/detection_tasks/apis/detection/configuration.yaml index 20bf3baf0df..ea3e3bceebc 100644 --- a/external/mmdetection/detection_tasks/apis/detection/configuration.yaml +++ b/external/mmdetection/detection_tasks/apis/detection/configuration.yaml @@ -23,6 +23,7 @@ learning_parameters: warning: Increasing this value may cause the system to use more memory than available, potentially causing out of memory errors, please update with caution. + auto_hpo_state: POSSIBLE description: Learning Parameters header: Learning Parameters learning_rate: @@ -44,6 +45,7 @@ learning_parameters: value: 0.01 visible_in_ui: true warning: null + auto_hpo_state: POSSIBLE learning_rate_warmup_iters: affects_outcome_of: TRAINING default_value: 100 diff --git a/external/mmdetection/detection_tasks/apis/detection/inference_task.py b/external/mmdetection/detection_tasks/apis/detection/inference_task.py index e82dad3b5d1..94db4880a35 100644 --- a/external/mmdetection/detection_tasks/apis/detection/inference_task.py +++ b/external/mmdetection/detection_tasks/apis/detection/inference_task.py @@ -220,7 +220,9 @@ def _add_predictions_to_dataset(self, prediction_results, dataset, confidence_th box_points = cv2.boxPoints(cv2.minAreaRect(contour)) points = [Point(x=point[0] / width, y=point[1] / height) for point in box_points] labels = [ScoredLabel(self._labels[label_idx], probability=probability)] - shapes.append(Annotation(Polygon(points=points), labels=labels, id=ID(f"{label_idx:08}"))) + polygon = Polygon(points=points) + if polygon.get_area() > 1e-12: + shapes.append(Annotation(polygon, labels=labels, id=ID(f"{label_idx:08}"))) else: raise RuntimeError( f"Detection results assignment not implemented for task: {self._task_type}") diff --git a/external/mmdetection/detection_tasks/apis/detection/nncf_task.py b/external/mmdetection/detection_tasks/apis/detection/nncf_task.py index b0f295f7d7f..e94e3ff8ea4 100644 --- a/external/mmdetection/detection_tasks/apis/detection/nncf_task.py +++ b/external/mmdetection/detection_tasks/apis/detection/nncf_task.py @@ -46,7 +46,7 @@ from detection_tasks.apis.detection.config_utils import prepare_for_training from detection_tasks.apis.detection.configuration import OTEDetectionConfig from detection_tasks.apis.detection.inference_task import OTEDetectionInferenceTask -from detection_tasks.apis.detection.ote_utils import TrainingProgressCallback +from detection_tasks.apis.detection.ote_utils import OptimizationProgressCallback from detection_tasks.extension.utils.hooks import OTELoggerHook from mmdet.apis.train import build_val_dataloader from mmdet.datasets import build_dataloader, build_dataset @@ -202,7 +202,10 @@ def optimize( update_progress_callback = optimization_parameters.update_progress else: update_progress_callback = default_progress_callback - time_monitor = TrainingProgressCallback(update_progress_callback) + + time_monitor = OptimizationProgressCallback(update_progress_callback, + loading_stage_progress_percentage=5, + initialization_stage_progress_percentage=5) learning_curves = defaultdict(OTELoggerHook.Curve) training_config = prepare_for_training(config, train_dataset, val_dataset, time_monitor, learning_curves) mm_train_dataset = build_dataset(training_config.data.train) @@ -214,6 +217,8 @@ def optimize( if not self._compression_ctrl: self._create_compressed_model(mm_train_dataset, training_config) + time_monitor.on_initialization_end() + # Run training. self._training_work_dir = training_config.work_dir self._is_training = True diff --git a/external/mmdetection/detection_tasks/apis/detection/openvino_task.py b/external/mmdetection/detection_tasks/apis/detection/openvino_task.py index 9ec75118924..5352f6afa63 100644 --- a/external/mmdetection/detection_tasks/apis/detection/openvino_task.py +++ b/external/mmdetection/detection_tasks/apis/detection/openvino_task.py @@ -14,13 +14,11 @@ import attr import copy -import inspect +import io import json import numpy as np import os import ote_sdk.usecases.exportable_code.demo as demo -import subprocess # nosec -import sys import tempfile from addict import Dict as ADDict from compression.api import DataLoader @@ -67,7 +65,6 @@ from zipfile import ZipFile from mmdet.utils.logger import get_root_logger -from . import model_wrappers from .configuration import OTEDetectionConfig logger = get_root_logger() @@ -224,7 +221,6 @@ def __init__(self, task_environment: TaskEnvironment): self.model = self.task_environment.model self.task_type = self.task_environment.model_template.task_type self.confidence_threshold: float = 0.0 - self.model_name = task_environment.model_template.model_template_id self.inferencer = self.load_inferencer() logger.info('OpenVINO task initialization completed') @@ -280,39 +276,26 @@ def deploy(self, logger.info('Deploying the model') work_dir = os.path.dirname(demo.__file__) - model_file = inspect.getfile(type(self.inferencer.model)) parameters = {} parameters['type_of_model'] = self.inferencer.model.__model__ parameters['converter_type'] = str(self.task_type) parameters['model_parameters'] = self.inferencer.configuration parameters['model_parameters']['labels'] = LabelSchemaMapper.forward(self.task_environment.label_schema) - name_of_package = "demo_package" - with tempfile.TemporaryDirectory() as tempdir: - copyfile(os.path.join(work_dir, "setup.py"), os.path.join(tempdir, "setup.py")) - copyfile(os.path.join(work_dir, "requirements.txt"), os.path.join(tempdir, "requirements.txt")) - copytree(os.path.join(work_dir, name_of_package), os.path.join(tempdir, name_of_package)) - config_path = os.path.join(tempdir, name_of_package, "config.json") - with open(config_path, "w", encoding='utf-8') as f: - json.dump(parameters, f, ensure_ascii=False, indent=4) - # generate model.py - if (inspect.getmodule(self.inferencer.model) in - [module[1] for module in inspect.getmembers(model_wrappers, inspect.ismodule)]): - copyfile(model_file, os.path.join(tempdir, name_of_package, "model.py")) - # create wheel package - subprocess.run([sys.executable, os.path.join(tempdir, "setup.py"), 'bdist_wheel', - '--dist-dir', tempdir, 'clean', '--all']) - wheel_file_name = [f for f in os.listdir(tempdir) if f.endswith('.whl')][0] - - with ZipFile(os.path.join(tempdir, "openvino.zip"), 'w') as zip: - zip.writestr(os.path.join("model", "model.xml"), self.model.get_data("openvino.xml")) - zip.writestr(os.path.join("model", "model.bin"), self.model.get_data("openvino.bin")) - zip.write(os.path.join(tempdir, "requirements.txt"), os.path.join("python", "requirements.txt")) - zip.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) - zip.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) - zip.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) - zip.write(os.path.join(tempdir, wheel_file_name), os.path.join("python", wheel_file_name)) - with open(os.path.join(tempdir, "openvino.zip"), "rb") as file: - output_model.exportable_code = file.read() + + zip_buffer = io.BytesIO() + with ZipFile(zip_buffer, 'w') as arch: + # model files + arch.writestr(os.path.join("model", "model.xml"), self.model.get_data("openvino.xml")) + arch.writestr(os.path.join("model", "model.bin"), self.model.get_data("openvino.bin")) + arch.writestr( + os.path.join("model", "config.json"), json.dumps(parameters, ensure_ascii=False, indent=4) + ) + # python files + arch.write(os.path.join(work_dir, "requirements.txt"), os.path.join("python", "requirements.txt")) + arch.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) + arch.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) + arch.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) + output_model.exportable_code = zip_buffer.getvalue() logger.info('Deploying completed') @check_input_parameters_type({"dataset": DatasetParamTypeCheck}) @@ -347,6 +330,9 @@ def optimize(self, if get_nodes_by_type(model, ['FakeQuantize']): raise RuntimeError("Model is already optimized by POT") + if optimization_parameters is not None: + optimization_parameters.update_progress(10) + engine_config = ADDict({ 'device': 'CPU' }) @@ -374,6 +360,9 @@ def optimize(self, compress_model_weights(compressed_model) + if optimization_parameters is not None: + optimization_parameters.update_progress(90) + with tempfile.TemporaryDirectory() as tempdir: save_model(compressed_model, tempdir, model_name="model") with open(os.path.join(tempdir, "model.xml"), "rb") as f: @@ -393,3 +382,6 @@ def optimize(self, self.model = output_model self.inferencer = self.load_inferencer() logger.info('POT optimization completed') + + if optimization_parameters is not None: + optimization_parameters.update_progress(100) diff --git a/external/mmdetection/detection_tasks/apis/detection/ote_utils.py b/external/mmdetection/detection_tasks/apis/detection/ote_utils.py index d5370802ff2..1fa456e1da6 100644 --- a/external/mmdetection/detection_tasks/apis/detection/ote_utils.py +++ b/external/mmdetection/detection_tasks/apis/detection/ote_utils.py @@ -106,12 +106,12 @@ def get_task_class(path: str): class TrainingProgressCallback(TimeMonitorCallback): - def __init__(self, update_progress_callback: Union[UpdateProgressCallback, Callable[[int], None]]): + def __init__(self, update_progress_callback: UpdateProgressCallback): super().__init__(0, 0, 0, 0, update_progress_callback=update_progress_callback) def on_train_batch_end(self, batch, logs=None): super().on_train_batch_end(batch, logs) - self.update_progress_callback(int(self.get_progress())) + self.update_progress_callback(self.get_progress()) def on_epoch_end(self, epoch, logs=None): self.past_epoch_duration.append(time.time() - self.start_epoch_time) @@ -119,11 +119,8 @@ def on_epoch_end(self, epoch, logs=None): score = None if hasattr(self.update_progress_callback, 'metric') and isinstance(logs, dict): score = logs.get(self.update_progress_callback.metric, None) - # Workaround for NNCF trainer, which uses callback of a different type. - if score is not None: - self.update_progress_callback(self.get_progress(), score=float(score)) - else: - self.update_progress_callback(int(self.get_progress())) + score = float(score) if score is not None else None + self.update_progress_callback(self.get_progress(), score=score) class InferenceProgressCallback(TimeMonitorCallback): @@ -138,3 +135,42 @@ def __init__(self, num_test_steps, update_progress_callback: Callable[[int], Non def on_test_batch_end(self, batch=None, logs=None): super().on_test_batch_end(batch, logs) self.update_progress_callback(int(self.get_progress())) + + +class OptimizationProgressCallback(TrainingProgressCallback): + """ Progress callback used for optimization using NNCF + There are three stages to the progress bar: + - 5 % model is loaded + - 10 % compressed model is initialized + - 10-100 % compressed model is being fine-tuned + """ + def __init__(self, update_progress_callback: UpdateProgressCallback, loading_stage_progress_percentage: int = 5, + initialization_stage_progress_percentage: int = 5): + super().__init__(update_progress_callback=update_progress_callback) + if loading_stage_progress_percentage + initialization_stage_progress_percentage >= 100: + raise RuntimeError('Total optimization progress percentage is more than 100%') + + self.loading_stage_progress_percentage = loading_stage_progress_percentage + self.initialization_stage_progress_percentage = initialization_stage_progress_percentage + + # set loading_stage_progress_percentage from the start as the model is already loaded at this point + self.update_progress_callback(loading_stage_progress_percentage) + + def on_train_begin(self, logs=None): + super().on_train_begin(logs) + # Callback initialization takes place here after OTEProgressHook.before_run() is called + train_percentage = 100 - self.loading_stage_progress_percentage - self.initialization_stage_progress_percentage + loading_stage_steps = self.total_steps * self.loading_stage_progress_percentage / train_percentage + initialization_stage_steps = self.total_steps * self.initialization_stage_progress_percentage / train_percentage + self.total_steps += loading_stage_steps + initialization_stage_steps + + self.current_step = loading_stage_steps + initialization_stage_steps + self.update_progress_callback(self.get_progress()) + + def on_train_end(self, logs=None): + super().on_train_end(logs) + self.update_progress_callback(self.get_progress(), score=logs) + + def on_initialization_end(self): + self.update_progress_callback(self.loading_stage_progress_percentage + + self.initialization_stage_progress_percentage) diff --git a/external/mmdetection/detection_tasks/extension/utils/hooks.py b/external/mmdetection/detection_tasks/extension/utils/hooks.py index f3674dfb5e1..d9035e3d9a6 100644 --- a/external/mmdetection/detection_tasks/extension/utils/hooks.py +++ b/external/mmdetection/detection_tasks/extension/utils/hooks.py @@ -160,6 +160,7 @@ def before_run(self, runner: BaseRunner): self.time_monitor.total_steps = max(math.ceil(self.time_monitor.steps_per_epoch * total_epochs), 1) self.time_monitor.current_step = 0 self.time_monitor.current_epoch = 0 + self.time_monitor.on_train_begin() @check_input_parameters_type() def before_epoch(self, runner: BaseRunner): diff --git a/external/mmdetection/requirements.txt b/external/mmdetection/requirements.txt index a4b2b54e5b1..fc32d05a108 100644 --- a/external/mmdetection/requirements.txt +++ b/external/mmdetection/requirements.txt @@ -1,5 +1,5 @@ -openvino==2022.1.0.dev20220316 -openvino-dev==2022.1.0.dev20220316 -openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo@ef556fee2cdd92488838b49ef8939c303992d89c#egg=openmodelzoo-modelapi&subdirectory=demos/common/python +openvino==2022.1.0 +openvino-dev==2022.1.0 +openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2022/SCv1.1#egg=openmodelzoo-modelapi&subdirectory=demos/common/python nncf@ git+https://github.com/openvinotoolkit/nncf@464244204fc2c5e80c8164c17d8d266ccae50062#egg=nncf diff --git a/external/mmdetection/submodule b/external/mmdetection/submodule index d701ac1661e..b7afe852faf 160000 --- a/external/mmdetection/submodule +++ b/external/mmdetection/submodule @@ -1 +1 @@ -Subproject commit d701ac1661e2ee97d5547152e47beb92f36764c2 +Subproject commit b7afe852fafeab36c9fd9f126e8d3f48d44675ba diff --git a/external/mmdetection/tests/ote_cli/test_detection.py b/external/mmdetection/tests/ote_cli/test_detection.py index 964b1a1bed0..0ee0abdec49 100644 --- a/external/mmdetection/tests/ote_cli/test_detection.py +++ b/external/mmdetection/tests/ote_cli/test_detection.py @@ -92,7 +92,7 @@ def test_ote_eval(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_openvino(self, template): - ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.2) + ote_eval_openvino_testing(template, root, ote_dir, args, threshold=0.1) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) diff --git a/external/mmdetection/tests/test_ote_api.py b/external/mmdetection/tests/test_ote_api.py index 016cc5f0a31..685622e5c85 100644 --- a/external/mmdetection/tests/test_ote_api.py +++ b/external/mmdetection/tests/test_ote_api.py @@ -24,6 +24,7 @@ from typing import Optional import numpy as np +import pytest import torch from bson import ObjectId from ote_sdk.test_suite.e2e_test_system import e2e_pytest_api @@ -297,8 +298,7 @@ def test_nncf_optimize_progress_tracking(self): print('Task initialized, model optimization starts.') training_progress_curve = [] - def progress_callback(progress: int): - assert isinstance(progress, int) + def progress_callback(progress: float, score: Optional[float] = None): training_progress_curve.append(progress) optimization_parameters = OptimizationParameters() @@ -403,7 +403,7 @@ def end_to_end( num_iters=5, quality_score_threshold=0.5, reload_perf_delta_tolerance=0.0, - export_perf_delta_tolerance=0.0005, + export_perf_delta_tolerance=0.001, pot_perf_delta_tolerance=0.1, nncf_perf_delta_tolerance=0.1, task_type=TaskType.DETECTION): @@ -542,12 +542,14 @@ def test_training_yolox(self): osp.join('configs', 'custom-object-detection', 'cspdarknet_YOLOX')) @e2e_pytest_api + @pytest.mark.xfail(reason='CVS-83115') def test_training_maskrcnn_resnet50(self): self.end_to_end(osp.join('configs', 'custom-counting-instance-seg', 'resnet50_maskrcnn'), task_type=TaskType.INSTANCE_SEGMENTATION) @e2e_pytest_api + @pytest.mark.xfail(reason='CVS-83116') def test_training_maskrcnn_efficientnetb2b(self): self.end_to_end(osp.join('configs', 'custom-counting-instance-seg', 'efficientnetb2b_maskrcnn'), diff --git a/external/mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-s-mod2/template_experimental.yaml b/external/mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-s-mod2/template.yaml similarity index 100% rename from external/mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-s-mod2/template_experimental.yaml rename to external/mmsegmentation/configs/custom-sematic-segmentation/ocr-lite-hrnet-s-mod2/template.yaml diff --git a/external/mmsegmentation/requirements.txt b/external/mmsegmentation/requirements.txt index ff9125f2b31..9618b736aaa 100644 --- a/external/mmsegmentation/requirements.txt +++ b/external/mmsegmentation/requirements.txt @@ -1,4 +1,4 @@ -openvino==2022.1.0.dev20220316 -openvino-dev==2022.1.0.dev20220316 +openvino==2022.1.0 +openvino-dev==2022.1.0 nncf@git+https://github.com/openvinotoolkit/nncf@464244204fc2c5e80c8164c17d8d266ccae50062#egg=nncf -openmodelzoo-modelapi@ git+https://github.com/openvinotoolkit/open_model_zoo@ef556fee2cdd92488838b49ef8939c303992d89c#egg=openmodelzoo-modelapi&subdirectory=demos/common/python +openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2022/SCv1.1#egg=openmodelzoo-modelapi&subdirectory=demos/common/python diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.py index be10482fffb..28eec0fbbbf 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.py @@ -24,7 +24,7 @@ selectable, string_attribute) from ote_sdk.configuration.configurable_parameters import ConfigurableParameters -from ote_sdk.configuration.model_lifecycle import ModelLifecycle +from ote_sdk.configuration.enums import ModelLifecycle, AutoHPOState from .configuration_enums import POTQuantizationPreset, Models @@ -49,7 +49,8 @@ class __LearningParameters(ParameterGroup): "memory requirements.", warning="Increasing this value may cause the system to use more memory than available, " "potentially causing out of memory errors, please update with caution.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + auto_hpo_state=AutoHPOState.POSSIBLE ) num_iters = configurable_integer( @@ -67,7 +68,8 @@ class __LearningParameters(ParameterGroup): max_value=1e-01, header="Learning rate", description="Increasing this value will speed up training convergence but might make it unstable.", - affects_outcome_of=ModelLifecycle.TRAINING + affects_outcome_of=ModelLifecycle.TRAINING, + auto_hpo_state=AutoHPOState.POSSIBLE ) learning_rate_fixed_iters = configurable_integer( diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.yaml b/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.yaml index de33ad2a07a..9c21c6fc8de 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.yaml +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/configuration.yaml @@ -4,7 +4,7 @@ id: "" learning_parameters: batch_size: affects_outcome_of: TRAINING - auto_hpo_state: not_possible + auto_hpo_state: POSSIBLE auto_hpo_value: null default_value: 8 description: @@ -30,7 +30,7 @@ learning_parameters: header: Learning Parameters learning_rate: affects_outcome_of: TRAINING - auto_hpo_state: not_possible + auto_hpo_state: POSSIBLE auto_hpo_value: null default_value: 0.001 description: diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/nncf_task.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/nncf_task.py index c5e280a4ec8..e83aa6dcb51 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/nncf_task.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/nncf_task.py @@ -43,7 +43,7 @@ from segmentation_tasks.apis.segmentation import OTESegmentationInferenceTask from segmentation_tasks.apis.segmentation.config_utils import prepare_for_training from segmentation_tasks.apis.segmentation.configuration import OTESegmentationConfig -from segmentation_tasks.apis.segmentation.ote_utils import TrainingProgressCallback +from segmentation_tasks.apis.segmentation.ote_utils import OptimizationProgressCallback from segmentation_tasks.extension.utils.hooks import OTELoggerHook from mmseg.apis.train import build_val_dataloader from mmseg.datasets import build_dataloader, build_dataset @@ -186,7 +186,9 @@ def optimize( else: update_progress_callback = default_progress_callback - time_monitor = TrainingProgressCallback(update_progress_callback) + time_monitor = OptimizationProgressCallback(update_progress_callback, + loading_stage_progress_percentage=5, + initialization_stage_progress_percentage=5) learning_curves = defaultdict(OTELoggerHook.Curve) training_config = prepare_for_training(config, train_dataset, val_dataset, time_monitor, learning_curves) @@ -197,6 +199,8 @@ def optimize( if not self._compression_ctrl: self._create_compressed_model(mm_train_dataset, training_config) + time_monitor.on_initialization_end() + self._is_training = True self._model.train() diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py index ef8346519dc..ab3c761ee71 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py @@ -14,12 +14,9 @@ import attr import logging -import inspect +import io import json import os -from shutil import copyfile, copytree -import sys -import subprocess # nosec import tempfile from addict import Dict as ADDict from typing import Any, Dict, Tuple, Optional, Union @@ -46,7 +43,7 @@ from ote_sdk.usecases.evaluation.metrics_helper import MetricsHelper from ote_sdk.usecases.exportable_code.inference import BaseInferencer from ote_sdk.usecases.exportable_code.prediction_to_annotation_converter import SegmentationToAnnotationConverter -import ote_sdk.usecases.exportable_code.demo as demo +from ote_sdk.usecases.exportable_code import demo from ote_sdk.usecases.tasks.interfaces.deployment_interface import IDeploymentTask from ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask @@ -209,40 +206,31 @@ def deploy(self, logger.info('Deploying the model') work_dir = os.path.dirname(demo.__file__) - model_file = inspect.getfile(type(self.inferencer.model)) parameters = {} parameters['type_of_model'] = self.hparams.postprocessing.class_name.value parameters['converter_type'] = 'SEGMENTATION' parameters['model_parameters'] = self.inferencer.configuration parameters['model_parameters']['labels'] = LabelSchemaMapper.forward(self.task_environment.label_schema) - name_of_package = "demo_package" - with tempfile.TemporaryDirectory() as tempdir: - copyfile(os.path.join(work_dir, "setup.py"), os.path.join(tempdir, "setup.py")) - copyfile(os.path.join(work_dir, "requirements.txt"), os.path.join(tempdir, "requirements.txt")) - copytree(os.path.join(work_dir, name_of_package), os.path.join(tempdir, name_of_package)) - config_path = os.path.join(tempdir, name_of_package, "config.json") - print(parameters) - with open(config_path, "w", encoding='utf-8') as f: - json.dump(parameters, f, ensure_ascii=False, indent=4) - # generate model.py - if (inspect.getmodule(self.inferencer.model) in - [module[1] for module in inspect.getmembers(model_wrappers, inspect.ismodule)]): - copyfile(model_file, os.path.join(tempdir, name_of_package, "model.py")) - # create wheel package - subprocess.run([sys.executable, os.path.join(tempdir, "setup.py"), 'bdist_wheel', - '--dist-dir', tempdir, 'clean', '--all']) - wheel_file_name = [f for f in os.listdir(tempdir) if f.endswith('.whl')][0] - - with ZipFile(os.path.join(tempdir, "openvino.zip"), 'w') as zip: - zip.writestr(os.path.join("model", "model.xml"), self.model.get_data("openvino.xml")) - zip.writestr(os.path.join("model", "model.bin"), self.model.get_data("openvino.bin")) - zip.write(os.path.join(tempdir, "requirements.txt"), os.path.join("python", "requirements.txt")) - zip.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) - zip.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) - zip.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) - zip.write(os.path.join(tempdir, wheel_file_name), os.path.join("python", wheel_file_name)) - with open(os.path.join(tempdir, "openvino.zip"), "rb") as file: - output_model.exportable_code = file.read() + + zip_buffer = io.BytesIO() + with ZipFile(zip_buffer, 'w') as arch: + # model files + arch.writestr(os.path.join("model", "model.xml"), self.model.get_data("openvino.xml")) + arch.writestr(os.path.join("model", "model.bin"), self.model.get_data("openvino.bin")) + arch.writestr( + os.path.join("model", "config.json"), json.dumps(parameters, ensure_ascii=False, indent=4) + ) + # model_wrappers files + for root, dirs, files in os.walk(os.path.dirname(model_wrappers.__file__)): + for file in files: + file_path = os.path.join(root, file) + arch.write(file_path, os.path.join("python", "model_wrappers", file_path.split("model_wrappers/")[1])) + # other python files + arch.write(os.path.join(work_dir, "requirements.txt"), os.path.join("python", "requirements.txt")) + arch.write(os.path.join(work_dir, "LICENSE"), os.path.join("python", "LICENSE")) + arch.write(os.path.join(work_dir, "README.md"), os.path.join("python", "README.md")) + arch.write(os.path.join(work_dir, "demo.py"), os.path.join("python", "demo.py")) + output_model.exportable_code = zip_buffer.getvalue() logger.info('Deploying completed') def optimize(self, @@ -250,6 +238,7 @@ def optimize(self, dataset: DatasetEntity, output_model: ModelEntity, optimization_parameters: Optional[OptimizationParameters]): + logger.info('Start POT optimization') if optimization_type is not OptimizationType.POT: raise ValueError("POT is the only supported optimization type for OpenVino models") @@ -275,6 +264,9 @@ def optimize(self, if get_nodes_by_type(model, ['FakeQuantize']): raise RuntimeError("Model is already optimized by POT") + if optimization_parameters is not None: + optimization_parameters.update_progress(10) + engine_config = ADDict({ 'device': 'CPU' }) @@ -306,6 +298,9 @@ def optimize(self, compress_model_weights(compressed_model) + if optimization_parameters is not None: + optimization_parameters.update_progress(90) + with tempfile.TemporaryDirectory() as tempdir: save_model(compressed_model, tempdir, model_name="model") with open(os.path.join(tempdir, "model.xml"), "rb") as f: @@ -323,3 +318,7 @@ def optimize(self, self.model = output_model self.inferencer = self.load_inferencer() + + if optimization_parameters is not None: + optimization_parameters.update_progress(100) + logger.info('POT optimization completed') diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py index d6d88f6bd0a..fe52d60ce97 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py @@ -60,12 +60,8 @@ def on_epoch_end(self, epoch, logs=None): score = None if hasattr(self.update_progress_callback, 'metric') and isinstance(logs, dict): score = logs.get(self.update_progress_callback.metric, None) - - # Workaround for NNCF trainer, which uses callback of a different type. - if score is not None: - self.update_progress_callback(self.get_progress(), score=float(score)) - else: - self.update_progress_callback(int(self.get_progress())) + score = float(score) if score is not None else None + self.update_progress_callback(self.get_progress(), score=score) class InferenceProgressCallback(TimeMonitorCallback): @@ -80,3 +76,42 @@ def __init__(self, num_test_steps, update_progress_callback: UpdateProgressCallb def on_test_batch_end(self, batch=None, logs=None): super().on_test_batch_end(batch, logs) self.update_progress_callback(int(self.get_progress())) + + +class OptimizationProgressCallback(TrainingProgressCallback): + """ Progress callback used for optimization using NNCF + There are three stages to the progress bar: + - 5 % model is loaded + - 10 % compressed model is initialized + - 10-100 % compressed model is being fine-tuned + """ + def __init__(self, update_progress_callback: UpdateProgressCallback, loading_stage_progress_percentage: int = 5, + initialization_stage_progress_percentage: int = 5): + super().__init__(update_progress_callback=update_progress_callback) + if loading_stage_progress_percentage + initialization_stage_progress_percentage >= 100: + raise RuntimeError('Total optimization progress percentage is more than 100%') + + self.loading_stage_progress_percentage = loading_stage_progress_percentage + self.initialization_stage_progress_percentage = initialization_stage_progress_percentage + + # set loading_stage_progress_percentage from the start as the model is already loaded at this point + self.update_progress_callback(loading_stage_progress_percentage) + + def on_train_begin(self, logs=None): + super().on_train_begin(logs) + # Callback initialization takes place here after OTEProgressHook.before_run() is called + train_percentage = 100 - self.loading_stage_progress_percentage - self.initialization_stage_progress_percentage + loading_stage_steps = self.total_steps * self.loading_stage_progress_percentage / train_percentage + initialization_stage_steps = self.total_steps * self.initialization_stage_progress_percentage / train_percentage + self.total_steps += loading_stage_steps + initialization_stage_steps + + self.current_step = loading_stage_steps + initialization_stage_steps + self.update_progress_callback(self.get_progress()) + + def on_train_end(self, logs=None): + super().on_train_end(logs) + self.update_progress_callback(self.get_progress(), score=logs) + + def on_initialization_end(self): + self.update_progress_callback(self.loading_stage_progress_percentage + + self.initialization_stage_progress_percentage) diff --git a/external/mmsegmentation/segmentation_tasks/extension/utils/hooks.py b/external/mmsegmentation/segmentation_tasks/extension/utils/hooks.py index 7b0805c9331..211d86ed83e 100644 --- a/external/mmsegmentation/segmentation_tasks/extension/utils/hooks.py +++ b/external/mmsegmentation/segmentation_tasks/extension/utils/hooks.py @@ -144,6 +144,7 @@ def before_run(self, runner): self.time_monitor.total_steps = max(math.ceil(self.time_monitor.steps_per_epoch * total_epochs), 1) self.time_monitor.current_step = 0 self.time_monitor.current_epoch = 0 + self.time_monitor.on_train_begin() def before_epoch(self, runner): self.time_monitor.on_epoch_begin(runner.epoch) diff --git a/external/mmsegmentation/tests/ote_cli/test_segmentation.py b/external/mmsegmentation/tests/ote_cli/test_segmentation.py index bcf3e49c1a9..5295472342b 100644 --- a/external/mmsegmentation/tests/ote_cli/test_segmentation.py +++ b/external/mmsegmentation/tests/ote_cli/test_segmentation.py @@ -146,7 +146,7 @@ def test_nncf_export(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) - @pytest.mark.skip(reason="Issue with model loading 76853") + @pytest.mark.skip("Issue with model loading 76853") def test_nncf_eval(self, template): if template.entrypoints.nncf is None: pytest.skip("nncf entrypoint is none") @@ -155,7 +155,7 @@ def test_nncf_eval(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) - @pytest.mark.skip(reason="Issue with model loading 76853") + @pytest.mark.skip("Issue with model loading 76853") def test_nncf_eval_openvino(self, template): if template.entrypoints.nncf is None: pytest.skip("nncf entrypoint is none") @@ -165,9 +165,13 @@ def test_nncf_eval_openvino(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_optimize(self, template): + if template.model_template_id.startswith('Custom_Semantic_Segmentation_Lite-HRNet-'): + pytest.skip('CVS-82482') pot_optimize_testing(template, root, ote_dir, args) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_pot_eval(self, template): + if template.model_template_id.startswith('Custom_Semantic_Segmentation_Lite-HRNet-'): + pytest.skip('CVS-82482') pot_eval_testing(template, root, ote_dir, args) diff --git a/ote_cli/ote_cli/datasets/__init__.py b/ote_cli/ote_cli/datasets/__init__.py index bde293df4ee..a4e157f3028 100644 --- a/ote_cli/ote_cli/datasets/__init__.py +++ b/ote_cli/ote_cli/datasets/__init__.py @@ -30,15 +30,15 @@ def get_dataset_class(task_type): """ if task_type == TaskType.ANOMALY_CLASSIFICATION: - from .anomaly.dataset import AnomalyClassificationDataset + from ote_anomalib.data.dataset import AnomalyClassificationDataset return AnomalyClassificationDataset if task_type == TaskType.ANOMALY_DETECTION: - from .anomaly.dataset import AnomalyDetectionDataset + from ote_anomalib.data.dataset import AnomalyDetectionDataset return AnomalyDetectionDataset if task_type == TaskType.ANOMALY_SEGMENTATION: - from .anomaly.dataset import AnomalySegmentationDataset + from ote_anomalib.data.dataset import AnomalySegmentationDataset return AnomalySegmentationDataset if task_type == TaskType.CLASSIFICATION: diff --git a/ote_cli/ote_cli/datasets/anomaly/__init__.py b/ote_cli/ote_cli/datasets/anomaly/__init__.py deleted file mode 100644 index d9aaf4962bc..00000000000 --- a/ote_cli/ote_cli/datasets/anomaly/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""DataLoader for anomaly tasks.""" - -# Copyright (C) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. diff --git a/ote_cli/ote_cli/tools/optimize.py b/ote_cli/ote_cli/tools/optimize.py index 7d9135a7e01..32976d75322 100644 --- a/ote_cli/ote_cli/tools/optimize.py +++ b/ote_cli/ote_cli/tools/optimize.py @@ -22,6 +22,7 @@ from ote_sdk.configuration.helper import create from ote_sdk.entities.inference_parameters import InferenceParameters from ote_sdk.entities.model import ModelEntity +from ote_sdk.entities.optimization_parameters import OptimizationParameters from ote_sdk.entities.resultset import ResultSetEntity from ote_sdk.entities.subset import Subset from ote_sdk.entities.task_environment import TaskEnvironment @@ -153,7 +154,7 @@ def main(): OptimizationType.POT if is_pot else OptimizationType.NNCF, dataset, output_model, - None, + OptimizationParameters(), ) save_model_data(output_model, args.save_model_to) diff --git a/ote_cli/ote_cli/utils/io.py b/ote_cli/ote_cli/utils/io.py index c6709e19ee1..9d81fc86529 100644 --- a/ote_cli/ote_cli/utils/io.py +++ b/ote_cli/ote_cli/utils/io.py @@ -21,7 +21,6 @@ import re import struct import tempfile -from io import BytesIO from zipfile import ZipFile from ote_sdk.entities.label import Domain, LabelEntity @@ -99,10 +98,6 @@ def read_model(model_configuration, path, train_dataset): with tempfile.TemporaryDirectory() as temp_dir: with ZipFile(path) as myzip: myzip.extractall(temp_dir) - with ZipFile( - os.path.join(temp_dir, "python", "demo_package-0.0-py3-none-any.whl") - ) as myzip: - myzip.extractall(temp_dir) model_path = os.path.join(temp_dir, "model", "model") model_adapters = { @@ -110,7 +105,7 @@ def read_model(model_configuration, path, train_dataset): "openvino.bin": ModelAdapter(read_binary(model_path + ".bin")), } - config_path = os.path.join(temp_dir, "demo_package", "config.json") + config_path = os.path.join(temp_dir, "model", "config.json") with open(config_path, encoding="UTF-8") as f: model_parameters = json.load(f)["model_parameters"] @@ -144,18 +139,10 @@ def read_label_schema(path): serialized_label_schema = json.load(read_file) elif path.endswith(".zip"): with ZipFile(path) as read_zip_file: - zfiledata = BytesIO( - read_zip_file.read( - os.path.join("python", "demo_package-0.0-py3-none-any.whl") - ) - ) - with ZipFile(zfiledata) as read_whl_file: - with read_whl_file.open( - os.path.join("demo_package", "config.json") - ) as read_file: - serialized_label_schema = json.load(read_file)["model_parameters"][ - "labels" - ] + with read_zip_file.open(os.path.join("model", "config.json")) as read_file: + serialized_label_schema = json.load(read_file)["model_parameters"][ + "labels" + ] return LabelSchemaMapper().backward(serialized_label_schema) diff --git a/ote_cli/ote_cli/utils/tests.py b/ote_cli/ote_cli/utils/tests.py index d53e4a4c861..d15ea4d5a50 100644 --- a/ote_cli/ote_cli/utils/tests.py +++ b/ote_cli/ote_cli/utils/tests.py @@ -74,23 +74,19 @@ def patch_demo_py(src_path, dst_path): content = [line for line in read_file] replaced = False for i, line in enumerate(content): - if "visualizer = Visualizer(media_type)" in line: - content[i] = line.rstrip() + "; visualizer.show = show\n" + if "visualizer = create_visualizer(models[-1].task_type)" in line: + content[i] = " visualizer = Visualizer(); visualizer.show = show\n" replaced = True assert replaced - content = ["def show(self):\n", " pass\n\n"] + content + content = [ + "from ote_sdk.usecases.exportable_code.visualizers import Visualizer\n", + "def show(self):\n", + " pass\n\n", + ] + content with open(dst_path, "w") as write_file: write_file.write("".join(content)) -def remove_ote_sdk_from_requirements(path): - with open(path, encoding="UTF-8") as read_file: - content = "".join([line for line in read_file if "ote_sdk" not in line]) - - with open(path, "w", encoding="UTF-8") as write_file: - write_file.write(content) - - def ote_train_testing(template, root, ote_dir, args): work_dir, template_work_dir, _ = get_some_vars(template, root) command_line = [ @@ -295,14 +291,6 @@ def ote_deploy_openvino_testing(template, root, ote_dir, args): == 0 ) - # Remove ote_sdk from requirements.txt, since merge commit (that is created on CI) - # is not pushed to github and that's why cannot be cloned. - # Install ote_sdk from local folder instead. - # Install the demo_package with --no-deps since, requirements.txt - # has been embedded to the demo_package during creation. - remove_ote_sdk_from_requirements( - os.path.join(deployment_dir, "python", "requirements.txt") - ) assert ( run( ["python3", "-m", "pip", "install", "pip", "--upgrade"], @@ -311,21 +299,6 @@ def ote_deploy_openvino_testing(template, root, ote_dir, args): ).returncode == 0 ) - assert ( - run( - [ - "python3", - "-m", - "pip", - "install", - "-e", - os.path.join(os.path.dirname(__file__), "..", "..", "..", "ote_sdk"), - ], - cwd=os.path.join(deployment_dir, "python"), - env=collect_env_vars(os.path.join(deployment_dir, "python")), - ).returncode - == 0 - ) assert ( run( [ @@ -341,21 +314,6 @@ def ote_deploy_openvino_testing(template, root, ote_dir, args): ).returncode == 0 ) - assert ( - run( - [ - "python3", - "-m", - "pip", - "install", - "demo_package-0.0-py3-none-any.whl", - "--no-deps", - ], - cwd=os.path.join(deployment_dir, "python"), - env=collect_env_vars(os.path.join(deployment_dir, "python")), - ).returncode - == 0 - ) # Patch demo since we are not able to run cv2.imshow on CI. patch_demo_py( @@ -369,7 +327,7 @@ def ote_deploy_openvino_testing(template, root, ote_dir, args): "python3", "demo_patched.py", "-m", - "../model/model.xml", + "../model", "-i", os.path.join(ote_dir, args["--input"]), ], diff --git a/ote_sdk/ote_sdk/configuration/elements/parameter_group.py b/ote_sdk/ote_sdk/configuration/elements/parameter_group.py index 8f8909fe2f0..45d071d9716 100644 --- a/ote_sdk/ote_sdk/configuration/elements/parameter_group.py +++ b/ote_sdk/ote_sdk/configuration/elements/parameter_group.py @@ -170,10 +170,10 @@ def __eq__(self, other): return False -TParameterGroup = TypeVar("TParameterGroup", bound=ParameterGroup) +_ParameterGroup = TypeVar("_ParameterGroup", bound=ParameterGroup) -def add_parameter_group(group: Type[TParameterGroup]) -> TParameterGroup: +def add_parameter_group(group: Type[_ParameterGroup]) -> _ParameterGroup: """ Wrapper to attr.ib to add nested parameter groups to a configuration. """ diff --git a/ote_sdk/ote_sdk/configuration/elements/primitive_parameters.py b/ote_sdk/ote_sdk/configuration/elements/primitive_parameters.py index fd7356da0ec..b78cc54cbec 100644 --- a/ote_sdk/ote_sdk/configuration/elements/primitive_parameters.py +++ b/ote_sdk/ote_sdk/configuration/elements/primitive_parameters.py @@ -44,7 +44,7 @@ # pylint:disable=too-many-arguments -TConfigurableEnum = TypeVar("TConfigurableEnum", bound=ConfigurableEnum) +_ConfigurableEnum = TypeVar("_ConfigurableEnum", bound=ConfigurableEnum) def set_common_metadata( @@ -350,7 +350,7 @@ class for more details. Defaults to NullUIRules. def selectable( - default_value: TConfigurableEnum, + default_value: _ConfigurableEnum, header: str, description: str = "Default selectable description", warning: str = None, @@ -360,7 +360,7 @@ def selectable( ui_rules: UIRules = NullUIRules(), auto_hpo_state: AutoHPOState = AutoHPOState.NOT_POSSIBLE, auto_hpo_value: Optional[str] = None, -) -> TConfigurableEnum: +) -> _ConfigurableEnum: """ Constructs a selectable attribute from a pre-defined Enum, with the appropriate metadata. The list of options for display in the UI is inferred from the type of the ConfigurableEnum instance passed in as default_value. @@ -408,8 +408,8 @@ class for more details. Defaults to NullUIRules. type_validator = attr.validators.instance_of(ConfigurableEnum) value_validator = construct_attr_enum_selectable_onsetattr(default_value) - # The Attribute returned by attr.ib is not compatible with the return typevar TConfigurableEnum. However, as the - # class containing the Attribute is instantiated the selectable type will correspond to the TConfigurableEnum, so + # The Attribute returned by attr.ib is not compatible with the return typevar _ConfigurableEnum. However, as the + # class containing the Attribute is instantiated the selectable type will correspond to the _ConfigurableEnum, so # mypy can ignore the error. return attr.ib( default=default_value, diff --git a/ote_sdk/ote_sdk/entities/dataset_item.py b/ote_sdk/ote_sdk/entities/dataset_item.py index 8f9c6a3e375..8192c845690 100644 --- a/ote_sdk/ote_sdk/entities/dataset_item.py +++ b/ote_sdk/ote_sdk/entities/dataset_item.py @@ -247,6 +247,7 @@ def get_annotations( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False, + include_ignored: bool = False, ) -> List[Annotation]: """ Returns a list of annotations that exist in the dataset item (wrt. ROI). This is done by checking that the @@ -254,19 +255,21 @@ def get_annotations( :param labels: Subset of input labels to filter with; if ``None``, all the shapes within the ROI are returned :param include_empty: if True, returns both empty and non-empty labels + :param include_ignored: if True, includes the labels in ignored_labels :return: The intersection of the input label set and those present within the ROI """ is_full_box = Rectangle.is_full_box(self.roi.shape) annotations = [] - if is_full_box and labels is None and not include_empty: + if is_full_box and labels is None and include_empty and include_ignored: # Fast path for the case where we do not need to change the shapes - # todo: this line is incorrect. CVS-75919 annotations = self.annotation_scene.annotations else: # Todo: improve speed. This is O(n) for n shapes. roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) - labels_set = {label.name for label in labels} if labels is not None else {} + labels_set = ( + {label.name for label in labels} if labels is not None else set() + ) for annotation in self.annotation_scene.annotations: if not is_full_box and not self.roi.shape.contains_center( @@ -276,13 +279,20 @@ def get_annotations( shape_labels = annotation.get_labels(include_empty) + if not include_ignored: + shape_labels = [ + label + for label in shape_labels + if label.label not in self.ignored_labels + ] + if labels is not None: shape_labels = [ label for label in shape_labels if label.name in labels_set ] - if len(shape_labels) == 0: - continue + if len(shape_labels) == 0: + continue if not is_full_box: # Create a denormalized copy of the shape. @@ -326,23 +336,32 @@ def append_annotations(self, annotations: Sequence[Annotation]): self.annotation_scene.append_annotations(validated_annotations) def get_roi_labels( - self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False + self, + labels: Optional[List[LabelEntity]] = None, + include_empty: bool = False, + include_ignored: bool = False, ) -> List[LabelEntity]: """ Return the subset of the input labels which exist in the dataset item (wrt. ROI). :param labels: Subset of input labels to filter with; if ``None``, all the labels within the ROI are returned :param include_empty: if True, returns both empty and non-empty labels + :param include_ignored: if True, includes the labels in ignored_labels :return: The intersection of the input label set and those present within the ROI """ filtered_labels = set() for label in self.roi.get_labels(include_empty): if labels is None or label.get_label() in labels: filtered_labels.add(label.get_label()) + if not include_ignored: + filtered_labels -= self.ignored_labels return sorted(list(filtered_labels), key=lambda x: x.name) def get_shapes_labels( - self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False + self, + labels: Optional[List[LabelEntity]] = None, + include_empty: bool = False, + include_ignored: bool = False, ) -> List[LabelEntity]: """ Get the labels of the shapes present in this dataset item. if a label list is supplied, only labels present @@ -350,16 +369,20 @@ def get_shapes_labels( :param labels: if supplied only labels present in this list are returned :param include_empty: if True, returns both empty and non-empty labels + :param include_ignored: if True, includes the labels in ignored_labels :return: a list of labels from the shapes within the roi of this dataset item """ - annotations = self.get_annotations() + annotations = self.get_annotations( + labels=labels, include_empty=include_empty, include_ignored=include_ignored + ) scored_label_set = set( itertools.chain( *[annotation.get_labels(include_empty) for annotation in annotations] ) ) label_set = {scored_label.get_label() for scored_label in scored_label_set} - + if not include_ignored: + label_set -= self.ignored_labels if labels is None: return list(label_set) return [label for label in label_set if label in labels] diff --git a/ote_sdk/ote_sdk/entities/metrics.py b/ote_sdk/ote_sdk/entities/metrics.py index 049db2e94d7..463ba2ebaeb 100644 --- a/ote_sdk/ote_sdk/entities/metrics.py +++ b/ote_sdk/ote_sdk/entities/metrics.py @@ -558,11 +558,11 @@ def __repr__(self): ) -MetricType = TypeVar("MetricType", bound=MetricEntity) -VisualizationInfoType = TypeVar("VisualizationInfoType", bound=VisualizationInfo) +_Metric = TypeVar("_Metric", bound=MetricEntity) +_VisualizationInfo = TypeVar("_VisualizationInfo", bound=VisualizationInfo) -class MetricsGroup(Generic[MetricType, VisualizationInfoType]): +class MetricsGroup(Generic[_Metric, _VisualizationInfo]): """ This class aggregates a list of metric entities and defines how this group will be visualized on the UI. This class is the parent class to the different types of @@ -571,7 +571,7 @@ class MetricsGroup(Generic[MetricType, VisualizationInfoType]): :example: An accuracy as a metrics group >>> acc = ScoreMetric("Accuracy", 0.5) - >>> visual_info = BarChartInfo("Accuracy", visualization_type=VisualizationInfoType.BAR) # show it as radial bar + >>> visual_info = BarChartInfo("Accuracy", visualization_type=_VisualizationInfo.BAR) # show it as radial bar >>> metrics_group = BarMetricsGroup([acc], visual_info) Loss curves as a metrics group @@ -583,7 +583,7 @@ class MetricsGroup(Generic[MetricType, VisualizationInfoType]): """ def __init__( - self, metrics: Sequence[MetricType], visualization_info: VisualizationInfoType + self, metrics: Sequence[_Metric], visualization_info: _VisualizationInfo ): if metrics is None or len(metrics) == 0: raise ValueError("Metrics cannot be None or empty") @@ -675,11 +675,16 @@ def __init__( raise ValueError( f"Expected score to be of type `ScoreMetric`, got type `{type(score)}` instead." ) - self.score: ScoreMetric = score + self._score: ScoreMetric = score self.dashboard_metrics: List[MetricsGroup] = ( [] if dashboard_metrics is None else dashboard_metrics ) + @property + def score(self): + """Return the score metric.""" + return self._score + def __eq__(self, other: object) -> bool: if not isinstance(other, Performance): return False @@ -702,3 +707,62 @@ def __repr__(self): def __eq__(self, other): return isinstance(other, NullPerformance) + + +class MultiScorePerformance(Performance): + """ + This class can be used in tasks where performance is measured by multiple metrics + + :param primary_score: The main performance score. + :param additional_metrics: List of additional scores. When no primary score is provided, the first additional score + takes priority as the main project score. + :param dashboard_metrics: (optional) additional statistics, containing charts, curves, and other additional info. + """ + + def __init__( + self, + primary_score: Optional[ScoreMetric] = None, + additional_scores: Optional[List[ScoreMetric]] = None, + dashboard_metrics: Optional[List[MetricsGroup]] = None, + ): + assert primary_score is not None or ( + additional_scores is not None and len(additional_scores) > 0 + ), "Provide at least one primary or additional score." + + self._primary_score = primary_score + self._additional_scores: List[ScoreMetric] = ( + [] if additional_scores is None else additional_scores + ) + self.dashboard_metrics: List[MetricsGroup] = ( + [] if dashboard_metrics is None else dashboard_metrics + ) + + if self.primary_score is None: + super().__init__(self.additional_scores[0], dashboard_metrics) + else: + super().__init__(self.primary_score, dashboard_metrics) + + @property + def primary_score(self) -> Optional[ScoreMetric]: + """Return the primary score metric.""" + return self._primary_score + + @property + def additional_scores(self) -> List[ScoreMetric]: + """Return the additional score metrics.""" + return self._additional_scores + + def __eq__(self, other: object) -> bool: + if not isinstance(other, MultiScorePerformance): + return False + return ( + self.primary_score == other.primary_score + and self.additional_scores == other.additional_scores + ) + + def __repr__(self): + return ( + f"MultiScorePerformance(score: {self.score.value}, primary_metric: {self.primary_score}, " + f"additional_metrics: ({len(self.additional_scores)} metrics), " + f"dashboard: ({len(self.dashboard_metrics)} metric groups))" + ) diff --git a/ote_sdk/ote_sdk/entities/model_template.py b/ote_sdk/ote_sdk/entities/model_template.py index 0e9d075e3bb..939c184b1d1 100644 --- a/ote_sdk/ote_sdk/entities/model_template.py +++ b/ote_sdk/ote_sdk/entities/model_template.py @@ -114,6 +114,7 @@ def __init__( self.is_trainable = task_info.is_trainable self.is_anomaly = task_info.is_anomaly self.is_global = task_info.is_global + self.is_local = task_info.is_local NULL = 1, TaskInfo( domain=Domain.NULL, @@ -553,7 +554,7 @@ def supports_auto_hpo(self) -> bool: self.hyper_parameters.data, key_to_search=metadata_keys.AUTO_HPO_STATE ) for result in auto_hpo_state_results: - if result[0] == AutoHPOState.POSSIBLE: + if str(result[0]).lower() == str(AutoHPOState.POSSIBLE): return True return False diff --git a/ote_sdk/ote_sdk/entities/optimization_parameters.py b/ote_sdk/ote_sdk/entities/optimization_parameters.py index ad827effef1..e5bd9482b7c 100644 --- a/ote_sdk/ote_sdk/entities/optimization_parameters.py +++ b/ote_sdk/ote_sdk/entities/optimization_parameters.py @@ -4,10 +4,11 @@ # from dataclasses import dataclass -from typing import Callable +from typing import Callable, Optional -def default_progress_callback(_: int): +# pylint: disable=unused-argument +def default_progress_callback(progress: float, score: Optional[float] = None): """ This is the default progress callback for OptimizationParameters. """ @@ -34,5 +35,7 @@ class OptimizationParameters: """ resume: bool = False - update_progress: Callable[[int], None] = default_progress_callback + update_progress: Callable[ + [float, Optional[float]], None + ] = default_progress_callback save_model: Callable[[], None] = default_save_model_callback diff --git a/ote_sdk/ote_sdk/entities/shapes/ellipse.py b/ote_sdk/ote_sdk/entities/shapes/ellipse.py index ce9271c12ef..863f5136c9b 100644 --- a/ote_sdk/ote_sdk/entities/shapes/ellipse.py +++ b/ote_sdk/ote_sdk/entities/shapes/ellipse.py @@ -243,7 +243,7 @@ def get_evenly_distributed_ellipse_coordinates( :return: list of tuple's with coordinates along the ellipse line """ angles = 2 * np.pi * np.arange(number_of_coordinates) / number_of_coordinates - e = (1.0 - self.minor_axis ** 2.0 / self.major_axis ** 2.0) ** 0.5 + e = (1.0 - self.minor_axis**2.0 / self.major_axis**2.0) ** 0.5 total_size = special.ellipeinc(2.0 * np.pi, e) arc_size = total_size / number_of_coordinates arcs = np.arange(number_of_coordinates) * arc_size diff --git a/ote_sdk/ote_sdk/test_suite/QUICK_HOWTO.md b/ote_sdk/ote_sdk/test_suite/QUICK_HOWTO.md new file mode 100644 index 00000000000..5837f5df7f7 --- /dev/null +++ b/ote_sdk/ote_sdk/test_suite/QUICK_HOWTO.md @@ -0,0 +1,176 @@ +# Quick HOW TO add training tests using OTE SDK test suite + +## I. Introduction to OTE SDK test suite +### I.1 General description + +OTE SDK test suite allows to create training tests + +The training tests are tests that may run in some unified manner such stages (or, as we also +call it, "actions") as +* training of a model, +* evaluation of the trained model, +* export or optimization of the trained model, +* and evaluation of exported/optimized model. + +Typically each OTE algo backend contains test file `test_ote_training.py` that allows to run the +training tests. + +Note that there are a lot of dependencies between different stages of training tests: most of them +require trained model, so they depends on training stage; also for example POT optimization stage +and evaluation of exported model stage require the exported model, so export stage should be run +before, etc. + +The `test_suite` library allows to create training tests such that +1. the tests do not repeat the common steps that can be re-used +2. if we point for pytest that only some test stage is required, all dependency stages are run + automatically +3. if a stage is failed all the stage that depend on this stage are also failed. + +To avoid repeating of the common steps between stages the results of stages should be kept in a +special cache to be re-used by the next stages. + +We suppose that each test executes one test stage (also called test action). + +At the moment we have the following test actions: +* class `"training"` -- training of a model +* class `"training_evaluation"` -- evaluation after the training +* class `"export"` -- export after the training +* class `"export_evaluation"` -- evaluation of exported model +* class `"pot"` -- POT compression of exported model +* class `"pot_evaluation"` -- evaluation of POT-compressed model +* class `"nncf"` -- NNCF-compression of the trained model +* class `"nncf_graph"` -- check of NNCF compression graph (work on not trained model) +* class `"nncf_evaluation"` -- evaluation of NNCF-compressed model +* class `"nncf_export"` -- export of NNCF-compressed model +* class `"nncf_export_evaluation"` -- evaluation after export of NNCF-compressed model + +### I.2. General description of test cases + +One of the most important question is when a test may re-use results of another test. +We can consider this from the following point of view. +We suppose that the test suite indeed do not make several independent tests, but make a set of +actions with several "test cases". +Since the test suite works with OTE, each "test case" is considered as a situation that could be +happened during some process of work with OTE, and the process may include different actions. + +Since OTE is focused on training a neural network and making some operations on the trained model, +we defined the test case by the parameters that define training process +(at least they defines it as much as it is possible for such stochastic process). + +Usually the parameters defining the training process are: +1. a model - typically it is a name of OTE template to be used + -- this is the field `model_template_id` of the model template YAML file +2. a dataset - typically it is a dataset name that should be used + (we use known pre-defined names for the datasets on our CI) +3. other training parameters: + * `batch_size` + * `num_training_epochs` or `num_training_iters` + +We suppose that for each algo backend there is a known set of parameters that define training +process, and we suppose that if two tests have the same these parameters, then they are belong to +the same test case. +We call these parameters "the parameters defining the test case". + +But from pytest point of view there are just a lot of tests with some parameters. + +The general approach that is used to allow re-using results of test stages between test is the +following: +* The tests are grouped such that the tests from one group have the same parameters from the list + of "parameters that define the test case" -- it means that the tests are grouped by the + "test cases" +* After that the tests are reordered such that + * the test from one group are executed sequentially one-by-one, without tests from other group + between tests in one group + * the test from one group are executed sequentially in the order defined for the test actions + beforehand; +* An instance of a special test case class is created once for each of the group of tests stated above + -- so, the instance of test case class is created for each "test case" described above. + +The instance of the special test case class (described in the last item of the list above) +is kept inside cache in test suite, it allows to use the results of the +previous tests of the same test case in the current test. + +### I.3. String names of tests + +Pytest allows running parametrized test methods in test classes. + +The test suite is made such that for each OTE task (e.g. "object detection", "image classification", +etc) there is one test class with one test method with the name `test`, the method is parametrized +using special pytest tricks in the function `pytest_generate_tests` in the file `conftest.py` in the +folder `tests/`. + +(Note that "classical" way of parametrization of a class method is using pytest decorator +`@pytest.mark.parametrize`, but we do NOT use this way, since we need to regroup tests by test cases +-- see details in the previous section.) + +For each parametrized test method the pytest framework generates its name as follows: +`.[]` + +For the test suite the test names are generated in the same way (this is the inner part of pytest +that was not changed by us), but test suite generates the `parameters_string` part. + +Test suite generates the parameters string using +1. the name of the test action (aka test stage) +2. the values of the test's parameters defining test behavior + (see the previous section "II. General description of test cases") +3. the usecase -- at the moment it is either "precommit" or "reallife" + +Note that in test suite the test parameters may have "short names" that are used during generation +of the test parameters strings. +Examples of test parameters short names +* for parameter `model_name` -- `"model"` +* for parameter `dataset_name` -- `"dataset"` +* for parameter `num_training_iters` -- `"num_iters"` +* for parameter `batch_size` -- `"batch"` + +So, examples of test parameters strings are +* `ACTION-training_evaluation,model-Custom_Object_Detection_Gen3_ATSS,dataset-bbcd,num_iters-CONFIG,batch-CONFIG,usecase-reallife` +* `ACTION-nncf_export_evaluation,model-Custom_Image_Classification_EfficinetNet-B0,dataset-lg_chem,num_epochs-CONFIG,batch-CONFIG,usecase-reallife` + +The test parameters strings are used in the test suite as test id-s. +Although the id-s are unique, they have a drawback -- they are quite long, since they contain all +the info to identify the test. + +## II. How To-s + +### II.1 How to add a new model+dataset pair to the training tests + +Let's there are implemented training tests for some OTE SDK algo backend, and we want to add +new model+dataset pair to the training test. + +In this case you should do as follows: +1. Open the file with the training tests for the task type. + Typically it has name `test_ote_training.py` and it is placed in the folder + `external//tests/`. + +2. Find the class derived either from the class `OTETestCreationParametersInterface` + or from the class `DefaultOTETestCreationParametersInterface`. + There should be only one such class in the file, it should have name like + `ObjectDetectionTrainingTestParameters`. + +3. Find the method `test_bunches` in the class. + Most probably the method creates a variable `test_bunches` with a list of dicts, + and returns the deepcopy of the variable. + +4. Make change: add to the list a new element -- dict with the following keys + * `model_name` -- either a string with the model name or a list of strings with the model names, + the model names should be taken from the field `model_template_id` of the model template YAML + file + * `dataset_name` -- either a string with the dataset name or a list of strings with the dataset names, + we use known pre-defined names for the datasets on our CI. + The dataset names may be taken from the YAML file `dataset_definitions.yml` in the dataset server + of the CI. + (If you should add a new dataset -- please, upload your dataset in the proper folder to the + server and point the relative paths to the dataset parts to the file `dataset_definitions.yml` + in the folder) + Note that if `model_name` and/or `dataset_name` are lists, the test will be executed for + all possible pairs `(model, dataset)` from Cartesian product of the lists. + * `num_training_iters` or `max_num_epochs` or `patience` -- either integer, or a constant + `KEEP_CONFIG_FIELD_VALUE` to keep the value from the template, or just do not add (skip) the + key to use the default small value for the precommit tests (1 or 2) + * `batch_size` -- either integer, or a constant `KEEP_CONFIG_FIELD_VALUE` to keep the value from + the template, or just do not add (skip) the key to use the default small value for the + precommit tests (1 or 2) + * `usecase` -- either `REALLIFE_USECASE_CONSTANT` for reallife training tests or "precommit" for + precommit tests + diff --git a/ote_sdk/ote_sdk/tests/entities/test_dataset_item.py b/ote_sdk/ote_sdk/tests/entities/test_dataset_item.py index 826141ad659..71fd3ed1053 100644 --- a/ote_sdk/ote_sdk/tests/entities/test_dataset_item.py +++ b/ote_sdk/ote_sdk/tests/entities/test_dataset_item.py @@ -577,8 +577,18 @@ def test_dataset_item_get_annotations(self): result_annotations = partial_box_dataset_item.get_annotations( include_empty=True ) - expected_annotations = [expected_annotation] - self.compare_denormalized_annotations(result_annotations, expected_annotations) + self.compare_denormalized_annotations(result_annotations, [expected_annotation]) + + # Check if ignored labels are properly removed + ignore_labels_dataset_item = ( + DatasetItemParameters().default_values_dataset_item() + ) + ignore_labels_dataset_item.ignored_labels = ( + ignore_labels_dataset_item.get_shapes_labels( + include_ignored=True, include_empty=True + ) + ) + assert ignore_labels_dataset_item.get_annotations(include_empty=True) == [] @pytest.mark.priority_medium @pytest.mark.unit @@ -660,6 +670,7 @@ def test_dataset_item_get_roi_labels(self): Steps 1. Check annotations list returned by "get_roi_labels" for non-specified "labels" parameter 2. Check annotations list returned by "get_roi_labels" for specified "labels" parameter + 3. Check annotations list returned by "get_roi_labels" if dataset item ignores a label """ dataset_item = DatasetItemParameters().dataset_item() roi_labels = DatasetItemParameters.roi_labels() @@ -674,6 +685,9 @@ def test_dataset_item_get_roi_labels(self): assert dataset_item.get_roi_labels(labels=[empty_roi_label]) == [] # Scenario for "include_empty" is "True" assert dataset_item.get_roi_labels([empty_roi_label], True) == [empty_roi_label] + # Scenario for ignored labels + dataset_item.ignored_labels = [empty_roi_label] + assert dataset_item.get_roi_labels([empty_roi_label], True) == [] @pytest.mark.priority_medium @pytest.mark.unit @@ -693,6 +707,7 @@ def test_dataset_item_get_shapes_labels(self): Steps 1. Check labels list returned by "get_shapes_labels" for non-specified "labels" parameter 2. Check labels list returned by "get_shapes_labels" for specified "labels" parameter + 3. Check labels list returned by "get_shapes_labels" if dataset_item ignores labels """ dataset_item = DatasetItemParameters().default_values_dataset_item() labels = DatasetItemParameters.labels() @@ -713,7 +728,17 @@ def test_dataset_item_get_shapes_labels(self): list_labels = [segmentation_label, non_included_label] assert dataset_item.get_shapes_labels(labels=list_labels) == [] # Scenario for "include_empty" is "True", expected that non_included label will not be shown - assert dataset_item.get_shapes_labels(list_labels, True) == [segmentation_label] + assert dataset_item.get_shapes_labels(list_labels, include_empty=True) == [ + segmentation_label + ] + # Check ignore labels functionality + dataset_item.ignored_labels = [detection_label] + assert dataset_item.get_shapes_labels( + include_empty=True, include_ignored=False + ) == [segmentation_label] + assert dataset_item.get_shapes_labels( + include_empty=False, include_ignored=True + ) == [detection_label] @pytest.mark.priority_medium @pytest.mark.unit diff --git a/ote_sdk/ote_sdk/tests/entities/test_metrics.py b/ote_sdk/ote_sdk/tests/entities/test_metrics.py index 9dfd3ef7698..3fa1c3c2f71 100644 --- a/ote_sdk/ote_sdk/tests/entities/test_metrics.py +++ b/ote_sdk/ote_sdk/tests/entities/test_metrics.py @@ -21,6 +21,7 @@ MatrixChartInfo, MatrixMetric, MatrixMetricsGroup, + MultiScorePerformance, NullMetric, NullPerformance, Performance, @@ -1094,3 +1095,67 @@ def test_null_performance(self): score_metric = TestScoreMetric().score_metric() performance = Performance(score_metric) assert null_performance != performance + + +@pytest.mark.components(OteSdkComponent.OTE_SDK) +class TestMultiScorePerformance: + @pytest.mark.priority_medium + @pytest.mark.unit + @pytest.mark.reqids(Requirements.REQ_1) + def test_multi_score_performance(self): + """ + Description: + Check MultiScorePerformance class + + Input data: + MultiScorePerformance object with specified score + + Expected results: + Test passes if MultiScorePerformance object score attribute and __eq__ and __repr__ method return + expected values + + Steps + 1. Check primary and additional score attributes for MultiScorePerformance object + 2. Check primary and additional score attributes for MultiScorePerformance object when only primary score is + passed + 3. Check primary and additional score attributes for MultiScorePerformance object when only additional score is + passed + 4. Check __eq__ method for equal and unequal Performance objects + 5. Check __repr__ method + """ + # Positive scenario for Performance object with default parameters + primary_score = TestScoreMetric().score_metric() + additional_score = TestScoreMetric().score_metric() + default_parameters_performance = MultiScorePerformance( + primary_score, [additional_score] + ) + assert default_parameters_performance.score == primary_score + assert default_parameters_performance.primary_score == primary_score + assert default_parameters_performance.additional_scores == [additional_score] + assert default_parameters_performance.dashboard_metrics == [] + # Positive scenario for Performance object with only primary metric + only_primary_performance = MultiScorePerformance(primary_score) + assert only_primary_performance.score == primary_score + assert only_primary_performance.primary_score == primary_score + assert only_primary_performance.additional_scores == [] + assert only_primary_performance.dashboard_metrics == [] + # Positive scenario for Performance object with only additional metric + only_additional_performance = MultiScorePerformance( + additional_scores=[additional_score] + ) + assert only_additional_performance.score == additional_score + assert only_additional_performance.primary_score is None + assert only_additional_performance.additional_scores == [additional_score] + assert only_additional_performance.dashboard_metrics == [] + # Checking __eq__ method + equal_default_parameters_performance = MultiScorePerformance( + primary_score, [additional_score] + ) + assert default_parameters_performance == equal_default_parameters_performance + assert default_parameters_performance != only_primary_performance + # Checking __repr__ method + assert ( + repr(default_parameters_performance) + == "MultiScorePerformance(score: 2.0, primary_metric: ScoreMetric(name=`Test ScoreMetric`, score=`2.0`), " + "additional_metrics: (1 metrics), dashboard: (0 metric groups))" + ) diff --git a/ote_sdk/ote_sdk/tests/entities/test_optimization_parameters.py b/ote_sdk/ote_sdk/tests/entities/test_optimization_parameters.py index 627c5c733f1..f5d32dec9ba 100644 --- a/ote_sdk/ote_sdk/tests/entities/test_optimization_parameters.py +++ b/ote_sdk/ote_sdk/tests/entities/test_optimization_parameters.py @@ -75,9 +75,12 @@ def test_optimization_parameters_update_member(self): opt_params = OptimizationParameters(False) assert opt_params.resume is False assert ( - opt_params.update_progress(-2147483648) - is opt_params.update_progress(0) - is opt_params.update_progress(2147483648) + opt_params.update_progress(0) + is opt_params.update_progress(50.5) + is opt_params.update_progress(100) + is opt_params.update_progress(0, 0.3) + is opt_params.update_progress(50.5, 1.4) + is opt_params.update_progress(100, -6.1) is None ) assert opt_params.save_model() is None @@ -85,9 +88,12 @@ def test_optimization_parameters_update_member(self): opt_params = OptimizationParameters(True) assert opt_params.resume is True assert ( - opt_params.update_progress(-2147483648) - is opt_params.update_progress(0) - is opt_params.update_progress(2147483648) + opt_params.update_progress(0) + is opt_params.update_progress(50.5) + is opt_params.update_progress(100) + is opt_params.update_progress(0, 0.3) + is opt_params.update_progress(50.5, 1.4) + is opt_params.update_progress(100, -6.1) is None ) assert opt_params.save_model() is None diff --git a/ote_sdk/ote_sdk/tests/requirements.txt b/ote_sdk/ote_sdk/tests/requirements.txt index 36b2e05090d..32fa04b49aa 100644 --- a/ote_sdk/ote_sdk/tests/requirements.txt +++ b/ote_sdk/ote_sdk/tests/requirements.txt @@ -1,8 +1,4 @@ -bandit==1.7.* -flake8==3.9.* -mypy==0.812 pylint==2.7.3 pytest==6.2.* -pytest-cov==2.11.* -openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2021/SCMVP#egg=openmodelzoo-modelapi&subdirectory=demos/common/python -openvino==2022.1.0.dev20220316 \ No newline at end of file +openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2022/SCv1.1#egg=openmodelzoo-modelapi&subdirectory=demos/common/python +openvino==2022.1.0 diff --git a/ote_sdk/ote_sdk/tests/usecases/evaluation/test_accuracy.py b/ote_sdk/ote_sdk/tests/usecases/evaluation/test_accuracy.py index bf06acc3908..595373ddc34 100644 --- a/ote_sdk/ote_sdk/tests/usecases/evaluation/test_accuracy.py +++ b/ote_sdk/ote_sdk/tests/usecases/evaluation/test_accuracy.py @@ -676,20 +676,14 @@ def test_accuracy_compute_accuracy(self): name="other_confusion_matrix", matrix_values=np.array([[4, 0, 0], [2, 4, 0], [0, 0, 6]]), ) - assert ( - accuracy._compute_accuracy( - average=MetricAverageMethod.MICRO, - confusion_matrices=[confusion_matrix, other_confusion_matrix], - ) - == np.float64(0.8333333333333334) - ) - assert ( - accuracy._compute_accuracy( - average=MetricAverageMethod.MACRO, - confusion_matrices=[confusion_matrix, other_confusion_matrix], - ) - == np.float64(0.8375) - ) + assert accuracy._compute_accuracy( + average=MetricAverageMethod.MICRO, + confusion_matrices=[confusion_matrix, other_confusion_matrix], + ) == np.float64(0.8333333333333334) + assert accuracy._compute_accuracy( + average=MetricAverageMethod.MACRO, + confusion_matrices=[confusion_matrix, other_confusion_matrix], + ) == np.float64(0.8375) # Checking "ValueError" exception is raised when empty list is specified as "confusion_matrices" with pytest.raises(ValueError): accuracy._compute_accuracy( diff --git a/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_streamer.py b/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_streamer.py index 7f41565b482..87fe6e21f0d 100644 --- a/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_streamer.py +++ b/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_streamer.py @@ -18,7 +18,9 @@ ) from ote_sdk.usecases.exportable_code.streamer import ( CameraStreamer, + DirStreamer, ImageStreamer, + OpenError, ThreadedStreamer, VideoStreamer, get_streamer, @@ -57,23 +59,23 @@ def test_image_streamer_with_single_image(self): @pytest.mark.priority_medium @pytest.mark.unit @pytest.mark.reqids(Requirements.REQ_1) - def test_image_streamer_with_folder(self): + def test_dir_streamer_with_folder(self): """ Description: - Test that ImageStreamer works correctly with a folder of images as input + Test that DirStreamer works correctly with a folder of images as input Input data: Folder with 10 random images Expected results: - Test passes if ImageStreamer returns ten images with the correct size + Test passes if DirStreamer returns ten images with the correct size Steps - 1. Create ImageStreamer + 1. Create DirStreamer 2. Request images from streamer """ with generate_random_image_folder(height=360, width=480) as path: - streamer = ImageStreamer(path) + streamer = DirStreamer(path) self.assert_streamer_element(streamer) @pytest.mark.priority_medium @@ -101,47 +103,53 @@ def test_video_streamer_with_single_video(self): @pytest.mark.priority_medium @pytest.mark.unit @pytest.mark.reqids(Requirements.REQ_1) - def test_video_streamer_with_folder(self): + def test_video_streamer_with_loop_flag(self): """ Description: - Test that VideoStreamer works correctly with a a folder of videos as input + Test that VideoStreamer works correctly with a loop flag Input data: - Folder with random videos + Random Video file Expected results: Test passes if VideoStreamer returns frames with the correct amount of dimensions + after the end of the video Steps 1. Create VideoStreamer 2. Request frames from streamer """ - with generate_random_video_folder() as path: - streamer = VideoStreamer(path) + with generate_random_single_video( + height=360, width=480, number_of_frames=100 + ) as path: + streamer = VideoStreamer(path, loop=True) - for frame in streamer: + for index, frame in enumerate(streamer): assert frame.shape[-1] == 3 + if index > 200: + break @pytest.mark.priority_medium @pytest.mark.unit @pytest.mark.reqids(Requirements.REQ_1) - def test_image_file_fails_on_video_streamer(self): + def test_video_streamer_with_single_image(self): """ Description: - Test that VideoStreamer raises an exception if an image is passed + Test that VideoStreamer works correctly with a single image as input Input data: Random image file Expected results: - Test passes if a ValueError is raised + Test passes if VideoStreamer can read the single frame Steps - 1. Attempt to create VideoStreamer + 1. Create VideoStreamer + 2. Request frame from VideoStreamer """ - with generate_random_single_image() as path: - with pytest.raises(ValueError): - VideoStreamer(path) + with generate_random_single_video(height=360, width=480) as path: + streamer = VideoStreamer(path) + self.assert_streamer_element(streamer) @pytest.mark.priority_medium @pytest.mark.unit @@ -171,25 +179,25 @@ def test_invalid_inputs_to_get_streamer(self): invalid_file = Path(temp_dir) / "not_valid.bin" invalid_file.touch() - with pytest.raises(ValueError) as context: + with pytest.raises(Exception) as context: get_streamer(str(invalid_file)) the_exception = context # .exception - assert "not supported" in str(the_exception), str(the_exception) + assert "Can't open" in str(the_exception), str(the_exception) with tempfile.TemporaryDirectory() as empty_dir: - with pytest.raises(FileNotFoundError): + with pytest.raises(Exception): get_streamer(empty_dir) with generate_random_video_folder() as path: - with pytest.raises(FileNotFoundError): + with pytest.raises(Exception): get_streamer(path) - with pytest.raises(ValueError) as context: + with pytest.raises(Exception) as context: get_streamer("not_a_file") the_exception = context # .exception - assert "does not exist" in str(the_exception), str(the_exception) + assert "Can't find" in str(the_exception), str(the_exception) @pytest.mark.priority_medium @pytest.mark.unit @@ -224,12 +232,12 @@ def test_valid_inputs_to_get_streamer(self): with generate_random_image_folder() as path: streamer = get_streamer(path) - assert isinstance(streamer, ImageStreamer) + assert isinstance(streamer, DirStreamer) - streamer = get_streamer(camera_device=0) + streamer = get_streamer(0) assert isinstance(streamer, CameraStreamer) - streamer = get_streamer(camera_device=0, threaded=True) + streamer = get_streamer(input_stream=0, threaded=True) assert isinstance(streamer, ThreadedStreamer) @pytest.mark.priority_medium @@ -244,13 +252,13 @@ def test_video_file_fails_on_image_streamer(self): Random Video file Expected results: - Test passes if a ValueError is raised + Test passes if a OpenError is raised Steps 1. Attempt to create ImageStreamer """ with generate_random_single_video() as path: - with pytest.raises(ValueError): + with pytest.raises(OpenError): ImageStreamer(path) @pytest.mark.priority_medium @@ -343,25 +351,3 @@ def test_threaded_streamer_timeout(self): break assert frame_count == 5 - - @pytest.mark.priority_medium - @pytest.mark.unit - @pytest.mark.reqids(Requirements.REQ_1) - def test_get_streamer_parses_path(self): - """ - Description: - Test that get_streamer raises an error if both camera_device and path are provided - - Input data: - Path to a folder - Camera Index - - Expected results: - Test passes if a ValueError is raised - - Steps - 1. Attempt to call get_streamer with path and camera_device - """ - with generate_random_image_folder(number_of_images=1) as path: - with pytest.raises(ValueError): - get_streamer(path=path, camera_device=0) diff --git a/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_visualization.py b/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_visualization.py index 4841a63f429..ba909ffb314 100644 --- a/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_visualization.py +++ b/ote_sdk/ote_sdk/tests/usecases/exportable_code/test_visualization.py @@ -19,8 +19,7 @@ from ote_sdk.entities.shapes.rectangle import Rectangle from ote_sdk.tests.constants.ote_sdk_components import OteSdkComponent from ote_sdk.tests.constants.requirements import Requirements -from ote_sdk.usecases.exportable_code.streamer.streamer import MediaType -from ote_sdk.usecases.exportable_code.visualization import Visualizer +from ote_sdk.usecases.exportable_code.visualizers import Visualizer from ote_sdk.utils.shape_drawer import ShapeDrawer from ote_sdk.utils.time_utils import now @@ -99,11 +98,7 @@ def test_visualizer_initialization(self): Steps 1. Check attributes of "Visualizer" object initialized with default optional parameters - 2. Check attributes of "Visualizer" object initialized with default optional parameters except "media_type" is - set to "IMAGE" - 3. Check attributes of "Visualizer" object initialized with default optional parameters except "media_type" is - set to "VIDEO" - 4. Check attributes of "Visualizer" object initialized with specified optional parameters + 2. Check attributes of "Visualizer" object initialized with specified optional parameters """ def check_visualizer_attributes( @@ -121,26 +116,6 @@ def check_visualizer_attributes( # Checking attributes of "Visualizer" initialized with default optional parameters visualizer = Visualizer() - check_visualizer_attributes( - actual_visualizer=visualizer, - expected_name="Window", - expected_delay=0, - expected_show_count=False, - expected_is_one_label=False, - ) - # Checking attributes of "Visualizer" initialized with default optional parameters except "media_type" is set - # to "IMAGE" - visualizer = Visualizer(media_type=MediaType.IMAGE) - check_visualizer_attributes( - actual_visualizer=visualizer, - expected_name="Window", - expected_delay=0, - expected_show_count=False, - expected_is_one_label=False, - ) - # Checking attributes of "Visualizer" initialized with default optional parameters except "media_type" is set - # to "VIDEO" - visualizer = Visualizer(media_type=MediaType.VIDEO) check_visualizer_attributes( actual_visualizer=visualizer, expected_name="Window", @@ -150,7 +125,6 @@ def check_visualizer_attributes( ) # Checking attributes of "Visualizer" initialized with specified optional parameters visualizer = Visualizer( - media_type=MediaType.CAMERA, window_name="Test Visualizer", show_count=True, is_one_label=True, diff --git a/ote_sdk/ote_sdk/tests/utils/test_shape_drawer.py b/ote_sdk/ote_sdk/tests/utils/test_shape_drawer.py index 495ade86f6b..7d55c7d980c 100644 --- a/ote_sdk/ote_sdk/tests/utils/test_shape_drawer.py +++ b/ote_sdk/ote_sdk/tests/utils/test_shape_drawer.py @@ -213,7 +213,7 @@ def test_helpers_initialization(self): assert helpers.content_padding == 3 assert helpers.top_left_box_thickness == 1 assert helpers.content_margin == 2 - assert helpers.label_offset_box_shape == 10 + assert helpers.label_offset_box_shape == 0 assert helpers.black == (0, 0, 0) assert helpers.white == (255, 255, 255) assert helpers.yellow == (255, 255, 0) @@ -991,7 +991,7 @@ def draw_rectangle_labels( ) # Drawing rectangle frame image_copy = cv2.rectangle( - img=image_copy, pt1=(x1, y1), pt2=(x2, y2), color=[0, 0, 0], thickness=2 + img=image_copy, pt1=(x1, y1), pt2=(x2, y2), color=base_color, thickness=2 ) # Generating draw command to add labels to image draw_command, _, _ = rectangle_drawer.generate_draw_command_for_labels( @@ -1050,15 +1050,15 @@ def test_rectangle_drawer_draw(self): for rectangle, expected_cursor_position in [ ( # without changing labels positions Rectangle(0.1, 0.3, 0.8, 0.5), - Coordinate(128, 261), + Coordinate(128, 271), ), ( # with putting labels to the bottom of drawn rectangle Rectangle(0.1, 0.1, 0.9, 0.9), - Coordinate(128, 931), + Coordinate(128, 102), ), ( # with shifting labels to the left of drawn rectangle Rectangle(0.6, 0.7, 0.9, 0.9), - Coordinate(61, 670), + Coordinate(61, 680), ), ]: image = RANDOM_IMAGE.copy() @@ -1134,7 +1134,7 @@ def draw_ellipse_labels( angle=0, startAngle=0, endAngle=360, - color=[0, 0, 0], + color=base_color, lineType=cv2.LINE_AA, ) # Generating draw command to add labels to image @@ -1198,20 +1198,20 @@ def test_ellipse_drawer_draw(self): for (ellipse, expected_cursor_position, flagpole_start, flagpole_end,) in [ ( # without changing labels positions Ellipse(0.1, 0.3, 0.8, 0.5), - Coordinate(128.0, 261.2), - Coordinate(129.0, 297.2), + Coordinate(128.0, 271.2), + Coordinate(129.0, 307.2), Coordinate(129, 409), ), ( # with putting labels to the bottom Ellipse(0.1, 0.1, 0.8, 0.8), - Coordinate(128.0, 931.6), - Coordinate(129.0, 931.6), + Coordinate(128.0, 921.6), + Coordinate(129.0, 921.6), Coordinate(129, 460), ), ( # with shifting labels to the left Ellipse(0.6, 0.7, 0.9, 0.9), - Coordinate(299, 670.8), - Coordinate(769.0, 706.8), + Coordinate(299, 680.8), + Coordinate(769.0, 716.8), Coordinate(769, 819), ), ]: @@ -1273,7 +1273,7 @@ def draw_polygon_labels( image=result_without_border, contours=[contours], contourIdx=-1, - color=[0, 0, 0], + color=base_color, thickness=2, lineType=cv2.LINE_AA, ) @@ -1366,21 +1366,21 @@ def test_polygon_drawer_draw(self): for (polygon, expected_cursor_position, flagpole_start, flagpole_end,) in [ ( # without changing labels position polygon_no_change_labels_position, - Coordinate(251, 158), + Coordinate(251, 168), + Coordinate(257, 204), Coordinate(257, 204), - Coordinate(257, 194), ), ( # with putting labels to the bottom polygon_put_labels_to_bottom, - Coordinate(251, 726), + Coordinate(251, 716), + Coordinate(257, 716), Coordinate(257, 102), - Coordinate(257, 726), ), ( # with shifting labels to the left polygon_shift_labels_to_left, - Coordinate(251, 158), + Coordinate(251, 168), + Coordinate(513, 204), Coordinate(513, 204), - Coordinate(513, 194), ), ]: image = RANDOM_IMAGE.copy() diff --git a/ote_sdk/ote_sdk/usecases/evaluation/anomaly_metrics.py b/ote_sdk/ote_sdk/usecases/evaluation/anomaly_metrics.py new file mode 100644 index 00000000000..2d941cbbb4d --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/evaluation/anomaly_metrics.py @@ -0,0 +1,120 @@ +""" This module contains the implementations of performance providers for multi-score anomaly metrics. """ + +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from abc import ABC +from typing import List, Optional + +from ote_sdk.entities.metrics import ( + MetricsGroup, + MultiScorePerformance, + Performance, + ScoreMetric, +) +from ote_sdk.entities.resultset import ResultSetEntity +from ote_sdk.usecases.evaluation.averaging import MetricAverageMethod +from ote_sdk.usecases.evaluation.dice import DiceAverage +from ote_sdk.usecases.evaluation.f_measure import FMeasure +from ote_sdk.usecases.evaluation.performance_provider_interface import ( + IPerformanceProvider, +) +from ote_sdk.utils.dataset_utils import ( + contains_anomalous_images, + split_local_global_resultset, +) + + +class AnomalyLocalizationPerformance(MultiScorePerformance): + """ + This class implements a special case of the MultiScorePerformance, specific for anomaly tasks that perform + anomaly localization (detection/segmentation), in addition to anomaly classification. + + :param global_score: Image-level performance metric. + :param local_score: Pixel- or bbox-level performance metric, depending on the task type. + :param dashboard_metrics: (optional) additional statistics, containing charts, curves, and other additional info. + """ + + def __init__( + self, + global_score: ScoreMetric, + local_score: Optional[ScoreMetric], + dashboard_metrics: Optional[List[MetricsGroup]], + ): + super().__init__( + primary_score=local_score, + additional_scores=[global_score], + dashboard_metrics=dashboard_metrics, + ) + self._global_score = global_score + self._local_score = local_score + + @property + def global_score(self): + """Return the global (image-level) score metric.""" + return self._global_score + + @property + def local_score(self): + """Return the local (pixel-/bbox-level) score metric.""" + return self._local_score + + +class AnomalyLocalizationScores(IPerformanceProvider, ABC): + """ + This class provides the AnomalyLocalizationPerformance object for anomaly segmentation and anomaly detection tasks. + Depending on the subclass, the `get_performance` method returns an AnomalyLocalizationPerformance object with the + pixel- or bbox-level metric as the primary score. The global (image-level) performance metric is included as an + additional metric. + + :param resultset: ResultSet that scores will be computed for + """ + + def __init__(self, resultset: ResultSetEntity): + self.local_score: Optional[ScoreMetric] = None + self.dashboard_metrics: List[MetricsGroup] = [] + + global_resultset, local_resultset = split_local_global_resultset(resultset) + + global_metric = FMeasure(resultset=global_resultset) + global_performance = global_metric.get_performance() + self.global_score = global_performance.score + self.dashboard_metrics += global_performance.dashboard_metrics + + if contains_anomalous_images(local_resultset.ground_truth_dataset): + local_metric = self._get_local_metric(local_resultset) + local_performance = local_metric.get_performance() + self.local_score = local_performance.score + self.dashboard_metrics += local_performance.dashboard_metrics + + @staticmethod + def _get_local_metric(local_resultset: ResultSetEntity) -> IPerformanceProvider: + raise NotImplementedError + + def get_performance(self) -> Performance: + return AnomalyLocalizationPerformance( + global_score=self.global_score, + local_score=self.local_score, + dashboard_metrics=self.dashboard_metrics, + ) + + +class AnomalySegmentationScores(AnomalyLocalizationScores): + """ + Performance provider for anomaly segmentation tasks. + """ + + @staticmethod + def _get_local_metric(local_resultset: ResultSetEntity) -> IPerformanceProvider: + return DiceAverage(resultset=local_resultset, average=MetricAverageMethod.MICRO) + + +class AnomalyDetectionScores(AnomalyLocalizationScores): + """ + Performance provider for anomaly detection tasks. + """ + + @staticmethod + def _get_local_metric(local_resultset: ResultSetEntity) -> IPerformanceProvider: + return FMeasure(resultset=local_resultset) diff --git a/ote_sdk/ote_sdk/usecases/evaluation/metrics_helper.py b/ote_sdk/ote_sdk/usecases/evaluation/metrics_helper.py index eb8dce766c0..7d4e808b2a3 100644 --- a/ote_sdk/ote_sdk/usecases/evaluation/metrics_helper.py +++ b/ote_sdk/ote_sdk/usecases/evaluation/metrics_helper.py @@ -8,6 +8,10 @@ from ote_sdk.entities.resultset import ResultSetEntity from ote_sdk.usecases.evaluation.accuracy import Accuracy +from ote_sdk.usecases.evaluation.anomaly_metrics import ( + AnomalyDetectionScores, + AnomalySegmentationScores, +) from ote_sdk.usecases.evaluation.averaging import MetricAverageMethod from ote_sdk.usecases.evaluation.dice import DiceAverage from ote_sdk.usecases.evaluation.f_measure import FMeasure @@ -68,3 +72,27 @@ def compute_accuracy( :return: Accuracy object """ return Accuracy(resultset=resultset, average=average) + + @staticmethod + def compute_anomaly_segmentation_scores( + resultset: ResultSetEntity, + ) -> AnomalySegmentationScores: + """ + Compute the anomaly localization performance metrics on an anomaly segmentation resultset. + + :param resultset: The resultset used to compute the metrics + :return: AnomalyLocalizationScores object + """ + return AnomalySegmentationScores(resultset) + + @staticmethod + def compute_anomaly_detection_scores( + resultset: ResultSetEntity, + ) -> AnomalyDetectionScores: + """ + Compute the anomaly localization performance metrics on an anomaly detection resultset. + + :param resultset: The resultset used to compute the metrics + :return: AnomalyLocalizationScores object + """ + return AnomalyDetectionScores(resultset) diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/README.md b/ote_sdk/ote_sdk/usecases/exportable_code/demo/README.md index 10241609132..6b77235d027 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/README.md +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/README.md @@ -1,25 +1,29 @@ -# Exportable code - demo package +# Exportable code -Demo package contains simple demo to get and visualize result of model inference. +Exportable code is a .zip archive that contains simple demo to get and visualize result of model inference. -## Structure of generated package: +## Structure of generated zip: * model - `model.xml` - `model.bin` + - `config.json` * python + - model_wrappers (Optional) + - `__init__.py` + - model_wrappers required to run demo - `README.md` - `LICENSE` - `demo.py` - `requirements.txt` - - `demo_package-0.0-py3-none-any.whl` +> **NOTE**: Zip archive contains model_wrappers when [ModelAPI](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos/common/python/openvino/model_zoo/model_api) has no appropriate standard model wrapper for the model. ## Prerequisites * [Python 3.8](https://www.python.org/downloads/) * [Git](https://git-scm.com/) -## Setup Demo Package +## Install requirements to run demo 1. Install [prerequisites](#prerequisites). You may also need to [install pip](https://pip.pypa.io/en/stable/installation/). For example, on Ubuntu execute the following command to get pip installed: ``` @@ -53,75 +57,60 @@ Demo package contains simple demo to get and visualize result of model inference ``` > **NOTE**: On Linux and macOS, you may need to type `python3` instead of `python`. -3. Install the package in the environment: +3. Install requirements in the environment: ``` - python -m pip install demo_package-0.0-py3-none-any.whl + python -m pip install -r requirements.txt ``` +4. Add `model_wrappers` package to PYTHONPATH: -When the package is installed, you can import it as follows: -``` -python -c "from demo_package import create_model" -``` - -## Usecases - -1. Running the `demo.py` application with the `-h` option yields the following usage message: + On Linux and macOS: ``` - usage: demo.py [-h] -i INPUT -m MODEL [-c CONFIG] - - Options: - -h, --help Show this help message and exit. - -i INPUT, --input INPUT - Required. An input to process. The input must be a - single image, a folder of images, video file or camera - id. - -m MODEL, --model MODEL - Required. Path to an .xml file with a trained model. - -c CONFIG, --config CONFIG - Optional. Path to an .json file with parameters for - model. - + export PYTHONPATH=$PYTHONPATH:/path/to/model_wrappers ``` - As a model, you can use `model.xml` from generated zip. So can use the following command to do inference with a pre-trained model: + On Windows: ``` - python3 demo.py \ - -i /inputVideo.mp4 \ - -m /model.xml + set PYTHONPATH=%PYTHONPATH%;/path/to/model_wrappers ``` - You can press `Q` to stop inference during demo running. - > **NOTE**: Default configuration contains info about pre- and postprocessing to model inference and is guaranteed to be correct. - > Also you can define own json config that specifies needed parameters, but any change should be made with caution. - > To create this config please see `config.json` in demo_package wheel. -2. You can create your own demo application, using `demo_package`. The main function of package is `create_model`: - ```python - def create_model(model_path: Path, config_file: Path = None) -> Model: - """ - Create model using ModelAPI factory +## Usecase - :param model_path: Path to .xml model - :param config_file: Path to .json config. If it is not defined, use config from demo_package - """ - ``` - Function returns model wrapper from ModelAPI. To get more information please see [ModelAPI](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos/common/python/openvino/model_zoo/model_api). +Running the `demo.py` application with the `-h` option yields the following usage message: - Some example how to use `demo_package`: - ```python - import cv2 - from demo_package import create_model +``` +usage: demo.py [-h] -i INPUT -m MODELS [MODELS ...] [-it {sync,async}] [-l] +Options: + -h, --help Show this help message and exit. + -i INPUT, --input INPUT + Required. An input to process. The input must be a + single image, a folder of images, video file or camera + id. + -m MODELS [MODELS ...], --models MODELS [MODELS ...] + Required. Path to directory with trained model and + configuration file. If you provide several models you + will start the task chain pipeline with the provided + models in the order in which they were specified + -it {sync,async}, --inference_type {sync,async} + Optional. Type of inference for single model + -l, --loop Optional. Enable reading the input in a loop. +``` - # read input - frame = cv2.imread(path_to_image) - # create model - model = create_model(path_to_model) - # inference - objects = model(frame) - # show results using some visualizer - output = visualizer.draw(frame, objects) - cv2.imshow(output) - ``` +As a model, you can use path to model directory from generated zip. So you can use the following command to do inference with a pre-trained model: + +``` +python3 demo.py \ + -i /inputVideo.mp4 \ + -m +``` + +You can press `Q` to stop inference during demo running. + +> **NOTE**: If you provide a single image as an input, the demo processes and renders it quickly, then exits. To continuously +> visualize inference results on the screen, apply the `loop` option, which enforces processing a single image in a loop. + +> **NOTE**: Default configuration contains info about pre- and post processing for inference and is guaranteed to be correct. +> Also you can change `config.json` that specifies needed parameters, but any changes should be made with caution. ## Troubleshooting @@ -130,4 +119,9 @@ python -c "from demo_package import create_model" python -m pip install --proxy http://:@: ``` -2. If you use Anaconda environment, you should consider that OpenVINO has limited [Conda support](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_conda.html) for Python 3.6 and 3.7 versions only. But the demo package requires python 3.8. So please use other tools to create the environment (like `venv` or `virtualenv`) and use `pip` as a package manager. \ No newline at end of file +2. If you use Anaconda environment, you should consider that OpenVINO has limited [Conda support](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_conda.html) for Python 3.6 and 3.7 versions only. But the demo package requires python 3.8. So please use other tools to create the environment (like `venv` or `virtualenv`) and use `pip` as a package manager. + +3. If you have problems when you try to use `pip install` command, please update pip version by following command: + ``` + python -m pip install --upgrade pip + ``` diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo.py index 43eef700182..e4e633378d8 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo.py @@ -10,10 +10,13 @@ from pathlib import Path # pylint: disable=no-name-in-module, import-error -from demo_package import SyncDemo, create_model, create_output_converter - -from ote_sdk.usecases.exportable_code.streamer import get_media_type -from ote_sdk.usecases.exportable_code.visualization import Visualizer +from ote_sdk.usecases.exportable_code.demo.demo_package import ( + AsyncExecutor, + ChainExecutor, + ModelContainer, + SyncExecutor, + create_visualizer, +) def build_argparser(): @@ -38,35 +41,75 @@ def build_argparser(): ) args.add_argument( "-m", - "--model", - help="Required. Path to an .xml file with a trained model.", + "--models", + help="Required. Path to directory with trained model and configuration file. " + "If you provide several models you will start the task chain pipeline with " + "the provided models in the order in which they were specified.", + nargs="+", required=True, type=Path, ) args.add_argument( - "-c", - "--config", - help="Optional. Path to an .json file with parameters for model.", - type=Path, + "-it", + "--inference_type", + help="Optional. Type of inference for single model.", + choices=["sync", "async"], + default="sync", + type=str, + ) + args.add_argument( + "-l", + "--loop", + help="Optional. Enable reading the input in a loop.", + default=False, + action="store_true", ) return parser +EXECUTORS = { + "sync": SyncExecutor, + "async": AsyncExecutor, + "chain": ChainExecutor, +} + + +def get_inferencer_class(type_inference, models): + """ + Return class for inference of models + """ + if len(models) > 1: + type_inference = "chain" + print( + "You started the task chain pipeline with the provided models " + "in the order in which they were specified" + ) + return EXECUTORS[type_inference] + + def main(): """ Main function that is used to run demo. """ args = build_argparser().parse_args() - # create components for demo + # create models + models = [] + for model_dir in args.models: + model = ModelContainer(model_dir) + models.append(model) + + inferencer = get_inferencer_class(args.inference_type, models) + + # create visualizer + visualizer = create_visualizer(models[-1].task_type) - model = create_model(args.model, args.config) - media_type = get_media_type(args.input) + if len(models) == 1: + models = models[0] - visualizer = Visualizer(media_type) - converter = create_output_converter(args.config) - demo = SyncDemo(model, visualizer, converter) - demo.run(args.input) + # create inferencer and run + demo = inferencer(models, visualizer) + demo.run(args.input, args.loop) if __name__ == "__main__": diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/__init__.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/__init__.py index 0440c71c974..47c9e469f23 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/__init__.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/__init__.py @@ -6,7 +6,15 @@ # SPDX-License-Identifier: Apache-2.0 # -from .sync import SyncDemo -from .utils import create_model, create_output_converter +from .executors import AsyncExecutor, ChainExecutor, SyncExecutor +from .model_container import ModelContainer +from .utils import create_output_converter, create_visualizer -__all__ = ["SyncDemo", "create_model", "create_output_converter"] +__all__ = [ + "SyncExecutor", + "AsyncExecutor", + "ChainExecutor", + "create_output_converter", + "create_visualizer", + "ModelContainer", +] diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/__init__.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/__init__.py new file mode 100644 index 00000000000..692cbd8c514 --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/__init__.py @@ -0,0 +1,17 @@ +""" +Initialization of executors +""" + +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from .asynchronous import AsyncExecutor +from .sync_pipeline import ChainExecutor +from .synchronous import SyncExecutor + +__all__ = [ + "SyncExecutor", + "AsyncExecutor", + "ChainExecutor", +] diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/asynchronous.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/asynchronous.py new file mode 100644 index 00000000000..ff79d348fe9 --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/asynchronous.py @@ -0,0 +1,74 @@ +""" +Async executor based on ModelAPI +""" +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from typing import Any, Tuple, Union + +import numpy as np +from openvino.model_zoo.model_api.pipelines import AsyncPipeline + +from ote_sdk.usecases.exportable_code.demo.demo_package.model_container import ( + ModelContainer, +) +from ote_sdk.usecases.exportable_code.demo.demo_package.utils import ( + create_output_converter, +) +from ote_sdk.usecases.exportable_code.streamer import get_streamer +from ote_sdk.usecases.exportable_code.visualizers import Visualizer + + +class AsyncExecutor: + """ + Async inferencer + + Args: + model: model for inference + visualizer: visualizer of inference results + """ + + def __init__(self, model: ModelContainer, visualizer: Visualizer) -> None: + self.model = model.core_model + self.visualizer = visualizer + self.converter = create_output_converter(model.task_type, model.labels) + self.async_pipeline = AsyncPipeline(self.model) + + def run(self, input_stream: Union[int, str], loop: bool = False) -> None: + """ + Async inference for input stream (image, video stream, camera) + """ + streamer = get_streamer(input_stream, loop) + next_frame_id = 0 + next_frame_id_to_show = 0 + stop_visualization = False + + for frame in streamer: + results = self.async_pipeline.get_result(next_frame_id_to_show) + while results: + output = self.render_result(results) + next_frame_id_to_show += 1 + self.visualizer.show(output) + if self.visualizer.is_quit(): + stop_visualization = True + results = self.async_pipeline.get_result(next_frame_id_to_show) + if stop_visualization: + break + self.async_pipeline.submit_data(frame, next_frame_id, {"frame": frame}) + next_frame_id += 1 + self.async_pipeline.await_all() + for next_frame_id_to_show in range(next_frame_id_to_show, next_frame_id): + results = self.async_pipeline.get_result(next_frame_id_to_show) + output = self.render_result(results) + self.visualizer.show(output) + + def render_result(self, results: Tuple[Any, dict]) -> np.ndarray: + """ + Render for results of inference + """ + predictions, frame_meta = results + annotation_scene = self.converter.convert_to_annotation(predictions, frame_meta) + current_frame = frame_meta["frame"] + output = self.visualizer.draw(current_frame, annotation_scene, frame_meta) + return output diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py new file mode 100644 index 00000000000..756d325b5f1 --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/sync_pipeline.py @@ -0,0 +1,105 @@ +""" +Sync pipeline executor based on ModelAPI +""" +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from typing import List, Tuple, Union + +import numpy as np + +from ote_sdk.entities.annotation import ( + Annotation, + AnnotationSceneEntity, + AnnotationSceneKind, +) +from ote_sdk.entities.shapes.rectangle import Rectangle +from ote_sdk.usecases.exportable_code.demo.demo_package.model_container import ( + ModelContainer, +) +from ote_sdk.usecases.exportable_code.demo.demo_package.utils import ( + create_output_converter, +) +from ote_sdk.usecases.exportable_code.streamer import get_streamer +from ote_sdk.usecases.exportable_code.visualizers import Visualizer +from ote_sdk.utils.shape_factory import ShapeFactory + + +class ChainExecutor: + """ + Sync executor for task-chain inference + + Args: + models: list of models for inference + visualizer: visualizer of inference results + """ + + def __init__( + self, + models: List[ModelContainer], + visualizer: Visualizer, + ) -> None: + self.models = models + self.visualizer = visualizer + self.converters = [] + for model in self.models: + self.converters.append( + create_output_converter(model.task_type, model.labels) + ) + + # pylint: disable=too-many-locals + def single_run(self, input_image: np.ndarray) -> AnnotationSceneEntity: + """ + Inference for single image + """ + current_objects = [(input_image, Annotation(Rectangle(0, 0, 1, 1), labels=[]))] + result_scene = AnnotationSceneEntity([], AnnotationSceneKind.PREDICTION) + for index, model in enumerate(self.models): + new_objects = [] + for item, parent_annotation in current_objects: + predictions, frame_meta = model.core_model(item) + annotation_scene = self.converters[index].convert_to_annotation( + predictions, frame_meta + ) + for annotation in annotation_scene.annotations: + new_item, item_annotation = self.crop( + item, parent_annotation, annotation + ) + new_objects.append((new_item, item_annotation)) + if model.task_type.is_global: + for label in item_annotation.get_labels(): + parent_annotation.append_label(label) + else: + result_scene.append_annotation(item_annotation) + current_objects = new_objects + return result_scene + + @staticmethod + def crop( + item: np.ndarray, parent_annotation: Annotation, item_annotation: Annotation + ) -> Tuple[np.ndarray, Annotation]: + """ + Crop operation between chain stages + """ + new_item = ShapeFactory.shape_as_rectangle( + item_annotation.shape + ).crop_numpy_array(item) + item_annotation.shape = item_annotation.shape.normalize_wrt_roi_shape( + ShapeFactory.shape_as_rectangle(parent_annotation.shape) + ) + return new_item, item_annotation + + def run(self, input_stream: Union[int, str], loop: bool = False) -> None: + """ + Run demo using input stream (image, video stream, camera) + """ + streamer = get_streamer(input_stream, loop) + + for frame in streamer: + # getting result for single image + annotation_scene = self.single_run(frame) + output = self.visualizer.draw(frame, annotation_scene) + self.visualizer.show(output) + if self.visualizer.is_quit(): + break diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/synchronous.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/synchronous.py new file mode 100644 index 00000000000..0c702a2a82c --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/executors/synchronous.py @@ -0,0 +1,49 @@ +""" +Sync Executor based on ModelAPI +""" +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from typing import Union + +from ote_sdk.usecases.exportable_code.demo.demo_package.model_container import ( + ModelContainer, +) +from ote_sdk.usecases.exportable_code.demo.demo_package.utils import ( + create_output_converter, +) +from ote_sdk.usecases.exportable_code.streamer import get_streamer +from ote_sdk.usecases.exportable_code.visualizers import Visualizer + + +class SyncExecutor: + """ + Synd executor for model inference + + Args: + model: model for inference + visualizer: visualizer of inference results + """ + + def __init__(self, model: ModelContainer, visualizer: Visualizer) -> None: + self.model = model.core_model + self.visualizer = visualizer + self.converter = create_output_converter(model.task_type, model.labels) + + def run(self, input_stream: Union[int, str], loop: bool = False) -> None: + """ + Run demo using input stream (image, video stream, camera) + """ + streamer = get_streamer(input_stream, loop) + + for frame in streamer: + # getting result include preprocessing, infer, postprocessing for sync infer + predictions, frame_meta = self.model(frame) + annotation_scene = self.converter.convert_to_annotation( + predictions, frame_meta + ) + output = self.visualizer.draw(frame, annotation_scene, frame_meta) + self.visualizer.show(output) + if self.visualizer.is_quit(): + break diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/model_container.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/model_container.py new file mode 100644 index 00000000000..581b26c3f0f --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/model_container.py @@ -0,0 +1,76 @@ +""" +ModelContainer +""" +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import importlib +from pathlib import Path +from typing import Any, Tuple + +import numpy as np +from openvino.model_zoo.model_api.adapters import OpenvinoAdapter, create_core +from openvino.model_zoo.model_api.models import Model + +from ote_sdk.entities.label_schema import LabelSchemaEntity +from ote_sdk.entities.model_template import TaskType +from ote_sdk.serialization.label_mapper import LabelSchemaMapper + +from .utils import get_model_path, get_parameters + + +class ModelContainer: + """ + Class for storing the model wrapper based on Model API and needed parameters of model + + Args: + model_dir: path to model directory + """ + + def __init__(self, model_dir: Path) -> None: + self.parameters = get_parameters(model_dir / "config.json") + self._labels = LabelSchemaMapper.backward( + self.parameters["model_parameters"]["labels"] + ) + self._task_type = TaskType[self.parameters["converter_type"]] + + # labels for modelAPI wrappers can be empty, because unused in pre- and postprocessing + self.model_parameters = self.parameters["model_parameters"] + self.model_parameters["labels"] = [] + + model_adapter = OpenvinoAdapter( + create_core(), get_model_path(model_dir / "model.xml") + ) + + self._initialize_wrapper() + self.core_model = Model.create_model( + self.parameters["type_of_model"], + model_adapter, + self.model_parameters, + preload=True, + ) + + @property + def task_type(self) -> TaskType: + """ + Task type property + """ + return self._task_type + + @property + def labels(self) -> LabelSchemaEntity: + """ + Labels property + """ + return self._labels + + @staticmethod + def _initialize_wrapper() -> None: + try: + importlib.import_module("model_wrappers") + except ModuleNotFoundError: + print("Using model wrapper from Open Model Zoo ModelAPI") + + def __call__(self, input_data: np.ndarray) -> Tuple[Any, dict]: + return self.core_model(input_data) diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/sync.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/sync.py deleted file mode 100644 index c4034fb4302..00000000000 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/sync.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Sync Demo based on ModelAPI -""" -# Copyright (C) 2021-2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from ote_sdk.usecases.exportable_code.streamer import get_streamer - - -class SyncDemo: - """ - Synd demo for model inference - - Args: - model: model for inference - visualizer: for visualize inference results - converter: convert model ourtput to annotation scene - """ - - def __init__(self, model, visualizer, converter) -> None: - self.model = model - self.visualizer = visualizer - self.converter = converter - - def run(self, input_stream): - """ - Run demo using input stream (image, video stream, camera) - """ - streamer = get_streamer(input_stream) - for frame in streamer: - # getting result include preprocessing, infer, postprocessing for sync infer - dict_data, input_meta = self.model.preprocess(frame) - raw_result = self.model.infer_sync(dict_data) - predictions = self.model.postprocess(raw_result, input_meta) - annotation_scene = self.converter.convert_to_annotation( - predictions, input_meta - ) - - # any user's visualizer - output = self.visualizer.draw(frame, annotation_scene) - self.visualizer.show(output) - - if self.visualizer.is_quit(): - break diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/utils.py b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/utils.py index 596c5444965..17a7e395a9d 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/utils.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/demo_package/utils.py @@ -5,19 +5,16 @@ # SPDX-License-Identifier: Apache-2.0 # -import importlib import json from pathlib import Path from typing import Optional -from openvino.model_zoo.model_api.adapters import OpenvinoAdapter, create_core -from openvino.model_zoo.model_api.models import Model - -from ote_sdk.entities.label import Domain -from ote_sdk.serialization.label_mapper import LabelSchemaMapper +from ote_sdk.entities.label_schema import LabelSchemaEntity +from ote_sdk.entities.model_template import TaskType, task_type_to_label_domain from ote_sdk.usecases.exportable_code.prediction_to_annotation_converter import ( create_converter, ) +from ote_sdk.usecases.exportable_code.visualizers import AnomalyVisualizer, Visualizer def get_model_path(path: Optional[Path]) -> Path: @@ -49,34 +46,21 @@ def get_parameters(path: Optional[Path]) -> dict: return parameters -def create_model(model_path: Path, config_file: Optional[Path] = None) -> Model: +def create_output_converter(task_type: TaskType, labels: LabelSchemaEntity): """ - Create model using ModelAPI factory + Create annotation converter according to kind of task """ - model_adapter = OpenvinoAdapter(create_core(), get_model_path(model_path)) - parameters = get_parameters(config_file) - try: - importlib.import_module(".model", "demo_package") - except ImportError: - print("Using model wrapper from Open Model Zoo ModelAPI") - # labels for modelAPI wrappers can be empty, because unused in pre- and postprocessing - parameters["model_parameters"]["labels"] = [] - model = Model.create_model( - parameters["type_of_model"], - model_adapter, - parameters["model_parameters"], - preload=True, - ) - - return model - - -def create_output_converter(config_file: Path = None): + converter_type = task_type_to_label_domain(task_type) + return create_converter(converter_type, labels) + + +def create_visualizer(task_type: TaskType): """ - Create annotation converter according to kind of task + Create visualizer according to kind of task """ - parameters = get_parameters(config_file) - converter_type = Domain[parameters["converter_type"]] - labels = LabelSchemaMapper.backward(parameters["model_parameters"]["labels"]) - return create_converter(converter_type, labels) + + if task_type.is_anomaly: + return AnomalyVisualizer(window_name="Result") + + return Visualizer(window_name="Result") diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt b/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt index d70cac90c74..b9e54263900 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt @@ -1,3 +1,3 @@ -openvino==2022.1.0.dev20220316 -openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@ef556fee2cdd92488838b49ef8939c303992d89c#egg=openmodelzoo-modelapi&subdirectory=demos/common/python -ote-sdk @ git+https://github.com/openvinotoolkit/training_extensions/@7f3890bcc53ce5ebd76ddbc72e149840fbc7595a#egg=ote-sdk&subdirectory=ote_sdk +openvino==2022.1.0 +openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2022/SCv1.1#egg=openmodelzoo-modelapi&subdirectory=demos/common/python +ote-sdk @ git+https://github.com/openvinotoolkit/training_extensions/@83a70cdd11d7c1bbc11ceb1b758750fb4bd18aae#egg=ote-sdk&subdirectory=ote_sdk diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/prediction_to_annotation_converter.py b/ote_sdk/ote_sdk/usecases/exportable_code/prediction_to_annotation_converter.py index 2837a384f98..853537085b5 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/prediction_to_annotation_converter.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/prediction_to_annotation_converter.py @@ -392,14 +392,18 @@ def convert_to_annotation( ) for point in contour ] - annotations.append( - Annotation( - Polygon(points=points), - labels=[ - ScoredLabel(self.labels[int(class_idx) - 1], float(score)) - ], + polygon = Polygon(points=points) + if polygon.get_area() > 1e-12: + annotations.append( + Annotation( + polygon, + labels=[ + ScoredLabel( + self.labels[int(class_idx) - 1], float(score) + ) + ], + ) ) - ) annotation_scene = AnnotationSceneEntity( kind=AnnotationSceneKind.PREDICTION, annotations=annotations, @@ -431,22 +435,25 @@ def convert_to_annotation( continue if len(contour) <= 2: continue - box_points = cv2.boxPoints(cv2.minAreaRect(contour)) points = [ Point( x=point[0] / metadata["original_shape"][1], y=point[1] / metadata["original_shape"][0], ) - for point in box_points + for point in cv2.boxPoints(cv2.minAreaRect(contour)) ] - annotations.append( - Annotation( - Polygon(points=points), - labels=[ - ScoredLabel(self.labels[int(class_idx) - 1], float(score)) - ], + polygon = Polygon(points=points) + if polygon.get_area() > 1e-12: + annotations.append( + Annotation( + polygon, + labels=[ + ScoredLabel( + self.labels[int(class_idx) - 1], float(score) + ) + ], + ) ) - ) annotation_scene = AnnotationSceneEntity( kind=AnnotationSceneKind.PREDICTION, annotations=annotations, diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/streamer/__init__.py b/ote_sdk/ote_sdk/usecases/exportable_code/streamer/__init__.py index f353c719f97..8353913d3af 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/streamer/__init__.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/streamer/__init__.py @@ -7,18 +7,22 @@ from ote_sdk.usecases.exportable_code.streamer.streamer import ( CameraStreamer, + DirStreamer, ImageStreamer, + InvalidInput, + OpenError, ThreadedStreamer, VideoStreamer, - get_media_type, get_streamer, ) __all__ = [ "CameraStreamer", + "DirStreamer", "ImageStreamer", "ThreadedStreamer", "VideoStreamer", - "get_media_type", + "InvalidInput", + "OpenError", "get_streamer", ] diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/streamer/streamer.py b/ote_sdk/ote_sdk/usecases/exportable_code/streamer/streamer.py index 7950dc0852c..5785476ba6f 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/streamer/streamer.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/streamer/streamer.py @@ -8,174 +8,45 @@ import abc import multiprocessing +import os import queue import sys from enum import Enum -from pathlib import Path -from typing import Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union +from typing import Iterator, Union import cv2 import numpy as np -from natsort import natsorted -class MediaType(Enum): - """ - This Enum represents the types of input - """ - - IMAGE = 1 - VIDEO = 2 - CAMERA = 3 - - -class MediaExtensions(NamedTuple): - """ - This NamedTuple represents the extensions for input - """ - - IMAGE: Tuple[str, ...] - VIDEO: Tuple[str, ...] - - -MEDIA_EXTENSIONS = MediaExtensions( - IMAGE=(".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp"), - VIDEO=(".avi", ".mp4"), -) - - -def get_media_type(path: Optional[Union[str, Path]]) -> MediaType: - """ - Get Media Type from the input path. - :param path: Path to file or directory. - Could be None, which implies camera media type. - """ - if isinstance(path, str): - path = Path(path) - - media_type: MediaType - - if path is None: - media_type = MediaType.CAMERA - - elif path.is_dir(): - if _get_filenames(path, MediaType.IMAGE): - media_type = MediaType.IMAGE - - elif path.is_file(): - if _is_file_with_supported_extensions(path, _get_extensions(MediaType.IMAGE)): - media_type = MediaType.IMAGE - elif _is_file_with_supported_extensions(path, _get_extensions(MediaType.VIDEO)): - media_type = MediaType.VIDEO - else: - raise ValueError("File extension not supported.") - else: - raise ValueError("File or folder does not exist") - - return media_type - - -def _get_extensions(media_type: MediaType) -> Tuple[str, ...]: - """ - Get extensions of the input media type. - :param media_type: Type of the media. Either image or video. - :return: Supported extensions for the corresponding media type. - - :example: - - >>> _get_extensions(media_type=MediaType.IMAGE) - ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp') - >>> _get_extensions(media_type=MediaType.VIDEO) - ('.avi', '.mp4') - +class InvalidInput(Exception): """ - return getattr(MEDIA_EXTENSIONS, media_type.name) - - -def _is_file_with_supported_extensions(path: Path, extensions: Tuple[str, ...]) -> bool: + Exception for wrong input format """ - Check if the file is supported for the media type - :param path: File path to check - :param extensions: Supported extensions for the media type - - :example: - - >>> from pathlib import Path - >>> path = Path("./demo.mp4") - >>> extensions = _get_extensions(media_type=MediaType.VIDEO) - >>> _is_file_with_supported_extensions(path, extensions) - True - - >>> path = Path("demo.jpg") - >>> extensions = _get_extensions(media_type=MediaType.IMAGE) - >>> _is_file_with_supported_extensions(path, extensions) - True - >>> path = Path("demo.mp3") - >>> extensions = _get_extensions(media_type=MediaType.IMAGE) - >>> _is_file_with_supported_extensions(path, extensions) - False - - """ - return path.suffix.lower() in extensions + def __init__(self, message: str) -> None: + super().__init__(message) + self.message = message -def _get_filenames(path: Union[str, Path], media_type: MediaType) -> List[str]: +class OpenError(Exception): """ - Get filenames from a directory or a path to a file. - :param path: Path to the file or to the location that contains files. - :param media_type: Type of the media (image or video) - - :example: - >>> path = "../images" - >>> _get_filenames(path, media_type=MediaType.IMAGE) - ['images/4.jpeg', 'images/1.jpeg', 'images/5.jpeg', 'images/3.jpeg', 'images/2.jpeg'] - + Exception for open reader """ - extensions = _get_extensions(media_type) - filenames: List[str] = [] - - if media_type == MediaType.CAMERA: - raise ValueError( - "Cannot get filenames for camera. Only image and video files are supported." - ) - - if isinstance(path, str): - path = Path(path) - - if path.is_file(): - if _is_file_with_supported_extensions(path, extensions): - filenames = [path.as_posix()] - else: - raise ValueError("Extension not supported for media type") - - if path.is_dir(): - for filename in path.rglob("*"): - if _is_file_with_supported_extensions(filename, extensions): - filenames.append(filename.as_posix()) - - filenames = natsorted(filenames) # type: ignore[assignment] - if len(filenames) == 0: - raise FileNotFoundError(f"No {media_type.name} file found in {path}!") + def __init__(self, message: str) -> None: + super().__init__(message) + self.message = message - return filenames - -def _read_video_stream(stream: cv2.VideoCapture) -> Iterator[np.ndarray]: +class MediaType(Enum): """ - Read video and yield the frame. - :param stream: Video stream captured via OpenCV's VideoCapture - :return: Individual frame + This Enum represents the types of input """ - while True: - frame_available, frame = stream.read() - if not frame_available: - break - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - yield frame - stream.release() + IMAGE = 1 + DIR = 2 + VIDEO = 3 + CAMERA = 4 class BaseStreamer(metaclass=abc.ABCMeta): @@ -184,25 +55,22 @@ class BaseStreamer(metaclass=abc.ABCMeta): """ @abc.abstractmethod - def get_stream(self, stream_input): + def __iter__(self) -> Iterator[np.ndarray]: """ - Get the streamer object, depending on the media type. - :param stream_input: Path to the stream or - camera device index in case to capture from camera. - :return: Streamer object. + Iterate through the streamer object that is a Python Generator object. + :return: Yield the image or video frame. """ raise NotImplementedError @abc.abstractmethod - def __iter__(self) -> Iterator[np.ndarray]: + def get_type(self) -> MediaType: """ - Iterate through the streamer object that is a Python Generator object. - :return: Yield the image or video frame. + Get type of streamer """ raise NotImplementedError -def _process_run(streamer: BaseStreamer, buffer: multiprocessing.Queue): +def _process_run(streamer: BaseStreamer, buffer: multiprocessing.Queue) -> None: """ Private function that is run by the thread. @@ -231,17 +99,14 @@ class ThreadedStreamer(BaseStreamer): ... pass """ - def __init__(self, streamer: BaseStreamer, buffer_size: int = 2): + def __init__(self, streamer: BaseStreamer, buffer_size: int = 2) -> None: self.buffer_size = buffer_size self.streamer = streamer - def get_stream(self, _=None) -> BaseStreamer: - return self.streamer - def __iter__(self) -> Iterator[np.ndarray]: buffer: multiprocessing.Queue = multiprocessing.Queue(maxsize=self.buffer_size) process = multiprocessing.Process( - target=_process_run, args=(self.get_stream(), buffer) + target=_process_run, args=(self.streamer, buffer) ) # Make thread a daemon so that it will exit when the main program exits as well process.daemon = True @@ -262,11 +127,17 @@ def __iter__(self) -> Iterator[np.ndarray]: if sys.version_info >= (3, 7) and process.exitcode is None: process.kill() + def get_type(self) -> MediaType: + """ + Get type of internal streamer + """ + return self.streamer.get_type() + class VideoStreamer(BaseStreamer): """ Video Streamer - :param path: Path to the video file or directory. + :param path: Path to the video file. :example: @@ -275,17 +146,27 @@ class VideoStreamer(BaseStreamer): ... pass """ - def __init__(self, path: str) -> None: + def __init__(self, input_path: str, loop: bool = False) -> None: self.media_type = MediaType.VIDEO - self.filenames = _get_filenames(path, media_type=MediaType.VIDEO) - - def get_stream(self, stream_input: str) -> cv2.VideoCapture: - return cv2.VideoCapture(stream_input) + self.loop = loop + self.cap = cv2.VideoCapture() + status = self.cap.open(input_path) + if not status: + raise InvalidInput(f"Can't open the video from {input_path}") def __iter__(self) -> Iterator[np.ndarray]: - for filename in self.filenames: - stream = self.get_stream(stream_input=filename) - yield from _read_video_stream(stream) + while True: + status, image = self.cap.read() + if status: + yield cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + else: + if self.loop: + self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + else: + break + + def get_type(self) -> MediaType: + return MediaType.VIDEO class CameraStreamer(BaseStreamer): @@ -302,22 +183,36 @@ class CameraStreamer(BaseStreamer): ... break """ - def __init__(self, camera_device: Optional[int] = None): + def __init__(self, camera_device: int = 0) -> None: self.media_type = MediaType.CAMERA - self.camera_device = 0 if camera_device is None else camera_device - - def get_stream(self, stream_input: int): - return cv2.VideoCapture(stream_input) + try: + self.stream = cv2.VideoCapture(int(camera_device)) + except ValueError as error: + raise InvalidInput(f"Can't find the camera {camera_device}") from error def __iter__(self) -> Iterator[np.ndarray]: - stream = self.get_stream(stream_input=self.camera_device) - yield from _read_video_stream(stream) + """ + Read video and yield the frame. + :param stream: Video stream captured via OpenCV's VideoCapture + :return: Individual frame + """ + while True: + frame_available, frame = self.stream.read() + if not frame_available: + break + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + yield frame + + self.stream.release() + + def get_type(self) -> MediaType: + return MediaType.CAMERA class ImageStreamer(BaseStreamer): """ - Stream from image file or directory. - :param path: Path to an image or directory. + Stream from image file. + :param path: Path to an image. :example: @@ -327,56 +222,103 @@ class ImageStreamer(BaseStreamer): ... cv2.waitKey(0) """ - def __init__(self, path: str) -> None: + def __init__(self, input_path: str, loop: bool = False) -> None: + self.loop = loop self.media_type = MediaType.IMAGE - self.filenames = _get_filenames(path=path, media_type=MediaType.IMAGE) + if not os.path.isfile(input_path): + raise InvalidInput(f"Can't find the image by {input_path}") + self.image = cv2.imread(input_path, cv2.IMREAD_COLOR) + if self.image is None: + raise OpenError(f"Can't open the image from {input_path}") + self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB) - @staticmethod - def get_stream(stream_input: str) -> Iterable[np.ndarray]: - image = cv2.imread(stream_input) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - yield image + def __iter__(self) -> Iterator[np.ndarray]: + if not self.loop: + yield self.image + else: + while True: + yield self.image + + def get_type(self) -> MediaType: + return MediaType.IMAGE + + +class DirStreamer(BaseStreamer): + """ + Stream from directory of images. + :param path: Path to directory. + + :example: + + >>> streamer = DirStreamer(path="../images") + ... for frame in streamer: + ... cv2.imshow("Window", frame) + ... cv2.waitKey(0) + """ + + def __init__(self, input_path: str, loop: bool = False) -> None: + self.loop = loop + self.media_type = MediaType.DIR + self.dir = input_path + if not os.path.isdir(self.dir): + raise InvalidInput(f"Can't find the dir by {input_path}") + self.names = sorted(os.listdir(self.dir)) + if not self.names: + raise OpenError(f"The dir {input_path} is empty") + self.file_id = 0 + for name in self.names: + filename = os.path.join(self.dir, name) + image = cv2.imread(str(filename), cv2.IMREAD_COLOR) + if image is not None: + return + raise OpenError(f"Can't read the first image from {input_path}") def __iter__(self) -> Iterator[np.ndarray]: - for filename in self.filenames: - yield from self.get_stream(stream_input=filename) + while self.file_id < len(self.names): + filename = os.path.join(self.dir, self.names[self.file_id]) + image = cv2.imread(str(filename), cv2.IMREAD_COLOR) + if self.file_id < len(self.names) - 1: + self.file_id = self.file_id + 1 + else: + self.file_id = self.file_id + 1 if not self.loop else 0 + if image is not None: + yield cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + def get_type(self) -> MediaType: + return MediaType.DIR def get_streamer( - path: Optional[str] = None, - camera_device: Optional[int] = None, + input_stream: Union[int, str] = 0, + loop: bool = False, threaded: bool = False, ) -> BaseStreamer: """ Get streamer object based on the file path or camera device index provided. - :param path: Path to file or directory. - :param camera_device: Camera device index. + :param input_stream: Path to file or directory or index for camera. + :param loop: Enable reading the input in a loop. :param threaded: Threaded streaming option """ - if path is not None and camera_device is not None: - raise ValueError( - "Both path and camera device is provided. Choose either camera or path to a image/video file." - ) - - media_type = get_media_type(path) - + # errors: Dict = {InvalidInput: [], OpenError: []} + errors = [] streamer: BaseStreamer - - if path is not None and media_type == MediaType.IMAGE: - streamer = ImageStreamer(path) - - elif path is not None and media_type == MediaType.VIDEO: - streamer = VideoStreamer(path) - - elif media_type == MediaType.CAMERA: - if camera_device is None: - camera_device = 0 - streamer = CameraStreamer(camera_device) - - else: - raise ValueError("Unknown media type") - - if threaded: - streamer = ThreadedStreamer(streamer) - - return streamer + for reader in (ImageStreamer, DirStreamer, VideoStreamer): + try: + streamer = reader(input_stream, loop) # type: ignore + if threaded: + streamer = ThreadedStreamer(streamer) + return streamer + except (InvalidInput, OpenError) as error: + errors.append(error) + try: + streamer = CameraStreamer(input_stream) # type: ignore + if threaded: + streamer = ThreadedStreamer(streamer) + return streamer + except (InvalidInput, OpenError) as error: + errors.append(error) + + if errors: + raise Exception(errors) + + sys.exit(1) diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/__init__.py b/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/__init__.py new file mode 100644 index 00000000000..5076deebeca --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/__init__.py @@ -0,0 +1,11 @@ +""" +Initialization of visualizers +""" +# Copyright (C) 2021-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from .anomaly_visualizer import AnomalyVisualizer +from .visualizer import Visualizer + +__all__ = ["AnomalyVisualizer", "Visualizer"] diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/anomaly_visualizer.py b/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/anomaly_visualizer.py new file mode 100644 index 00000000000..cfd1bca6d19 --- /dev/null +++ b/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/anomaly_visualizer.py @@ -0,0 +1,74 @@ +""" +Visualizer for results of anomaly task prediction +""" + +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from typing import Optional + +import cv2 +import numpy as np + +from ote_sdk.entities.annotation import AnnotationSceneEntity + +from .visualizer import Visualizer + + +class AnomalyVisualizer(Visualizer): + """ + Visualize the predicted output by drawing the annotations on the input image. + + :example: + + >>> predictions = inference_model.predict(frame) + >>> annotation = prediction_converter.convert_to_annotation(predictions) + >>> output = visualizer.draw(frame, annotation.shape, annotation.get_labels()) + >>> visualizer.show(output) + """ + + def __init__( + self, + window_name: Optional[str] = None, + show_count: bool = False, + is_one_label: bool = False, + delay: Optional[int] = None, + ) -> None: + super().__init__(window_name, show_count, is_one_label, delay) + cv2.namedWindow( + self.window_name, + cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED, + ) + self.trackbar_name = "Opacity" + cv2.createTrackbar(self.trackbar_name, self.window_name, 0, 100, lambda x: x) + + @staticmethod + def to_heat_mask(mask: np.ndarray) -> np.ndarray: + """ + Create heat mask from saliency map + :param mask: saliency map + """ + heat_mask = cv2.normalize( + mask, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX + ).astype(np.uint8) + return cv2.applyColorMap(heat_mask.astype(np.uint8), cv2.COLORMAP_JET) + + # pylint:disable=signature-differs + def draw( # type: ignore[override] + self, image: np.ndarray, annotation: AnnotationSceneEntity, meta: dict + ) -> np.ndarray: + """ + Draw annotations on the image + :param image: Input image + :param annotation: Annotations to be drawn on the input image + :param metadata: Metadata with saliency map + :return: Output image with annotations. + """ + + heat_mask = self.to_heat_mask(1 - meta["anomaly_map"]) + alpha = cv2.getTrackbarPos(self.trackbar_name, self.window_name) / 100.0 + image = (1 - alpha) * image + alpha * heat_mask + image = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2BGR) + + return self.shape_drawer.draw(image, annotation, labels=[]) diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/visualization.py b/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/visualizer.py similarity index 64% rename from ote_sdk/ote_sdk/usecases/exportable_code/visualization.py rename to ote_sdk/ote_sdk/usecases/exportable_code/visualizers/visualizer.py index 695c84d7500..6fcb7723258 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/visualization.py +++ b/ote_sdk/ote_sdk/usecases/exportable_code/visualizers/visualizer.py @@ -6,17 +6,46 @@ # SPDX-License-Identifier: Apache-2.0 # +import abc from typing import Optional import cv2 import numpy as np from ote_sdk.entities.annotation import AnnotationSceneEntity -from ote_sdk.usecases.exportable_code.streamer.streamer import MediaType from ote_sdk.utils.shape_drawer import ShapeDrawer -class Visualizer: +class IVisualizer(metaclass=abc.ABCMeta): + """ + Interface for converter + """ + + @abc.abstractmethod + def draw( + self, + image: np.ndarray, + annotation: AnnotationSceneEntity, + meta: dict, + ) -> np.ndarray: + """ + Draw annotations on the image + :param image: Input image + :param annotation: Annotations to be drawn on the input image + :param metadata: Metadata is needed to render + :return: Output image with annotations. + """ + raise NotImplementedError + + def show(self, image: np.ndarray) -> None: + """ + Show result image + """ + + raise NotImplementedError + + +class Visualizer(IVisualizer): """ Visualize the predicted output by drawing the annotations on the input image. @@ -30,30 +59,31 @@ class Visualizer: def __init__( self, - media_type: Optional[MediaType] = None, window_name: Optional[str] = None, show_count: bool = False, is_one_label: bool = False, delay: Optional[int] = None, - ): + ) -> None: self.window_name = "Window" if window_name is None else window_name self.shape_drawer = ShapeDrawer(show_count, is_one_label) self.delay = delay if delay is None: - self.delay = ( - 0 if (media_type is None or media_type == MediaType.IMAGE) else 1 - ) + self.delay = 1 - def draw(self, image: np.ndarray, annotation: AnnotationSceneEntity) -> np.ndarray: + def draw( + self, + image: np.ndarray, + annotation: AnnotationSceneEntity, + meta: Optional[dict] = None, + ) -> np.ndarray: """ Draw annotations on the image :param image: Input image :param annotation: Annotations to be drawn on the input image :return: Output image with annotations. """ - # TODO: Conversion is to be made in `show` not here. - # This requires ShapeDrawer.draw to be updated + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) return self.shape_drawer.draw(image, annotation, labels=[]) @@ -62,8 +92,7 @@ def show(self, image: np.ndarray) -> None: """ Show result image """ - # TODO: RGB2BGR Conversion is to be made here. - # This requires ShapeDrawer.draw to be updated + cv2.imshow(self.window_name, image) def is_quit(self) -> bool: diff --git a/ote_sdk/ote_sdk/utils/dataset_utils.py b/ote_sdk/ote_sdk/utils/dataset_utils.py new file mode 100644 index 00000000000..ae46412c061 --- /dev/null +++ b/ote_sdk/ote_sdk/utils/dataset_utils.py @@ -0,0 +1,190 @@ +""" +Dataset utils +""" + +# Copyright (C) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +from typing import List, Optional, Tuple + +from ote_sdk.entities.annotation import AnnotationSceneEntity +from ote_sdk.entities.dataset_item import DatasetItemEntity +from ote_sdk.entities.datasets import DatasetEntity +from ote_sdk.entities.resultset import ResultSetEntity +from ote_sdk.entities.shapes.rectangle import Rectangle + + +def get_fully_annotated_idx(dataset: DatasetEntity) -> List[int]: + """ + Find the indices of the fully annotated items in a dataset. + A dataset item is fully annotated if local annotations are available, or if the item has the `normal` label. + + :param dataset: Dataset that may contain both partially and fully annotated items + :return: List of indices of the fully annotated dataset items. + """ + local_idx = [] + for idx, gt_item in enumerate(dataset): + local_annotations = [ + annotation + for annotation in gt_item.get_annotations() + if not Rectangle.is_full_box(annotation.shape) + ] + if ( + not any(label.is_anomalous for label in gt_item.get_shapes_labels()) + or len(local_annotations) > 0 + ): + local_idx.append(idx) + return local_idx + + +def get_local_subset( + dataset: DatasetEntity, + fully_annotated_idx: Optional[List[int]] = None, + include_normal: bool = True, +) -> DatasetEntity: + """ + Extract a subset that contains only those dataset items that have local annotations. + + :param dataset: Dataset from which we want to extract the locally annotated subset. + :param fully_annotated_idx: The indices of the fully annotated dataset items. If not provided, + the function will compute the indices before creating the subset. + :param include_normal: When true, global normal annotations will be included in the local dataset. + :return: Output dataset with only local annotations + """ + local_items = [] + if fully_annotated_idx is None: + fully_annotated_idx = get_fully_annotated_idx(dataset) + for idx in fully_annotated_idx: + item = dataset[idx] + + local_annotations = [ + annotation + for annotation in item.get_annotations() + if not Rectangle.is_full_box(annotation.shape) + ] + # annotations with the normal label are considered local + if include_normal: + local_annotations.extend( + [ + annotation + for annotation in item.get_annotations() + if not any( + label.label.is_anomalous for label in annotation.get_labels() + ) + ] + ) + local_items.append( + DatasetItemEntity( + media=item.media, + annotation_scene=AnnotationSceneEntity( + local_annotations, + kind=item.annotation_scene.kind, + ), + metadata=item.metadata, + subset=item.subset, + roi=item.roi, + ignored_labels=item.ignored_labels, + ) + ) + return DatasetEntity(local_items, purpose=dataset.purpose) + + +def get_global_subset(dataset: DatasetEntity) -> DatasetEntity: + """ + Extract a subset that contains only the global annotations. + + :param dataset: Dataset from which we want to extract the globally annotated subset. + :return: Output dataset with only global annotations + """ + global_items = [] + for item in dataset: + global_annotations = [ + annotation + for annotation in item.get_annotations() + if Rectangle.is_full_box(annotation.shape) + ] + global_items.append( + DatasetItemEntity( + media=item.media, + annotation_scene=AnnotationSceneEntity( + global_annotations, kind=item.annotation_scene.kind + ), + metadata=item.metadata, + subset=item.subset, + roi=item.roi, + ignored_labels=item.ignored_labels, + ) + ) + return DatasetEntity(global_items, purpose=dataset.purpose) + + +def split_local_global_dataset( + dataset: DatasetEntity, +) -> Tuple[DatasetEntity, DatasetEntity]: + """ + Split a dataset into the globally and locally annotated subsets. + + :param dataset: Input dataset + :return: Globally annotated subset, locally annotated subset + """ + global_dataset = get_global_subset(dataset) + local_dataset = get_local_subset(dataset) + return global_dataset, local_dataset + + +def split_local_global_resultset( + resultset: ResultSetEntity, +) -> Tuple[ResultSetEntity, ResultSetEntity]: + """ + Split a resultset into the globally and locally annotated resultsets. + + :param resultset: Input result set + :return: Globally annotated result set, locally annotated result set + """ + global_gt_dataset, local_gt_dataset = split_local_global_dataset( + resultset.ground_truth_dataset + ) + local_idx = get_fully_annotated_idx(resultset.ground_truth_dataset) + global_pred_dataset = get_global_subset(resultset.prediction_dataset) + local_pred_dataset = get_local_subset( + resultset.prediction_dataset, local_idx, include_normal=False + ) + + global_resultset = ResultSetEntity( + model=resultset.model, + ground_truth_dataset=global_gt_dataset, + prediction_dataset=global_pred_dataset, + purpose=resultset.purpose, + ) + local_resultset = ResultSetEntity( + model=resultset.model, + ground_truth_dataset=local_gt_dataset, + prediction_dataset=local_pred_dataset, + purpose=resultset.purpose, + ) + return global_resultset, local_resultset + + +def contains_anomalous_images(dataset: DatasetEntity) -> bool: + """ + Check if a dataset contains any items with the anomalous label. + + :param dataset: Dataset to check for anomalous items. + :return: boolean indicating if the dataset contains any anomalous items. + """ + for item in dataset: + labels = item.get_shapes_labels() + if any(label.is_anomalous for label in labels): + return True + return False diff --git a/ote_sdk/ote_sdk/utils/shape_drawer.py b/ote_sdk/ote_sdk/utils/shape_drawer.py index ea362ed632b..85ae6fb4894 100644 --- a/ote_sdk/ote_sdk/utils/shape_drawer.py +++ b/ote_sdk/ote_sdk/utils/shape_drawer.py @@ -40,10 +40,10 @@ CvTextSize = NewType("CvTextSize", Tuple[Tuple[int, int], int]) -AnyType = TypeVar("AnyType") +_Any = TypeVar("_Any") -class DrawerEntity(Generic[AnyType]): +class DrawerEntity(Generic[_Any]): """ An interface to draw a shape of type ``T`` onto an image. """ @@ -52,7 +52,7 @@ class DrawerEntity(Generic[AnyType]): @abc.abstractmethod def draw( - self, image: np.ndarray, entity: AnyType, labels: List[ScoredLabel] + self, image: np.ndarray, entity: _Any, labels: List[ScoredLabel] ) -> np.ndarray: """ Draw an entity to a given frame @@ -83,7 +83,7 @@ def __init__(self) -> None: self.content_padding = 3 self.top_left_box_thickness = 1 self.content_margin = 2 - self.label_offset_box_shape = 10 + self.label_offset_box_shape = 0 self.black = (0, 0, 0) self.white = (255, 255, 255) self.yellow = (255, 255, 0) @@ -226,7 +226,6 @@ def generate_draw_command_for_text( width = text_width + 2 * padding height = text_height + baseline + 2 * padding - content_width = width + margin if (color[0] + color[1] + color[2]) / 3 > 200: @@ -236,7 +235,6 @@ def generate_draw_command_for_text( def draw_command(img: np.ndarray) -> np.ndarray: cursor_pos = Coordinate(int(self.cursor_pos.x), int(self.cursor_pos.y)) - self.draw_transparent_rectangle( img, int(cursor_pos.x), @@ -382,7 +380,6 @@ def draw( image = drawer.draw( image, annotation.shape, labels=annotation.get_labels() ) - if self.is_one_label: image = self.top_left_drawer.draw_labels(image, entity.get_labels()) if self.show_count: @@ -482,7 +479,7 @@ def draw( image, x1, y1, x2, y2, base_color, self.alpha_shape ) image = cv2.rectangle( - img=image, pt1=(x1, y1), pt2=(x2, y2), color=[0, 0, 0], thickness=2 + img=image, pt1=(x1, y1), pt2=(x2, y2), color=base_color, thickness=2 ) ( @@ -498,9 +495,9 @@ def draw( y_coord = y1 - self.label_offset_box_shape - content_height x_coord = x1 - # put label at bottom if it is out of bounds at the top of the shape, and shift label to left if needed + # put label inside if it is out of bounds at the top of the shape, and shift label to left if needed if y_coord < self.top_margin * image.shape[0]: - y_coord = y2 + self.label_offset_box_shape + y_coord = y1 + self.label_offset_box_shape if x_coord + content_width > image.shape[1]: x_coord = x2 - content_width @@ -559,7 +556,7 @@ def draw( angle=0, startAngle=0, endAngle=360, - color=[0, 0, 0], + color=base_color, lineType=cv2.LINE_AA, ) @@ -641,7 +638,7 @@ def draw( image=result_without_border, contours=[contours], contourIdx=-1, - color=[0, 0, 0], + color=base_color, thickness=2, lineType=cv2.LINE_AA, ) diff --git a/tests/run_code_checks.sh b/tests/run_code_checks.sh index 8f72b72d7fa..10fa9456d04 100755 --- a/tests/run_code_checks.sh +++ b/tests/run_code_checks.sh @@ -8,7 +8,7 @@ pip install wheel pip install ote_sdk/ pip install ote_cli/ pip install pre-commit -pip install pylint==2.12.1 +pip install -r ote_sdk/ote_sdk/tests/requirements.txt echo "" echo "" echo "" diff --git a/tests/run_model_templates_tests.py b/tests/run_model_templates_tests.py index eb654533040..0f070314d02 100644 --- a/tests/run_model_templates_tests.py +++ b/tests/run_model_templates_tests.py @@ -66,7 +66,7 @@ def test(run_algo_tests): success *= res for algo_dir in ALGO_DIRS: if run_algo_tests[algo_dir]: - command = ["pytest", os.path.join(algo_dir, "tests", "ote_cli"), "-v", "--durations=10"] + command = ["pytest", os.path.join(algo_dir, "tests", "ote_cli"), "-v", "-rxXs", "--durations=10"] try: res = run(command, env=collect_env_vars(wd), check=True).returncode == 0 except: