diff --git a/external/anomaly/ote_tests_pytest.ini b/external/anomaly/ote_tests_pytest.ini
index dcf6b1b4e2a..c2d07cb3f9d 100644
--- a/external/anomaly/ote_tests_pytest.ini
+++ b/external/anomaly/ote_tests_pytest.ini
@@ -1,2 +1,2 @@
[pytest]
-python_files = test_ote_api.py test_ote_inference.py test_ote_training.py
+python_files = test_ote_api.py test_ote_inference.py test_ote_training.py
\ No newline at end of file
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/helpers.py b/external/deep-object-reid/tests/sc_input_params_validation/helpers.py
new file mode 100644
index 00000000000..36e80d46919
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/helpers.py
@@ -0,0 +1,72 @@
+"""
+Helpers for OTE input parameters validation tests.
+"""
+
+import numpy as np
+
+from ote_sdk.entities.annotation import (
+ Annotation,
+ AnnotationSceneEntity,
+ AnnotationSceneKind,
+)
+from ote_sdk.entities.dataset_item import DatasetItemEntity
+from ote_sdk.entities.datasets import DatasetEntity
+from ote_sdk.entities.id import ID
+from ote_sdk.entities.image import Image
+from ote_sdk.entities.label import Domain, LabelEntity
+from ote_sdk.entities.scored_label import ScoredLabel
+from ote_sdk.entities.shapes.rectangle import Rectangle
+from ote_sdk.entities.subset import Subset
+
+
+def load_test_dataset():
+ """Helper to create test dataset"""
+
+ def gen_image(resolution, x1, y1, x2, y2):
+ w, h = resolution
+ image = np.full([h, w, 3], fill_value=255, dtype=np.uint8)
+ image[int(y1 * h): int(y2 * h), int(x1 * w): int(x2 * w), :] = np.array(
+ [0, 128, 128], dtype=np.uint8
+ )[None, None, :]
+ return image, Rectangle(x1=x1, y1=y1, x2=x2, y2=y2)
+
+ images = [
+ gen_image((640, 480), 0.0, 0.0, 0.5, 0.5),
+ gen_image((640, 480), 0.5, 0.0, 1.0, 0.5),
+ gen_image((640, 480), 0.0, 0.5, 0.5, 1.0),
+ gen_image((640, 480), 0.5, 0.5, 1.0, 1.0),
+ ]
+ labels = [LabelEntity(name="rect", domain=Domain.DETECTION, id=ID("0"))]
+
+ def get_image(i, subset):
+ image, bbox = images[i]
+ return DatasetItemEntity(
+ media=Image(data=image),
+ annotation_scene=AnnotationSceneEntity(
+ annotations=[Annotation(bbox, labels=[ScoredLabel(label=labels[0])])],
+ kind=AnnotationSceneKind.ANNOTATION,
+ ),
+ subset=subset,
+ )
+
+ items = [
+ get_image(0, Subset.TRAINING),
+ get_image(1, Subset.TRAINING),
+ get_image(2, Subset.TRAINING),
+ get_image(3, Subset.TRAINING),
+ get_image(0, Subset.TRAINING),
+ get_image(1, Subset.TRAINING),
+ get_image(2, Subset.TRAINING),
+ get_image(3, Subset.TRAINING),
+ get_image(0, Subset.TRAINING),
+ get_image(1, Subset.TRAINING),
+ get_image(0, Subset.VALIDATION),
+ get_image(1, Subset.VALIDATION),
+ get_image(2, Subset.VALIDATION),
+ get_image(3, Subset.VALIDATION),
+ get_image(0, Subset.TESTING),
+ get_image(1, Subset.TESTING),
+ get_image(2, Subset.TESTING),
+ get_image(3, Subset.TESTING),
+ ]
+ return DatasetEntity(items), labels
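
For orientation, a minimal sketch of how this helper is meant to be consumed (not part of the patch). The counts follow from the `items` list above, and `DatasetEntity.get_subset` is assumed from the SDK: ten training, four validation and four testing items, all carrying the single "rect" detection label.

    from ote_sdk.entities.subset import Subset

    from helpers import load_test_dataset

    dataset, labels = load_test_dataset()
    assert len(dataset.get_subset(Subset.TRAINING)) == 10
    assert len(dataset.get_subset(Subset.VALIDATION)) == 4
    assert len(dataset.get_subset(Subset.TESTING)) == 4
    assert [label.name for label in labels] == ["rect"]
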
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/test_ote_classification_input_params_validation.py b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_classification_input_params_validation.py
new file mode 100644
index 00000000000..b3f637efef2
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_classification_input_params_validation.py
@@ -0,0 +1,294 @@
+import numpy as np
+import pytest
+from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
+from ote_sdk.tests.parameters_validation.validation_helper import (
+ check_value_error_exception_raised,
+)
+
+from torchreid_tasks.model_wrappers.classification import (
+ OteClassification,
+ sigmoid_numpy,
+ softmax_numpy,
+ get_hierarchical_predictions,
+ get_multiclass_predictions,
+ get_multilabel_predictions,
+ preprocess_features_for_actmap,
+ get_actmap,
+)
+
+
+class MockClassification(OteClassification):
+ def __init__(self):
+ pass
+
+
+class TestClassificationFunctionsParamsValidation:
+ @e2e_pytest_unit
+    def test_preprocess_features_for_actmap_params_validation(self):
+ """
+ Description:
+ Check "preprocess_features_for_actmap" function input parameters validation
+
+ Input data:
+ "features" non-expected type object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "preprocess_features_for_actmap" function
+ """
+ with pytest.raises(ValueError):
+ preprocess_features_for_actmap(features=None) # type: ignore
+
+ @e2e_pytest_unit
+ def test_get_actmap_params_validation(self):
+ """
+ Description:
+ Check "get_actmap" function input parameters validation
+
+ Input data:
+ "get_actmap" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_actmap" function
+ """
+ correct_values_dict = {
+ "features": ["some", "features"],
+ "output_res": ("iterable", "object")
+ }
+ unexpected_values = [
+            # Unexpected "None" is specified as "features" parameter
+            ("features", None),
+            # Unexpected "None" is specified as "output_res" parameter
+            ("output_res", None),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_actmap,
+ )
+
+ @e2e_pytest_unit
+ def test_sigmoid_numpy_params_validation(self):
+ """
+ Description:
+ Check "sigmoid_numpy" function input parameters validation
+
+ Input data:
+            "x" non-ndarray parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "sigmoid_numpy" function
+ """
+ with pytest.raises(ValueError):
+ sigmoid_numpy(x="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_softmax_numpy_params_validation(self):
+ """
+ Description:
+ Check "softmax_numpy" function input parameters validation
+
+ Input data:
+            "x" non-ndarray parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "softmax_numpy" function
+ """
+ with pytest.raises(ValueError):
+ softmax_numpy(x="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_get_hierarchical_predictions_params_validation(self):
+ """
+ Description:
+ Check "get_hierarchical_predictions" function input parameters validation
+
+ Input data:
+ "get_hierarchical_predictions" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_hierarchical_predictions" function
+ """
+ correct_values_dict = {
+ "logits": np.random.randint(low=0, high=255, size=(10, 16, 3)),
+ "multihead_class_info": {"multihead": "dictionary"},
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "logits" parameter
+ ("logits", unexpected_str),
+ # Unexpected string is specified as "multihead_class_info" parameter
+ ("multihead_class_info", unexpected_str),
+ # Unexpected string is specified as "pos_thr" parameter
+ ("pos_thr", unexpected_str),
+ # Unexpected string is specified as "activate" parameter
+ ("activate", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_hierarchical_predictions,
+ )
+
+ @e2e_pytest_unit
+ def test_get_multiclass_predictions_params_validation(self):
+ """
+ Description:
+ Check "get_multiclass_predictions" function input parameters validation
+
+ Input data:
+ "get_multiclass_predictions" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_multiclass_predictions" function
+ """
+ correct_values_dict = {
+ "logits": np.random.randint(low=0, high=255, size=(10, 16, 3)),
+ "activate": True,
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "logits" parameter
+ ("logits", unexpected_str),
+ # Unexpected string is specified as "activate" parameter
+ ("activate", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_multiclass_predictions,
+ )
+
+ @e2e_pytest_unit
+ def test_get_multilabel_predictions_params_validation(self):
+ """
+ Description:
+ Check "get_multilabel_predictions" function input parameters validation
+
+ Input data:
+ "get_multilabel_predictions" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_multilabel_predictions" function
+ """
+ correct_values_dict = {
+ "logits": np.random.randint(low=0, high=255, size=(10, 16, 3)),
+ "activate": True,
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "logits" parameter
+ ("logits", unexpected_str),
+ # Unexpected string is specified as "pos_thr" parameter
+ ("pos_thr", unexpected_str),
+ # Unexpected string is specified as "activate" parameter
+ ("activate", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_multilabel_predictions,
+ )
+
+
+class TestOteClassificationParamsValidation:
+ @e2e_pytest_unit
+ def test_ote_classification_preprocess_params_validation(self):
+ """
+ Description:
+ Check OteClassification object "preprocess" method input parameters validation
+
+ Input data:
+ OteClassification object. "image" non-ndarray object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "preprocess" method
+ """
+ classification = MockClassification()
+ with pytest.raises(ValueError):
+ classification.preprocess(image="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_ote_classification_postprocess_params_validation(self):
+ """
+ Description:
+ Check OteClassification object "postprocess" method input parameters validation
+
+ Input data:
+ OteClassification object. "postprocess" method unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "postprocess" method
+ """
+ classification = MockClassification()
+ correct_values_dict = {
+ "outputs": {"output_1": np.random.rand(2, 2)},
+ "metadata": {"metadata_1": "some_data"},
+ }
+ unexpected_int = 1
+ unexpected_values = [
+ # Unexpected integer is specified as "outputs" parameter
+ ("outputs", unexpected_int),
+ # Unexpected integer is specified as "outputs" dictionary key
+ ("outputs", {unexpected_int: np.random.rand(2, 2)}),
+ # Unexpected integer is specified as "outputs" dictionary value
+ ("outputs", {"output_1": unexpected_int}),
+ # Unexpected integer is specified as "metadata" parameter
+ ("metadata", unexpected_int),
+ # Unexpected integer is specified as "metadata" dictionary key
+ ("metadata", {unexpected_int: "some_data"}),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=classification.postprocess,
+ )
+
+ @e2e_pytest_unit
+ def test_ote_classification_postprocess_aux_outputs_params_validation(self):
+ """
+ Description:
+ Check OteClassification object "postprocess_aux_outputs" method input parameters validation
+
+ Input data:
+ OteClassification object. "postprocess_aux_outputs" method unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "postprocess_aux_outputs" method
+ """
+ classification = MockClassification()
+ correct_values_dict = {
+ "outputs": {"output_1": np.random.rand(2, 2)},
+ "metadata": {"metadata_1": "some_data"},
+ }
+ unexpected_int = 1
+ unexpected_values = [
+ # Unexpected integer is specified as "outputs" parameter
+ ("outputs", unexpected_int),
+ # Unexpected integer is specified as "outputs" dictionary key
+ ("outputs", {unexpected_int: np.random.rand(2, 2)}),
+ # Unexpected integer is specified as "outputs" dictionary value
+ ("outputs", {"output_1": unexpected_int}),
+ # Unexpected integer is specified as "metadata" parameter
+ ("metadata", unexpected_int),
+ # Unexpected integer is specified as "metadata" dictionary key
+ ("metadata", {unexpected_int: "some_data"}),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=classification.postprocess_aux_outputs,
+ )
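
Every table-driven test in this patch delegates to check_value_error_exception_raised from the OTE SDK test utilities. Its implementation is not part of this diff; the behaviour the tests rely on amounts to the following sketch (an assumption about the helper, not the actual SDK code):

    import pytest

    def check_value_error_exception_raised(correct_parameters, unexpected_values, class_or_function):
        """Re-invoke the target once per bad value, keeping all other arguments valid."""
        for key, value in unexpected_values:
            parameters = dict(correct_parameters)  # fresh copy so earlier cases do not leak
            parameters[key] = value
            with pytest.raises(ValueError):
                class_or_function(**parameters)
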
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/test_ote_inference_task_input_params_validation.py b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_inference_task_input_params_validation.py
new file mode 100644
index 00000000000..13a36ae7ad1
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_inference_task_input_params_validation.py
@@ -0,0 +1,151 @@
+import pytest
+from ote_sdk.configuration.configurable_parameters import ConfigurableParameters
+from ote_sdk.entities.datasets import DatasetEntity
+from ote_sdk.entities.inference_parameters import InferenceParameters
+from ote_sdk.entities.label_schema import LabelSchemaEntity
+from ote_sdk.entities.model import ModelConfiguration, ModelEntity
+from ote_sdk.entities.resultset import ResultSetEntity
+from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
+from ote_sdk.tests.parameters_validation.validation_helper import (
+ check_value_error_exception_raised,
+)
+from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType
+
+from torchreid_tasks.inference_task import OTEClassificationInferenceTask
+
+
+class MockClassificationInferenceTask(OTEClassificationInferenceTask):
+ def __init__(self):
+ pass
+
+
+class TestOTEClassificationInferenceTaskInputParamsValidation:
+ @staticmethod
+ def model():
+ model_configuration = ModelConfiguration(
+ configurable_parameters=ConfigurableParameters(
+ header="header", description="description"
+ ),
+ label_schema=LabelSchemaEntity(),
+ )
+ return ModelEntity(
+ train_dataset=DatasetEntity(), configuration=model_configuration
+ )
+
+ @e2e_pytest_unit
+ def test_ote_classification_inference_task_init_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationInferenceTask object initialization parameters validation
+
+ Input data:
+ OTEClassificationInferenceTask object initialization parameters with unexpected type
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ OTEClassificationInferenceTask initialization parameter
+ """
+ with pytest.raises(ValueError):
+ OTEClassificationInferenceTask(task_environment="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_ote_classification_inference_task_infer_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationInferenceTask object "infer" method input parameters validation
+
+ Input data:
+ OTEClassificationInferenceTask object. "infer" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "infer" method
+ """
+ task = MockClassificationInferenceTask()
+ correct_values_dict = {
+ "dataset": DatasetEntity(),
+ "inference_parameters": InferenceParameters(),
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "dataset" parameter
+ ("dataset", unexpected_str),
+ # Unexpected string is specified as "inference_parameters" parameter
+ ("inference_parameters", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.infer,
+ )
+
+ @e2e_pytest_unit
+ def test_ote_classification_inference_task_evaluate_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationInferenceTask object "evaluate" method input parameters validation
+
+ Input data:
+ OTEClassificationInferenceTask object. "evaluate" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "evaluate" method
+ """
+ task = MockClassificationInferenceTask()
+ model = self.model()
+ result_set = ResultSetEntity(
+ model=model,
+ ground_truth_dataset=DatasetEntity(),
+ prediction_dataset=DatasetEntity(),
+ )
+ correct_values_dict = {
+ "output_resultset": result_set,
+ "evaluation_metric": "metric",
+ }
+ unexpected_int = 1
+ unexpected_values = [
+ # Unexpected integer is specified as "output_resultset" parameter
+ ("output_resultset", unexpected_int),
+ # Unexpected integer is specified as "evaluation_metric" parameter
+ ("evaluation_metric", unexpected_int),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.evaluate,
+ )
+
+ @e2e_pytest_unit
+ def test_ote_classification_inference_task_export_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationInferenceTask object "export" method input parameters validation
+
+ Input data:
+ OTEClassificationInferenceTask object. "export" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "export" method
+ """
+ task = MockClassificationInferenceTask()
+ model = self.model()
+ correct_values_dict = {
+ "export_type": ExportType.OPENVINO,
+ "output_model": model,
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "export_type" parameter
+ ("export_type", unexpected_str),
+ # Unexpected string is specified as "output_model" parameter
+ ("output_model", unexpected_str),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.export,
+ )
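
The Mock* subclasses in this and the following files override __init__ with a bare pass, so no task environment or model is ever loaded; only the inherited methods, and the input validation that guards them, are exercised. A self-contained sketch of the pattern (the inline isinstance check is a stand-in for wherever the real tasks validate their arguments):

    import pytest

    class Task:
        def __init__(self, task_environment):
            self.task_environment = task_environment  # expensive setup in the real task

        def infer(self, dataset):
            # stand-in for the argument validation the real method performs
            if isinstance(dataset, str):
                raise ValueError("dataset has unexpected type")

    class MockTask(Task):
        def __init__(self):  # skip Task.__init__: no environment, no model
            pass

    with pytest.raises(ValueError):
        MockTask().infer(dataset="unexpected string")
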
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/test_ote_monitors_input_params_validation.py b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_monitors_input_params_validation.py
new file mode 100644
index 00000000000..a188322d2bb
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_monitors_input_params_validation.py
@@ -0,0 +1,142 @@
+import pytest
+from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
+from ote_sdk.tests.parameters_validation.validation_helper import (
+ check_value_error_exception_raised,
+)
+
+from torchreid_tasks.monitors import MetricsMonitor, DefaultMetricsMonitor
+
+
+class TestMetricsMonitorParamsValidation:
+ @e2e_pytest_unit
+ def test_metrics_monitor_init_params_validation(self):
+ """
+ Description:
+ Check MetricsMonitor object initialization parameters validation
+
+ Input data:
+            "log_dir" unexpected type or invalid value object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ MetricsMonitor object initialization parameter
+ """
+ for unexpected_value in [
+            # Unexpected integer is specified as "log_dir" parameter
+            1,
+            # Empty string is specified as "log_dir" parameter
+            "",
+            # Path with null character is specified as "log_dir" parameter
+            "./null\0char",
+            # Path with non-printable character is specified as "log_dir" parameter
+            "./non\nprintable",
+ ]:
+ with pytest.raises(ValueError):
+ MetricsMonitor(log_dir=unexpected_value)
+
+ @e2e_pytest_unit
+ def test_metrics_monitor_add_scalar_params_validation(self):
+ """
+ Description:
+ Check MetricsMonitor object "add_scalar" method input parameters validation
+
+ Input data:
+ MetricsMonitor object. "add_scalar" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "add_scalar" method
+ """
+ monitor = MetricsMonitor("./")
+ correct_values_dict = {
+ "capture": "some capture",
+ "value": 0.1,
+ "timestamp": 1,
+ }
+ unexpected_dict = {"unexpected": "dictionary"}
+ unexpected_values = [
+ # Unexpected dictionary is specified as "capture" parameter
+ ("capture", unexpected_dict),
+ # Unexpected dictionary is specified as "value" parameter
+ ("value", unexpected_dict),
+ # Unexpected dictionary is specified as "timestamp" parameter
+ ("timestamp", unexpected_dict),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=monitor.add_scalar,
+ )
+
+
+class TestDefaultMetricsMonitorParamsValidation:
+ @e2e_pytest_unit
+ def test_default_metrics_monitor_add_scalar_params_validation(self):
+ """
+ Description:
+ Check DefaultMetricsMonitor object "add_scalar" method input parameters validation
+
+ Input data:
+ DefaultMetricsMonitor object, "add_scalar" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "add_scalar" method
+ """
+ monitor = DefaultMetricsMonitor()
+ correct_values_dict = {
+ "capture": "some capture",
+ "value": 0.1,
+ "timestamp": 1,
+ }
+ unexpected_dict = {"unexpected": "dictionary"}
+ unexpected_values = [
+ # Unexpected dictionary is specified as "capture" parameter
+ ("capture", unexpected_dict),
+ # Unexpected dictionary is specified as "value" parameter
+ ("value", unexpected_dict),
+ # Unexpected dictionary is specified as "timestamp" parameter
+ ("timestamp", unexpected_dict),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=monitor.add_scalar,
+ )
+
+ @e2e_pytest_unit
+ def test_default_metrics_monitor_get_metric_values_params_validation(self):
+ """
+ Description:
+ Check DefaultMetricsMonitor object "get_metric_values" method input parameters validation
+
+ Input data:
+ DefaultMetricsMonitor object, "capture" unexpected-type value
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_metric_values" method
+ """
+ monitor = DefaultMetricsMonitor()
+ with pytest.raises(ValueError):
+ monitor.get_metric_values(capture=1) # type: ignore
+
+ @e2e_pytest_unit
+ def test_default_metrics_monitor_get_metric_timestamps_params_validation(self):
+ """
+ Description:
+ Check DefaultMetricsMonitor object "get_metric_timestamps" method input parameters
+ validation
+
+ Input data:
+ DefaultMetricsMonitor object, "capture" unexpected-type value
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_metric_timestamps" method
+ """
+ monitor = DefaultMetricsMonitor()
+ with pytest.raises(ValueError):
+ monitor.get_metric_timestamps(capture=1) # type: ignore
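
For contrast with the rejected inputs above, a sketch of the call shapes these monitor tests treat as valid. The argument types come from the tests themselves; the commented return values are assumptions about the monitor API:

    from torchreid_tasks.monitors import DefaultMetricsMonitor

    monitor = DefaultMetricsMonitor()
    monitor.add_scalar(capture="train/loss", value=0.1, timestamp=1)
    values = monitor.get_metric_values(capture="train/loss")      # presumably [0.1]
    stamps = monitor.get_metric_timestamps(capture="train/loss")  # presumably [1]
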
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/test_ote_nncf_task_input_params_validation.py b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_nncf_task_input_params_validation.py
new file mode 100644
index 00000000000..c3885e8f944
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_nncf_task_input_params_validation.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2021-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import pytest
+from ote_sdk.configuration.configurable_parameters import ConfigurableParameters
+from ote_sdk.entities.datasets import DatasetEntity
+from ote_sdk.entities.label_schema import LabelSchemaEntity
+from ote_sdk.entities.model import ModelConfiguration, ModelEntity
+from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
+from ote_sdk.tests.parameters_validation.validation_helper import (
+ check_value_error_exception_raised,
+)
+from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType
+from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType
+
+from torchreid_tasks.nncf_task import OTEClassificationNNCFTask
+
+
+class MockNNCFTask(OTEClassificationNNCFTask):
+ def __init__(self):
+ pass
+
+
+class TestNNCFTaskInputParamsValidation:
+ @staticmethod
+ def model():
+ model_configuration = ModelConfiguration(
+ configurable_parameters=ConfigurableParameters(
+ header="header", description="description"
+ ),
+ label_schema=LabelSchemaEntity(),
+ )
+ return ModelEntity(
+ train_dataset=DatasetEntity(), configuration=model_configuration
+ )
+
+ @e2e_pytest_unit
+ def test_ote_nncf_classification_task_init_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationNNCFTask object initialization parameters validation
+
+ Input data:
+ OTEClassificationNNCFTask object initialization parameters with unexpected type
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ OTEClassificationNNCFTask object initialization parameter
+ """
+ with pytest.raises(ValueError):
+ OTEClassificationNNCFTask(task_environment="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_ote_nncf_classification_task_optimize_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationNNCFTask object "optimize" method input parameters validation
+
+ Input data:
+ OTEClassificationNNCFTask object. "optimize" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "optimize" method
+ """
+ task = MockNNCFTask()
+ correct_values_dict = {
+ "optimization_type": OptimizationType.NNCF,
+ "dataset": DatasetEntity(),
+ "output_model": self.model(),
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "optimization_type" parameter
+ ("optimization_type", unexpected_str),
+ # Unexpected string is specified as "dataset" parameter
+ ("dataset", unexpected_str),
+ # Unexpected string is specified as "output_model" parameter
+ ("output_model", unexpected_str),
+ # Unexpected string is specified as "optimization_parameters" parameter
+ ("optimization_parameters", unexpected_str),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.optimize,
+ )
+
+ @e2e_pytest_unit
+ def test_ote_nncf_classification_task_save_model_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationNNCFTask object "save_model" method input parameters validation
+
+ Input data:
+ OTEClassificationNNCFTask object, "output_model" non-ModelEntity object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "save_model" method
+ """
+ task = MockNNCFTask()
+ with pytest.raises(ValueError):
+ task.save_model(output_model="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_ote_nncf_classification_task_export_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationNNCFTask object "export" method input parameters validation
+
+ Input data:
+ OTEClassificationNNCFTask object. "export" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "export" method
+ """
+ task = MockNNCFTask()
+ correct_values_dict = {
+ "export_type": ExportType.OPENVINO,
+ "output_model": self.model(),
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "export_type" parameter
+ ("export_type", unexpected_str),
+ # Unexpected string is specified as "output_model" parameter
+ ("output_model", unexpected_str),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.export,
+ )
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/test_ote_openvino_task_input_params_validation.py b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_openvino_task_input_params_validation.py
new file mode 100644
index 00000000000..3774096b013
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_openvino_task_input_params_validation.py
@@ -0,0 +1,373 @@
+# Copyright (C) 2021-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import numpy as np
+import pytest
+from ote_sdk.configuration.configurable_parameters import ConfigurableParameters
+from ote_sdk.entities.datasets import DatasetEntity
+from ote_sdk.entities.label_schema import LabelSchemaEntity
+from ote_sdk.entities.model import ModelConfiguration, ModelEntity
+from ote_sdk.entities.resultset import ResultSetEntity
+from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
+from ote_sdk.tests.parameters_validation.validation_helper import (
+ check_value_error_exception_raised,
+)
+from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType
+
+from torchreid_tasks.openvino_task import (
+ OpenVINOClassificationInferencer,
+ OTEOpenVinoDataLoader,
+ OpenVINOClassificationTask,
+)
+from torchreid_tasks.parameters import OTEClassificationParameters
+
+
+def model():
+ model_configuration = ModelConfiguration(
+ configurable_parameters=ConfigurableParameters(
+ header="header", description="description"
+ ),
+ label_schema=LabelSchemaEntity(),
+ )
+ return ModelEntity(
+ train_dataset=DatasetEntity(), configuration=model_configuration
+ )
+
+
+class MockOpenVinoTask(OpenVINOClassificationTask):
+ def __init__(self):
+ pass
+
+
+class MockOpenVinoInferencer(OpenVINOClassificationInferencer):
+ def __init__(self):
+ pass
+
+
+class TestOpenVINOClassificationTaskInputParamsValidation:
+ @e2e_pytest_unit
+ def test_openvino_task_init_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationTask object initialization parameters validation
+
+ Input data:
+ "task_environment" non-TaskEnvironment object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ OpenVINOClassificationTask object initialization parameter
+ """
+ with pytest.raises(ValueError):
+ OpenVINOClassificationTask(task_environment="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_openvino_task_infer_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationTask object "infer" method input parameters validation
+
+ Input data:
+ OpenVINOClassificationTask object. "infer" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "infer" method
+ """
+ task = MockOpenVinoTask()
+ correct_values_dict = {"dataset": DatasetEntity()}
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "dataset" parameter
+ ("dataset", unexpected_str),
+ # Unexpected string is specified as "inference_parameters" parameter
+ ("inference_parameters", unexpected_str),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.infer,
+ )
+
+ @e2e_pytest_unit
+ def test_openvino_task_evaluate_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationTask object "evaluate" method input parameters validation
+
+ Input data:
+ OpenVINOClassificationTask object. "evaluate" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "evaluate" method
+ """
+ result_set = ResultSetEntity(
+ model=model(),
+ ground_truth_dataset=DatasetEntity(),
+ prediction_dataset=DatasetEntity(),
+ )
+ task = MockOpenVinoTask()
+ correct_values_dict = {"output_result_set": result_set}
+ unexpected_int = 1
+ unexpected_values = [
+ # Unexpected integer is specified as "output_result_set" parameter
+ ("output_result_set", unexpected_int),
+ # Unexpected integer is specified as "evaluation_metric" parameter
+ ("evaluation_metric", unexpected_int),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.evaluate,
+ )
+
+ @e2e_pytest_unit
+ def test_openvino_task_deploy_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationTask object "deploy" method input parameters validation
+
+ Input data:
+ OpenVINOClassificationTask object. "output_model" non-ModelEntity object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "deploy" method
+ """
+ task = MockOpenVinoTask()
+ with pytest.raises(ValueError):
+ task.deploy(output_model="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_openvino_task_optimize_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationTask object "optimize" method input parameters validation
+
+ Input data:
+ OpenVINOClassificationTask object. "optimize" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "optimize" method
+ """
+ task = MockOpenVinoTask()
+ correct_values_dict = {
+ "optimization_type": OptimizationType.POT,
+ "dataset": DatasetEntity(),
+ "output_model": model(),
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "optimization_type" parameter
+ ("optimization_type", unexpected_str),
+ # Unexpected string is specified as "dataset" parameter
+ ("dataset", unexpected_str),
+ # Unexpected string is specified as "output_model" parameter
+ ("output_model", unexpected_str),
+ # Unexpected string is specified as "optimization_parameters" parameter
+ ("optimization_parameters", unexpected_str),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.optimize,
+ )
+
+
+class TestOTEOpenVinoDataLoaderInputParamsValidation:
+ @e2e_pytest_unit
+ def test_openvino_data_loader_init_params_validation(self):
+ """
+ Description:
+ Check OTEOpenVinoDataLoader object initialization parameters validation
+
+ Input data:
+ OTEOpenVinoDataLoader object initialization parameters with unexpected type
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ OTEOpenVinoDataLoader object initialization parameter
+ """
+ classification_inferencer = MockOpenVinoInferencer()
+ correct_values_dict = {
+ "dataset": DatasetEntity(),
+ "inferencer": classification_inferencer,
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "dataset" parameter
+ ("dataset", unexpected_str),
+ # Unexpected string is specified as "inferencer" parameter
+ ("inferencer", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=OTEOpenVinoDataLoader,
+ )
+
+ @e2e_pytest_unit
+ def test_openvino_data_loader_getitem_input_params_validation(self):
+ """
+ Description:
+ Check OTEOpenVinoDataLoader object "__getitem__" method input parameters validation
+
+ Input data:
+ OTEOpenVinoDataLoader object. "__getitem__" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "__getitem__" method
+ """
+ classification_inferencer = MockOpenVinoInferencer()
+ data_loader = OTEOpenVinoDataLoader(
+ dataset=DatasetEntity(), inferencer=classification_inferencer
+ )
+ with pytest.raises(ValueError):
+ data_loader.__getitem__(index="unexpected string") # type: ignore
+
+
+class TestOpenVINOClassificationInferencerInputParamsValidation:
+ @e2e_pytest_unit
+ def test_openvino_classification_inferencer_init_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationInferencer object initialization parameters validation
+
+ Input data:
+            OpenVINOClassificationInferencer object initialization parameters with unexpected type
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ OpenVINOClassificationInferencer object initialization parameter
+ """
+ correct_values_dict = {
+ "hparams": OTEClassificationParameters("header"),
+ "label_schema": LabelSchemaEntity(),
+ "model_file": "some model data",
+ }
+ unexpected_float = 1.1
+ unexpected_values = [
+ # Unexpected float is specified as "hparams" parameter
+ ("hparams", unexpected_float),
+ # Unexpected float is specified as "label_schema" parameter
+ ("label_schema", unexpected_float),
+ # Unexpected float is specified as "model_file" parameter
+ ("model_file", unexpected_float),
+ # Unexpected float is specified as "weight_file" parameter
+ ("weight_file", unexpected_float),
+ # Unexpected float is specified as "device" parameter
+ ("device", unexpected_float),
+ # Unexpected float is specified as "num_requests" parameter
+ ("num_requests", unexpected_float),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=OpenVINOClassificationInferencer,
+ )
+
+ @e2e_pytest_unit
+ def test_openvino_classification_inferencer_pre_process_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationInferencer object "pre_process" method input parameters
+ validation
+
+ Input data:
+ OpenVINOClassificationInferencer object, "image" non-ndarray object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "pre_process" method
+ """
+ inferencer = MockOpenVinoInferencer()
+ with pytest.raises(ValueError):
+ inferencer.pre_process(image="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_openvino_classification_inferencer_post_process_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationInferencer object "post_process" method input parameters
+ validation
+
+ Input data:
+ OpenVINOClassificationInferencer object, "post_process" method unexpected-type input
+ parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "post_process" method
+ """
+ inferencer = MockOpenVinoInferencer()
+ correct_values_dict = {
+ "prediction": {"prediction_1": np.random.rand(2, 2)},
+ "metadata": {"metadata_1": "some_data"},
+ }
+ unexpected_int = 1
+ unexpected_values = [
+ # Unexpected integer is specified as "prediction" parameter
+ ("prediction", unexpected_int),
+ # Unexpected integer is specified as "prediction" dictionary key
+ ("prediction", {unexpected_int: np.random.rand(2, 2)}),
+ # Unexpected integer is specified as "prediction" dictionary value
+ ("prediction", {"prediction_1": unexpected_int}),
+ # Unexpected integer is specified as "metadata" parameter
+ ("metadata", unexpected_int),
+ # Unexpected integer is specified as "metadata" dictionary key
+ ("metadata", {unexpected_int: "some_data"}),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=inferencer.post_process,
+ )
+
+ @e2e_pytest_unit
+ def test_openvino_classification_inferencer_predict_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationInferencer object "predict" method input parameters
+ validation
+
+ Input data:
+ OpenVINOClassificationInferencer object, "image" non-ndarray object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "predict" method
+ """
+ inferencer = MockOpenVinoInferencer()
+ with pytest.raises(ValueError):
+ inferencer.predict(image="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_openvino_classification_inferencer_forward_params_validation(self):
+ """
+ Description:
+ Check OpenVINOClassificationInferencer object "forward" method input parameters validation
+
+ Input data:
+ OpenVINOClassificationInferencer object, "forward" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "forward" method
+ """
+ inferencer = MockOpenVinoInferencer()
+ unexpected_int = 1
+ for unexpected_value in [
+ # Unexpected integer is specified as "inputs" parameter
+ unexpected_int,
+ # Unexpected integer is specified as "inputs" dictionary key
+ {unexpected_int: np.random.rand(2, 2)},
+ # Unexpected integer is specified as "inputs" dictionary value
+ {"input_1": unexpected_int},
+ ]:
+ with pytest.raises(ValueError):
+ inferencer.forward(inputs=unexpected_value) # type: ignore
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/test_ote_train_task_input_params_validation.py b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_train_task_input_params_validation.py
new file mode 100644
index 00000000000..cef38871153
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_train_task_input_params_validation.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2021-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import pytest
+from ote_sdk.configuration.configurable_parameters import ConfigurableParameters
+from ote_sdk.entities.datasets import DatasetEntity
+from ote_sdk.entities.label_schema import LabelSchemaEntity
+from ote_sdk.entities.model import ModelConfiguration, ModelEntity
+from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
+from ote_sdk.tests.parameters_validation.validation_helper import (
+ check_value_error_exception_raised,
+)
+
+from torchreid_tasks.train_task import OTEClassificationTrainingTask
+
+
+class MockClassificationTrainingTask(OTEClassificationTrainingTask):
+ def __init__(self):
+ pass
+
+
+class TestOTEClassificationTrainingTaskInputParamsValidation:
+ @staticmethod
+ def model():
+ model_configuration = ModelConfiguration(
+ configurable_parameters=ConfigurableParameters(
+ header="header", description="description"
+ ),
+ label_schema=LabelSchemaEntity(),
+ )
+ return ModelEntity(
+ train_dataset=DatasetEntity(), configuration=model_configuration
+ )
+
+ @e2e_pytest_unit
+ def test_ote_classification_train_task_init_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationTrainingTask object initialization parameters validation
+
+ Input data:
+ "task_environment" non-TaskEnvironment object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ OTEClassificationTrainingTask object initialization parameter
+ """
+ with pytest.raises(ValueError):
+ OTEClassificationTrainingTask(task_environment="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_ote_classification_train_task_save_model_input_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationTrainingTask object "save_model" method input parameters validation
+
+ Input data:
+            OTEClassificationTrainingTask object, "output_model" non-ModelEntity object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "save_model" method
+ """
+ task = MockClassificationTrainingTask()
+ with pytest.raises(ValueError):
+ task.save_model(output_model="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+ def test_ote_classification_train_task_train_input_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationTrainingTask object "train" method input parameters validation
+
+ Input data:
+ OTEClassificationTrainingTask object, "train" method unexpected-type input parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "train" method
+ """
+ task = MockClassificationTrainingTask()
+ correct_values_dict = {
+ "dataset": DatasetEntity(),
+ "output_model": self.model(),
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "dataset" parameter
+ ("dataset", unexpected_str),
+ # Unexpected string is specified as "output_model" parameter
+ ("output_model", unexpected_str),
+ # Unexpected string is specified as "train_parameters" parameter
+ ("train_parameters", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=task.train,
+ )
diff --git a/external/deep-object-reid/tests/sc_input_params_validation/test_ote_utils_input_params_validation.py b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_utils_input_params_validation.py
new file mode 100644
index 00000000000..82e4795b8fa
--- /dev/null
+++ b/external/deep-object-reid/tests/sc_input_params_validation/test_ote_utils_input_params_validation.py
@@ -0,0 +1,496 @@
+import numpy as np
+import pytest
+from ote_sdk.entities.label import LabelEntity, Domain
+from ote_sdk.entities.label_schema import LabelSchemaEntity
+from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
+from ote_sdk.tests.parameters_validation.validation_helper import (
+ check_value_error_exception_raised,
+)
+from torchreid_tasks.utils import (
+ ClassificationDatasetAdapter,
+ active_score_from_probs,
+ OTEClassificationDataset,
+ generate_label_schema,
+ get_actmap,
+ get_multiclass_predictions,
+ get_task_class,
+ reload_hyper_parameters,
+ preprocess_features_for_actmap,
+ set_values_as_default,
+ sigmoid_numpy,
+ softmax_numpy,
+ get_multilabel_predictions,
+ force_fp32,
+ get_multihead_class_info,
+ get_hierarchical_predictions,
+)
+
+from helpers import load_test_dataset
+
+
+class TestClassificationDatasetAdapterInputParamsValidation:
+ @e2e_pytest_unit
+ def test_classification_dataset_adapter_init_params_validation(self):
+ """
+ Description:
+ Check ClassificationDatasetAdapter object initialization parameters validation
+
+ Input data:
+ ClassificationDatasetAdapter object initialization parameters with unexpected type
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ ClassificationDatasetAdapter initialization parameter
+ """
+ correct_values_dict = {}
+ unexpected_int = 1
+ unexpected_values = [
+ # Unexpected integer is specified as "train_ann_file" parameter
+ ("train_ann_file", unexpected_int),
+ # Empty string is specified as "train_ann_file" parameter
+ ("train_ann_file", ""),
+ # Path with null character is specified as "train_ann_file" parameter
+ ("train_ann_file", "./\0fake_data.json"),
+ # Path with non-printable character is specified as "train_ann_file" parameter
+ ("train_ann_file", "./\nfake_data.json"),
+ # Unexpected integer is specified as "train_data_root" parameter
+ ("train_data_root", unexpected_int),
+ # Empty string is specified as "train_data_root" parameter
+ ("train_data_root", ""),
+ # Path with null character is specified as "train_data_root" parameter
+ ("train_data_root", "./\0null_char"),
+ # Path with non-printable character is specified as "train_data_root" parameter
+ ("train_data_root", "./\non_printable_char"),
+ # Unexpected integer is specified as "val_ann_file" parameter
+ ("val_ann_file", unexpected_int),
+ # Empty string is specified as "val_ann_file" parameter
+ ("val_ann_file", ""),
+ # Path with null character is specified as "val_ann_file" parameter
+ ("val_ann_file", "./\0fake_data.json"),
+ # Path with non-printable character is specified as "val_ann_file" parameter
+ ("val_ann_file", "./\nfake_data.json"),
+ # Unexpected integer is specified as "val_data_root" parameter
+ ("val_data_root", unexpected_int),
+ # Empty string is specified as "val_data_root" parameter
+ ("val_data_root", ""),
+ # Path with null character is specified as "val_data_root" parameter
+ ("val_data_root", "./\0null_char"),
+ # Path with non-printable character is specified as "val_data_root" parameter
+ ("val_data_root", "./\non_printable_char"),
+ # Unexpected integer is specified as "test_ann_file" parameter
+ ("test_ann_file", unexpected_int),
+ # Empty string is specified as "test_ann_file" parameter
+ ("test_ann_file", ""),
+ # Path with null character is specified as "test_ann_file" parameter
+ ("test_ann_file", "./\0fake_data.json"),
+ # Path with non-printable character is specified as "test_ann_file" parameter
+ ("test_ann_file", "./\nfake_data.json"),
+ # Unexpected integer is specified as "test_data_root" parameter
+ ("test_data_root", unexpected_int),
+ # Empty string is specified as "test_data_root" parameter
+ ("test_data_root", ""),
+ # Path with null character is specified as "test_data_root" parameter
+ ("test_data_root", "./\0null_char"),
+ # Path with non-printable character is specified as "test_data_root" parameter
+ ("test_data_root", "./\non_printable_char"),
+ ]
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=ClassificationDatasetAdapter,
+ )
+
+
+class TestOTEClassificationDatasetInputParamsValidation:
+ @e2e_pytest_unit
+ def test_ote_classification_dataset_init_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationDataset object initialization parameters validation
+
+ Input data:
+ OTEClassificationDataset object initialization parameters with unexpected type
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ OTEClassificationDataset initialization parameter
+ """
+ dataset, labels_list = load_test_dataset()
+
+ correct_values_dict = {
+ "ote_dataset": dataset,
+ "labels": labels_list,
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "ote_dataset" parameter
+ ("ote_dataset", unexpected_str),
+ # Unexpected string is specified as "labels" parameter
+ ("labels", unexpected_str),
+ # Unexpected string is specified as nested label
+ ("labels", [labels_list[0], unexpected_str]),
+ # Unexpected string is specified as "multilabel" parameter
+ ("multilabel", unexpected_str),
+ # Unexpected string is specified as "hierarchical" parameter
+ ("hierarchical", unexpected_str),
+ # Unexpected string is specified as "mixed_cls_heads_info" parameter
+ ("mixed_cls_heads_info", unexpected_str),
+ # Unexpected string is specified as "keep_empty_label" parameter
+ ("keep_empty_label", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=OTEClassificationDataset,
+ )
+
+ @e2e_pytest_unit
+ def test_ote_classification_dataset_getitem_params_validation(self):
+ """
+ Description:
+ Check OTEClassificationDataset object "__getitem__" method input parameters validation
+
+ Input data:
+ "idx" non-integer parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "__getitem__" method
+ """
+ dataset, labels_list = load_test_dataset()
+ ote_classification_dataset = OTEClassificationDataset(
+ ote_dataset=dataset,
+ labels=labels_list
+ )
+ with pytest.raises(ValueError):
+ ote_classification_dataset.__getitem__(idx="unexpected string") # type: ignore
+
+
+class TestUtilsFunctionsParamsValidation:
+ @e2e_pytest_unit
+ def test_generate_label_schema_params_validation(self):
+ """
+ Description:
+ Check "get_multilabel_predictions" function input parameters validation
+
+ Input data:
+ "get_multilabel_predictions" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_multilabel_predictions" function
+ """
+ dataset, labels_list = load_test_dataset()
+
+ correct_values_dict = {
+ "not_empty_labels": labels_list,
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "not_empty_labels" parameter
+ ("not_empty_labels", unexpected_str),
+            # Unexpected string is specified as nested "not_empty_labels" element
+ ("not_empty_labels", [labels_list[0], unexpected_str]),
+ # Unexpected string is specified as "multilabel" parameter
+ ("multilabel", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=generate_label_schema,
+ )
+
+ @e2e_pytest_unit
+ def test_get_multihead_class_info_params_validation(self):
+ """
+ Description:
+ Check "get_multihead_class_info" function input parameters validation
+
+ Input data:
+ "label_schema" non-LabelSchemaEntity parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_multihead_class_info" function
+ """
+ with pytest.raises(ValueError):
+ get_multihead_class_info(label_schema=1) # type: ignore
+
+ @e2e_pytest_unit
+ def test_get_task_class_params_validation(self):
+ """
+ Description:
+ Check "get_task_class" function input parameters validation
+
+ Input data:
+ "path" non-string parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_task_class" function
+ """
+ with pytest.raises(ValueError):
+ get_task_class(path=1) # type: ignore
+
+ @e2e_pytest_unit
+ def test_reload_hyper_parameters_params_validation(self):
+ """
+ Description:
+ Check "reload_hyper_parameters" function input parameters validation
+
+ Input data:
+ "model_template" non-ModelTemplate parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "reload_hyper_parameters" function
+ """
+ with pytest.raises(ValueError):
+ reload_hyper_parameters(model_template="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+    def test_set_values_as_default_params_validation(self):
+ """
+ Description:
+ Check "set_values_as_default" function input parameters validation
+
+ Input data:
+ "parameters" non-ModelTemplate parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "set_values_as_default" function
+ """
+ with pytest.raises(ValueError):
+ set_values_as_default(parameters="unexpected string") # type: ignore
+
+ @e2e_pytest_unit
+    def test_force_fp32_params_validation(self):
+ """
+ Description:
+ Check "force_fp32" function input parameters validation
+
+ Input data:
+ "model" non-Module parameter
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "force_fp32" function
+ """
+ with pytest.raises(ValueError):
+ with force_fp32(model="unexpected string"): # type: ignore
+ pass
+
+ @e2e_pytest_unit
+    def test_preprocess_features_for_actmap_params_validation(self):
+ """
+ Description:
+ Check "preprocess_features_for_actmap" function input parameters validation
+
+ Input data:
+ "features" non-expected type object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "preprocess_features_for_actmap" function
+ """
+ with pytest.raises(ValueError):
+ preprocess_features_for_actmap(features=None) # type: ignore
+
+ @e2e_pytest_unit
+ def test_get_actmap_params_validation(self):
+ """
+ Description:
+ Check "get_actmap" function input parameters validation
+
+ Input data:
+ "get_actmap" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_actmap" function
+ """
+ correct_values_dict = {
+ "features": ["some", "features"],
+ "output_res": ("iterable", "object")
+ }
+ unexpected_values = [
+            # Unexpected "None" is specified as "features" parameter
+            ("features", None),
+            # Unexpected "None" is specified as "output_res" parameter
+            ("output_res", None),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_actmap,
+ )
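+ # A minimal sketch of what check_value_error_exception_raised is assumed
+ # to do: substitute each unexpected value into an otherwise-correct
+ # parameter set and assert that the call raises ValueError:
+ #
+ #     for key, value in unexpected_values:
+ #         params = dict(correct_parameters, **{key: value})
+ #         with pytest.raises(ValueError):
+ #             class_or_function(**params)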
+
+ @e2e_pytest_unit
+ def test_active_score_from_probs_params_validation(self):
+ """
+ Description:
+ Check "active_score_from_probs" function input parameters validation
+
+ Input data:
+ "predictions" non-expected type object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "active_score_from_probs" function
+ """
+ with pytest.raises(ValueError):
+ active_score_from_probs(predictions=None) # type: ignore
+
+ @e2e_pytest_unit
+ def test_sigmoid_numpy_params_validation(self):
+ """
+ Description:
+ Check "sigmoid_numpy" function input parameters validation
+
+ Input data:
+ "x" non-ndarray object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "sigmoid_numpy" function
+ """
+ with pytest.raises(ValueError):
+ sigmoid_numpy(x="unexpected str") # type: ignore
+
+ @e2e_pytest_unit
+ def test_softmax_numpy_params_validation(self):
+ """
+ Description:
+ Check "softmax_numpy" function input parameters validation
+
+ Input data:
+ "x" non-ndarray object
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "softmax_numpy" function
+ """
+ with pytest.raises(ValueError):
+ softmax_numpy(x="unexpected str") # type: ignore
+
+ @e2e_pytest_unit
+ def test_get_multiclass_predictions_params_validation(self):
+ """
+ Description:
+ Check "get_multiclass_predictions" function input parameters validation
+
+ Input data:
+ "get_multiclass_predictions" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_multiclass_predictions" function
+ """
+ label = LabelEntity(name="test label", domain=Domain.DETECTION)
+ correct_values_dict = {
+ "logits": np.random.randint(low=0, high=255, size=(10, 16, 3)),
+ "labels": [label]
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "features" parameter
+ ("logits", unexpected_str),
+ # Unexpected string is specified as "labels" parameter
+ ("labels", unexpected_str),
+ # Unexpected string is specified as nested label
+ ("labels", [label, unexpected_str]),
+ # Unexpected string is specified as "output_res" parameter
+ ("activate", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_multiclass_predictions,
+ )
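+ # The nested case ([label, unexpected_str]) assumes the decorator also
+ # validates container element types against the List[LabelEntity]
+ # annotation of get_multiclass_predictions (see utils.py below).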
+
+ @e2e_pytest_unit
+ def test_get_multilabel_predictions_params_validation(self):
+ """
+ Description:
+ Check "get_multilabel_predictions" function input parameters validation
+
+ Input data:
+ "get_multilabel_predictions" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_multilabel_predictions" function
+ """
+ label = LabelEntity(name="test label", domain=Domain.DETECTION)
+ correct_values_dict = {
+ "logits": np.random.randint(low=0, high=255, size=(10, 16, 3)),
+ "labels": [label]
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "features" parameter
+ ("logits", unexpected_str),
+ # Unexpected string is specified as "labels" parameter
+ ("labels", unexpected_str),
+ # Unexpected string is specified as nested label
+ ("labels", [label, unexpected_str]),
+ # Unexpected string is specified as "pos_thr" parameter
+ ("pos_thr", unexpected_str),
+ # Unexpected string is specified as "output_res" parameter
+ ("activate", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_multilabel_predictions,
+ )
+
+ @e2e_pytest_unit
+ def test_get_hierarchical_predictions_params_validation(self):
+ """
+ Description:
+ Check "get_hierarchical_predictions" function input parameters validation
+
+ Input data:
+ "get_hierarchical_predictions" unexpected type parameters
+
+ Expected results:
+ Test passes if ValueError exception is raised when unexpected type object is specified as
+ input parameter for "get_hierarchical_predictions" function
+ """
+ label = LabelEntity(name="test label", domain=Domain.DETECTION)
+ correct_values_dict = {
+ "logits": np.random.randint(low=0, high=255, size=(10, 16, 3)),
+ "labels": [label],
+ "label_schema": LabelSchemaEntity(),
+ "multihead_class_info": {"class": "info"},
+ }
+ unexpected_str = "unexpected string"
+ unexpected_values = [
+ # Unexpected string is specified as "features" parameter
+ ("logits", unexpected_str),
+ # Unexpected string is specified as "labels" parameter
+ ("labels", unexpected_str),
+ # Unexpected string is specified as nested label
+ ("labels", [label, unexpected_str]),
+ # Unexpected string is specified as "label_schema" parameter
+ ("label_schema", unexpected_str),
+ # Unexpected string is specified as "multihead_class_info" parameter
+ ("multihead_class_info", unexpected_str),
+ # Unexpected string is specified as "pos_thr" parameter
+ ("pos_thr", unexpected_str),
+ # Unexpected string is specified as "output_res" parameter
+ ("activate", unexpected_str),
+ ]
+
+ check_value_error_exception_raised(
+ correct_parameters=correct_values_dict,
+ unexpected_values=unexpected_values,
+ class_or_function=get_hierarchical_predictions,
+ )
diff --git a/external/deep-object-reid/tests/test_ote_api.py b/external/deep-object-reid/tests/test_ote_api.py
index 9db578f091d..134669d8a1c 100644
--- a/external/deep-object-reid/tests/test_ote_api.py
+++ b/external/deep-object-reid/tests/test_ote_api.py
@@ -136,7 +136,7 @@ def test_training_progress_tracking(default_task_setup):
def progress_callback(progress: float, score: Optional[float] = None):
training_progress_curve.append(progress)
- train_parameters = TrainParameters
+ train_parameters = TrainParameters()
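+ # Assigning the bare class meant update_progress below became a class
+ # attribute shared across tests; instantiating keeps the callback local.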
train_parameters.update_progress = progress_callback
output_model = ModelEntity(
dataset,
@@ -160,7 +160,7 @@ def test_inference_progress_tracking(default_task_setup):
def progress_callback(progress: int):
inference_progress_curve.append(progress)
- inference_parameters = InferenceParameters
+ inference_parameters = InferenceParameters()
inference_parameters.update_progress = progress_callback
task.infer(dataset.with_empty_annotations(), inference_parameters)
diff --git a/external/deep-object-reid/torchreid_tasks/inference_task.py b/external/deep-object-reid/torchreid_tasks/inference_task.py
index f648870ab6e..09d6c05ec15 100644
--- a/external/deep-object-reid/torchreid_tasks/inference_task.py
+++ b/external/deep-object-reid/torchreid_tasks/inference_task.py
@@ -43,6 +43,10 @@
from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType, IExportTask
from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask
from ote_sdk.usecases.tasks.interfaces.unload_interface import IUnload
+from ote_sdk.utils.argument_checks import (
+ DatasetParamTypeCheck,
+ check_input_parameters_type,
+)
from ote_sdk.utils.labels_utils import get_empty_label
from scripts.default_config import (get_default_config, imagedata_kwargs,
merge_from_files_with_base, model_kwargs)
@@ -64,6 +68,7 @@ class OTEClassificationInferenceTask(IInferenceTask, IEvaluationTask, IExportTas
task_environment: TaskEnvironment
+ @check_input_parameters_type()
def __init__(self, task_environment: TaskEnvironment):
logger.info("Loading OTEClassificationTask.")
self._scratch_space = tempfile.mkdtemp(prefix="ote-cls-scratch-")
@@ -186,6 +191,7 @@ def _patch_config(self, base_dir: str):
self._cfg.lr_finder.enable = self._hyperparams.learning_parameters.enable_lr_finder
self._cfg.train.early_stopping = self._hyperparams.learning_parameters.enable_early_stopping
+ @check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def infer(self, dataset: DatasetEntity,
inference_parameters: Optional[InferenceParameters] = None) -> DatasetEntity:
"""
@@ -265,6 +271,7 @@ def infer(self, dataset: DatasetEntity,
return dataset
+ @check_input_parameters_type()
def evaluate(
self, output_resultset: ResultSetEntity, evaluation_metric: Optional[str] = None
):
@@ -272,6 +279,7 @@ def evaluate(
logger.info(f"Computes performance of {performance}")
output_resultset.performance = performance
+ @check_input_parameters_type()
def export(self, export_type: ExportType, output_model: ModelEntity):
assert export_type == ExportType.OPENVINO
output_model.model_format = ModelFormat.OPENVINO
diff --git a/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py b/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py
index 3abe6e0a8cc..8c99f4acf14 100644
--- a/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py
+++ b/external/deep-object-reid/torchreid_tasks/model_wrappers/classification.py
@@ -14,7 +14,9 @@
import cv2
import numpy as np
-from typing import Any, Dict
+from typing import Any, Dict, Iterable, Union
+from ote_sdk.utils.argument_checks import check_input_parameters_type
+
try:
from openvino.model_zoo.model_api.models.classification import Classification
@@ -64,6 +66,7 @@ def _get_outputs(self):
'labels must match ({} != {})'.format(layer_shape[1], len(self.labels)))
return layer_name
+ @check_input_parameters_type()
def preprocess(self, image: np.ndarray):
meta = {'original_shape': image.shape}
resized_image = self.resize(image, (self.w, self.h))
@@ -76,6 +79,7 @@ def preprocess(self, image: np.ndarray):
dict_inputs = {self.image_blob_name: resized_image}
return dict_inputs, meta
+ @check_input_parameters_type()
def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]):
logits = outputs[self.out_layer_name].squeeze()
if self.multilabel:
@@ -85,6 +89,7 @@ def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]):
return get_multiclass_predictions(logits)
+ @check_input_parameters_type()
def postprocess_aux_outputs(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]):
features = preprocess_features_for_actmap(outputs['features'])
actmap = get_actmap(features[0], (metadata['original_shape'][1], metadata['original_shape'][0]))
@@ -102,8 +107,8 @@ def postprocess_aux_outputs(self, outputs: Dict[str, np.ndarray], metadata: Dict
return actmap, repr_vector, act_score
-
-def preprocess_features_for_actmap(features):
+@check_input_parameters_type()
+def preprocess_features_for_actmap(features: Union[np.ndarray, Iterable, int, float]):
features = np.mean(features, axis=1)
b, h, w = features.shape
features = features.reshape(b, h * w)
@@ -113,7 +118,9 @@ def preprocess_features_for_actmap(features):
return features
-def get_actmap(features, output_res):
+@check_input_parameters_type()
+def get_actmap(features: Union[np.ndarray, Iterable, int, float],
+ output_res: Union[tuple, list]):
am = cv2.resize(features, output_res)
am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) + 1e-12)
am = np.uint8(np.floor(am))
@@ -121,16 +128,19 @@ def get_actmap(features, output_res):
return am
+@check_input_parameters_type()
def sigmoid_numpy(x: np.ndarray):
return 1. / (1. + np.exp(-1. * x))
+@check_input_parameters_type()
def softmax_numpy(x: np.ndarray):
x = np.exp(x)
x /= np.sum(x)
return x
+@check_input_parameters_type()
def get_hierarchical_predictions(logits: np.ndarray, multihead_class_info: dict,
pos_thr: float = 0.5, activate: bool = True):
predicted_labels = []
@@ -157,6 +167,7 @@ def get_hierarchical_predictions(logits: np.ndarray, multihead_class_info: dict,
return predicted_labels
+@check_input_parameters_type()
def get_multiclass_predictions(logits: np.ndarray, activate: bool = True):
index = np.argmax(logits)
@@ -165,6 +176,7 @@ def get_multiclass_predictions(logits: np.ndarray, activate: bool = True):
return [(index, logits[index])]
+@check_input_parameters_type()
def get_multilabel_predictions(logits: np.ndarray, pos_thr: float = 0.5, activate: bool = True):
if activate:
logits = sigmoid_numpy(logits)
diff --git a/external/deep-object-reid/torchreid_tasks/monitors.py b/external/deep-object-reid/torchreid_tasks/monitors.py
index 1cadd875f6f..bef363f3fd6 100644
--- a/external/deep-object-reid/torchreid_tasks/monitors.py
+++ b/external/deep-object-reid/torchreid_tasks/monitors.py
@@ -17,6 +17,10 @@
import abc
from torch.utils.tensorboard import SummaryWriter
+from ote_sdk.utils.argument_checks import (
+ DirectoryPathCheck,
+ check_input_parameters_type,
+)
class IMetricsMonitor(metaclass=abc.ABCMeta):
@@ -72,10 +76,12 @@ def reset(self):
class MetricsMonitor(IMetricsMonitor):
+ @check_input_parameters_type({"log_dir": DirectoryPathCheck})
def __init__(self, log_dir):
self.log_dir = log_dir
self.tb = None
+ @check_input_parameters_type()
def add_scalar(self, capture: str, value: float, timestamp: int):
if not self.tb:
self.tb = SummaryWriter(self.log_dir)
@@ -102,6 +108,7 @@ class DefaultMetricsMonitor(IMetricsMonitor):
def __init__(self):
self.metrics_dict = {}
+ @check_input_parameters_type()
def add_scalar(self, capture: str, value: float, timestamp: int):
if capture in self.metrics_dict:
self.metrics_dict[capture].append((timestamp, value))
@@ -111,9 +118,11 @@ def add_scalar(self, capture: str, value: float, timestamp: int):
def get_metric_keys(self):
return self.metrics_dict.keys()
+ @check_input_parameters_type()
def get_metric_values(self, capture: str):
return [item[1] for item in self.metrics_dict[capture]]
+ @check_input_parameters_type()
def get_metric_timestamps(self, capture: str):
return [item[0] for item in self.metrics_dict[capture]]
diff --git a/external/deep-object-reid/torchreid_tasks/nncf_task.py b/external/deep-object-reid/torchreid_tasks/nncf_task.py
index fc51d02c6c6..d6ec45fb894 100644
--- a/external/deep-object-reid/torchreid_tasks/nncf_task.py
+++ b/external/deep-object-reid/torchreid_tasks/nncf_task.py
@@ -31,6 +31,10 @@
from ote_sdk.entities.train_parameters import default_progress_callback
from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType
from ote_sdk.usecases.tasks.interfaces.optimization_interface import IOptimizationTask, OptimizationType
+from ote_sdk.utils.argument_checks import (
+ DatasetParamTypeCheck,
+ check_input_parameters_type,
+)
from scripts.default_config import imagedata_kwargs, lr_scheduler_kwargs, optimizer_kwargs
from torchreid.apis.training import run_training
from torchreid.integration.nncf.compression import check_nncf_is_enabled, is_nncf_state, wrap_nncf_model
@@ -47,6 +51,7 @@
class OTEClassificationNNCFTask(OTEClassificationInferenceTask, IOptimizationTask):
+ @check_input_parameters_type()
def __init__(self, task_environment: TaskEnvironment):
""""
Task for compressing classification models using NNCF.
@@ -153,12 +158,13 @@ def _load_aux_models_data(self, model: ModelEntity):
aux_models_data.append(model_data)
return aux_models_data
+ @check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def optimize(
self,
optimization_type: OptimizationType,
dataset: DatasetEntity,
output_model: ModelEntity,
- optimization_parameters: Optional[OptimizationParameters],
+ optimization_parameters: Optional[OptimizationParameters] = None,
):
""" Optimize a model on a dataset """
if optimization_type is not OptimizationType.NNCF:
@@ -250,6 +256,7 @@ def optimize(
output_model.optimization_methods = self._optimization_methods
output_model.precision = self._precision
+ @check_input_parameters_type()
def save_model(self, output_model: ModelEntity):
state_dict = None
if self._compression_ctrl is not None:
@@ -259,6 +266,7 @@ def save_model(self, output_model: ModelEntity):
}
self._save_model(output_model, state_dict)
+ @check_input_parameters_type()
def export(self, export_type: ExportType, output_model: ModelEntity):
if self._compression_ctrl is None:
super().export(export_type, output_model)
diff --git a/external/deep-object-reid/torchreid_tasks/openvino_task.py b/external/deep-object-reid/torchreid_tasks/openvino_task.py
index 8687fbaff64..bffb7ccacc4 100644
--- a/external/deep-object-reid/torchreid_tasks/openvino_task.py
+++ b/external/deep-object-reid/torchreid_tasks/openvino_task.py
@@ -52,6 +52,10 @@
IOptimizationTask,
OptimizationType,
)
+from ote_sdk.utils.argument_checks import (
+ DatasetParamTypeCheck,
+ check_input_parameters_type,
+)
from compression.api import DataLoader
from compression.engines.ie_engine import IEEngine
@@ -74,6 +78,7 @@
class OpenVINOClassificationInferencer(BaseInferencer):
+ @check_input_parameters_type()
def __init__(
self,
hparams: OTEClassificationParameters,
@@ -109,15 +114,18 @@ def __init__(
self.converter = ClassificationToAnnotationConverter(self.label_schema)
+ @check_input_parameters_type()
def pre_process(self, image: np.ndarray) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
return self.model.preprocess(image)
+ @check_input_parameters_type()
def post_process(self, prediction: Dict[str, np.ndarray], metadata: Dict[str, Any]) -> Tuple[AnnotationSceneEntity,
np.ndarray, np.ndarray]:
prediction = self.model.postprocess(prediction, metadata)
return self.converter.convert_to_annotation(prediction, metadata)
+ @check_input_parameters_type()
def predict(self, image: np.ndarray) -> Tuple[AnnotationSceneEntity, np.ndarray, np.ndarray]:
image, metadata = self.pre_process(image)
raw_predictions = self.forward(image)
@@ -126,17 +134,20 @@ def predict(self, image: np.ndarray) -> Tuple[AnnotationSceneEntity, np.ndarray,
return predictions, features, repr_vectors, act_score
+ @check_input_parameters_type()
def forward(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
return self.model.infer_sync(inputs)
class OTEOpenVinoDataLoader(DataLoader):
+ @check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def __init__(self, dataset: DatasetEntity, inferencer: BaseInferencer):
super().__init__(config=None)
self.dataset = dataset
self.inferencer = inferencer
- def __getitem__(self, index):
+ @check_input_parameters_type()
+ def __getitem__(self, index: int):
image = self.dataset[index].numpy
annotation = self.dataset[index].annotation_scene
inputs, metadata = self.inferencer.pre_process(image)
@@ -148,6 +159,7 @@ def __len__(self):
class OpenVINOClassificationTask(IDeploymentTask, IInferenceTask, IEvaluationTask, IOptimizationTask):
+ @check_input_parameters_type()
def __init__(self, task_environment: TaskEnvironment):
self.task_environment = task_environment
self.hparams = self.task_environment.get_hyper_parameters(OTEClassificationParameters)
@@ -160,6 +172,7 @@ def load_inferencer(self) -> OpenVINOClassificationInferencer:
self.model.get_data("openvino.xml"),
self.model.get_data("openvino.bin"))
+ @check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def infer(self, dataset: DatasetEntity,
inference_parameters: Optional[InferenceParameters] = None) -> DatasetEntity:
update_progress_callback = default_progress_callback
@@ -187,6 +200,7 @@ def infer(self, dataset: DatasetEntity,
update_progress_callback(int(i / dataset_size * 100))
return dataset
+ @check_input_parameters_type()
def evaluate(self,
output_result_set: ResultSetEntity,
evaluation_metric: Optional[str] = None):
@@ -195,6 +209,7 @@ def evaluate(self,
'but parameter is ignored. Use accuracy instead.')
output_result_set.performance = MetricsHelper.compute_accuracy(output_result_set).get_performance()
+ @check_input_parameters_type()
def deploy(self,
output_model: ModelEntity) -> None:
logger.info('Deploying the model')
@@ -227,11 +242,12 @@ def deploy(self,
output_model.exportable_code = zip_buffer.getvalue()
logger.info('Deploying completed')
+ @check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def optimize(self,
optimization_type: OptimizationType,
dataset: DatasetEntity,
output_model: ModelEntity,
- optimization_parameters: Optional[OptimizationParameters]):
+ optimization_parameters: Optional[OptimizationParameters] = None):
if optimization_type is not OptimizationType.POT:
raise ValueError("POT is the only supported optimization type for OpenVino models")
diff --git a/external/deep-object-reid/torchreid_tasks/train_task.py b/external/deep-object-reid/torchreid_tasks/train_task.py
index d6ed8b641f3..adeed8cbef1 100644
--- a/external/deep-object-reid/torchreid_tasks/train_task.py
+++ b/external/deep-object-reid/torchreid_tasks/train_task.py
@@ -35,12 +35,17 @@
from torchreid_tasks.utils import (OTEClassificationDataset, TrainingProgressCallback)
from torchreid.ops import DataParallel
from torchreid.utils import load_pretrained_weights, set_random_seed
+from ote_sdk.utils.argument_checks import (
+ DatasetParamTypeCheck,
+ check_input_parameters_type,
+)
logger = logging.getLogger(__name__)
class OTEClassificationTrainingTask(OTEClassificationInferenceTask, ITrainingTask):
+ @check_input_parameters_type()
def __init__(self, task_environment: TaskEnvironment):
super().__init__(task_environment)
self._aux_model_snap_paths = {}
@@ -55,6 +60,7 @@ def cancel_training(self):
logger.info("Cancel training requested.")
self.stop_callback.stop()
+ @check_input_parameters_type()
def save_model(self, output_model: ModelEntity):
for name, path in self._aux_model_snap_paths.items():
with open(path, 'rb') as read_file:
@@ -78,6 +84,7 @@ def _generate_training_metrics_group(self) -> Optional[List[MetricsGroup]]:
return output
+ @check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def train(self, dataset: DatasetEntity, output_model: ModelEntity,
train_parameters: Optional[TrainParameters] = None):
""" Trains a model on a dataset """
diff --git a/external/deep-object-reid/torchreid_tasks/utils.py b/external/deep-object-reid/torchreid_tasks/utils.py
index cec3a974abe..d2adf9a56ae 100644
--- a/external/deep-object-reid/torchreid_tasks/utils.py
+++ b/external/deep-object-reid/torchreid_tasks/utils.py
@@ -24,7 +24,7 @@
import time
from os import path as osp
from operator import itemgetter
-from typing import List
+from typing import Iterable, List, Union
import cv2 as cv
import numpy as np
@@ -38,11 +38,18 @@
from ote_sdk.entities.label import Domain, LabelEntity
from ote_sdk.entities.label_schema import (LabelGroup, LabelGroupType,
LabelSchemaEntity)
+from ote_sdk.entities.model_template import ModelTemplate
from ote_sdk.entities.scored_label import ScoredLabel
from ote_sdk.entities.shapes.rectangle import Rectangle
from ote_sdk.entities.subset import Subset
from ote_sdk.entities.train_parameters import UpdateProgressCallback
from ote_sdk.usecases.reporting.time_monitor_callback import TimeMonitorCallback
+from ote_sdk.utils.argument_checks import (
+ DatasetParamTypeCheck,
+ OptionalDirectoryPathCheck,
+ check_input_parameters_type,
+)
+from torch.nn.modules import Module
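+# NOTE (assumption): check_input_parameters_type() validates each call
+# argument against the decorated function's type annotations and raises
+# ValueError on mismatch; the optional dict maps selected parameter names
+# to custom checks such as DatasetParamTypeCheck or OptionalDirectoryPathCheck.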
from torchreid.utils import set_model_attr, get_model_attr
@@ -53,6 +60,12 @@ class ClassificationType(Enum):
class ClassificationDatasetAdapter(DatasetEntity):
+ @check_input_parameters_type({"train_ann_file": OptionalDirectoryPathCheck,
+ "train_data_root": OptionalDirectoryPathCheck,
+ "val_ann_file": OptionalDirectoryPathCheck,
+ "val_data_root": OptionalDirectoryPathCheck,
+ "test_ann_file": OptionalDirectoryPathCheck,
+ "test_data_root": OptionalDirectoryPathCheck})
def __init__(self,
train_ann_file=None,
train_data_root=None,
@@ -202,7 +215,8 @@ def generate_label_schema(self):
return label_schema
-def generate_label_schema(not_empty_labels, multilabel=False):
+@check_input_parameters_type()
+def generate_label_schema(not_empty_labels: List[LabelEntity], multilabel: bool = False):
assert len(not_empty_labels) > 1
label_schema = LabelSchemaEntity()
@@ -218,6 +232,7 @@ def generate_label_schema(not_empty_labels, multilabel=False):
return label_schema
+@check_input_parameters_type()
def get_multihead_class_info(label_schema: LabelSchemaEntity):
all_groups = label_schema.get_groups(include_empty=False)
all_groups_str = []
@@ -260,8 +275,9 @@ def get_multihead_class_info(label_schema: LabelSchemaEntity):
class OTEClassificationDataset:
- def __init__(self, ote_dataset: DatasetEntity, labels, multilabel=False, hierarchical=False,
- mixed_cls_heads_info={}, keep_empty_label=False):
+ @check_input_parameters_type({"ote_dataset": DatasetParamTypeCheck})
+ def __init__(self, ote_dataset: DatasetEntity, labels: List[LabelEntity], multilabel: bool = False,
+ hierarchical: bool = False, mixed_cls_heads_info: dict = {}, keep_empty_label: bool = False):
super().__init__()
self.ote_dataset = ote_dataset
self.multilabel = multilabel
@@ -313,7 +329,8 @@ def __init__(self, ote_dataset: DatasetEntity, labels, multilabel=False, hierarc
else:
self.annotation.append({'label': class_indices[0]})
- def __getitem__(self, idx):
+ @check_input_parameters_type()
+ def __getitem__(self, idx: int):
sample = self.ote_dataset[idx].numpy # This returns 8-bit numpy array of shape (height, width, RGB)
label = self.annotation[idx]['label']
return {'img': sample, 'label': label}
@@ -328,13 +345,15 @@ def get_classes(self):
return self.label_names
-def get_task_class(path):
+@check_input_parameters_type()
+def get_task_class(path: str):
module_name, class_name = path.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, class_name)
-def reload_hyper_parameters(model_template):
+@check_input_parameters_type()
+def reload_hyper_parameters(model_template: ModelTemplate):
""" This function copies template.yaml file and its configuration.yaml dependency to temporal folder.
Then it re-loads hyper parameters from copied template.yaml file.
This function should not be used in general case, it is assumed that
@@ -353,7 +372,8 @@ def reload_hyper_parameters(model_template):
assert model_template.hyper_parameters.data
-def set_values_as_default(parameters):
+@check_input_parameters_type()
+def set_values_as_default(parameters: dict):
for v in parameters.values():
if isinstance(v, dict) and 'value' not in v:
set_values_as_default(v)
@@ -363,7 +383,8 @@ def set_values_as_default(parameters):
@contextmanager
-def force_fp32(model):
+@check_input_parameters_type()
+def force_fp32(model: Module):
mix_precision_status = get_model_attr(model, 'mix_precision')
set_model_attr(model, 'mix_precision', False)
try:
@@ -435,7 +456,8 @@ def on_initialization_end(self):
self.update_progress_callback(self.get_progress())
-def preprocess_features_for_actmap(features):
+@check_input_parameters_type()
+def preprocess_features_for_actmap(features: Union[np.ndarray, Iterable, int, float]):
features = np.mean(features, axis=1)
b, h, w = features.shape
features = features.reshape(b, h * w)
@@ -445,7 +467,9 @@ def preprocess_features_for_actmap(features):
return features
-def get_actmap(features, output_res):
+@check_input_parameters_type()
+def get_actmap(features: Union[np.ndarray, Iterable, int, float],
+ output_res: Union[tuple, list]):
am = cv.resize(features, output_res)
am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) + 1e-12)
am = np.uint8(np.floor(am))
@@ -453,22 +477,26 @@ def get_actmap(features, output_res):
return am
-def active_score_from_probs(predictions):
+@check_input_parameters_type()
+def active_score_from_probs(predictions: Union[np.ndarray, Iterable, int, float]):
top_idxs = np.argpartition(predictions, -2)[-2:]
top_probs = predictions[top_idxs]
return np.max(top_probs) - np.min(top_probs)
+@check_input_parameters_type()
def sigmoid_numpy(x: np.ndarray):
return 1. / (1. + np.exp(-1. * x))
+@check_input_parameters_type()
def softmax_numpy(x: np.ndarray):
x = np.exp(x)
x /= np.sum(x)
return x
+@check_input_parameters_type()
def get_multiclass_predictions(logits: np.ndarray, labels: List[LabelEntity],
activate: bool = True) -> List[ScoredLabel]:
i = np.argmax(logits)
@@ -477,6 +505,7 @@ def get_multiclass_predictions(logits: np.ndarray, labels: List[LabelEntity],
return [ScoredLabel(labels[i], probability=float(logits[i]))]
+@check_input_parameters_type()
def get_multilabel_predictions(logits: np.ndarray, labels: List[LabelEntity],
pos_thr: float = 0.5, activate: bool = True) -> List[ScoredLabel]:
if activate:
@@ -490,6 +519,7 @@ def get_multilabel_predictions(logits: np.ndarray, labels: List[LabelEntity],
return item_labels
+@check_input_parameters_type()
def get_hierarchical_predictions(logits: np.ndarray, labels: List[LabelEntity],
label_schema: LabelSchemaEntity, multihead_class_info: dict,
pos_thr: float = 0.5, activate: bool = True) -> List[ScoredLabel]: