Merge pull request #1004 from openvinotoolkit/vsaltykovx/add_mmsegmentation_params_validation_tests

[OTE] Added mmsegmentation input parameters validation and tests
goodsong81 authored Apr 27, 2022
2 parents 6f903d9 + 7930a38 commit 91463b3
Showing 25 changed files with 3,300 additions and 50 deletions.
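The pattern applied throughout the diff: each public entry point gains a check_input_parameters_type decorator from ote_sdk.utils.argument_checks, optionally mapping individual arguments to dedicated validators such as DirectoryPathCheck or DatasetParamTypeCheck. A minimal usage sketch follows; set_batch_size and prepare_run are hypothetical functions, and the assumption (suggested by the names and the PR title, not stated in this diff) is that arguments are checked against their type annotations unless a custom check is supplied.

from ote_sdk.entities.datasets import DatasetEntity
from ote_sdk.utils.argument_checks import (
    DatasetParamTypeCheck,
    DirectoryPathCheck,
    check_input_parameters_type,
)


# Bare decorator: each argument is presumably validated against its type
# annotation before the function body runs.
@check_input_parameters_type()
def set_batch_size(config: dict, batch_size: int) -> None:
    config["samples_per_gpu"] = batch_size


# Per-argument overrides: the named parameters get a dedicated validator
# (DirectoryPathCheck for work_dir, DatasetParamTypeCheck for dataset)
# instead of the default annotation-based check.
@check_input_parameters_type({"work_dir": DirectoryPathCheck,
                              "dataset": DatasetParamTypeCheck})
def prepare_run(work_dir: str, dataset: DatasetEntity) -> None:
    ...  # body omitted; only the decorator usage matters here
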
@@ -20,28 +20,35 @@
import os
import tempfile
from collections import defaultdict
from typing import List, Optional
from typing import List, Optional, Sequence, Union

from mmcv import Config, ConfigDict
from ote_sdk.entities.datasets import DatasetEntity
from ote_sdk.entities.label import LabelEntity
from ote_sdk.usecases.reporting.time_monitor_callback import TimeMonitorCallback
from ote_sdk.utils.argument_checks import (
DatasetParamTypeCheck,
DirectoryPathCheck,
check_input_parameters_type,
)

from .configuration import OTESegmentationConfig


logger = logging.getLogger(__name__)


@check_input_parameters_type()
def is_epoch_based_runner(runner_config: ConfigDict):
return 'Epoch' in runner_config.type


@check_input_parameters_type({"work_dir": DirectoryPathCheck})
def patch_config(config: Config,
work_dir: str,
labels: List[LabelEntity],
random_seed: Optional[int] = None,
distributed=False):
distributed: bool = False):
# Set runner if not defined.
if 'runner' not in config:
config.runner = {'type': 'IterBasedRunner'}
@@ -111,6 +118,7 @@ def patch_config(config: Config,
config.seed = random_seed


@check_input_parameters_type()
def set_hyperparams(config: Config, hyperparams: OTESegmentationConfig):
config.data.samples_per_gpu = int(hyperparams.learning_parameters.batch_size)
config.data.workers_per_gpu = int(hyperparams.learning_parameters.num_workers)
@@ -138,7 +146,8 @@ def set_hyperparams(config: Config, hyperparams: OTESegmentationConfig):
rescale_num_iterations(config, schedule_scale)


def rescale_num_iterations(config: Config, schedule_scale: float):
@check_input_parameters_type()
def rescale_num_iterations(config: Union[Config, ConfigDict], schedule_scale: float):
# rescale number of iterations for lr scheduler
if config.lr_config.policy == 'customstep':
config.lr_config.step = [int(schedule_scale * step) for step in config.lr_config.step]
@@ -174,7 +183,9 @@ def rescale_num_iterations(config: Config, schedule_scale: float):
config.model[head_type] = heads


def patch_adaptive_repeat_dataset(config: Config, num_samples: int, decay: float = 0.002, factor: float = 10):
@check_input_parameters_type()
def patch_adaptive_repeat_dataset(
config: Union[Config, ConfigDict], num_samples: int, decay: float = 0.002, factor: float = 10):
if config.data.train.type != 'RepeatDataset':
return

@@ -203,12 +214,15 @@ def patch_adaptive_repeat_dataset(config: Config, num_samples: int, decay: float
rescale_num_iterations(config, schedule_scale)


@check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def prepare_for_testing(config: Config, dataset: DatasetEntity) -> Config:
config = copy.deepcopy(config)
config.data.test.ote_dataset = dataset
return config


@check_input_parameters_type({"train_dataset": DatasetParamTypeCheck,
"val_dataset": DatasetParamTypeCheck})
def prepare_for_training(config: Config, train_dataset: DatasetEntity, val_dataset: DatasetEntity,
time_monitor: TimeMonitorCallback, learning_curves: defaultdict) -> Config:
config = copy.deepcopy(config)
@@ -229,7 +243,8 @@ def prepare_for_training(config: Config, train_dataset: DatasetEntity, val_datas
return config


def config_to_string(config: Config) -> str:
@check_input_parameters_type()
def config_to_string(config: Union[Config, ConfigDict]) -> str:
"""
Convert a full mmsegmentation config to a string.
@@ -248,6 +263,7 @@ def config_to_string(config: Config) -> str:
return Config(config_copy).pretty_text


@check_input_parameters_type()
def config_from_string(config_string: str) -> Config:
"""
Generate an mmsegmentation config dict object from a string.
@@ -262,7 +278,8 @@ def config_from_string(config_string: str) -> Config:
return Config.fromfile(temp_file.name)


def save_config_to_file(config: Config):
@check_input_parameters_type()
def save_config_to_file(config: Union[Config, ConfigDict]):
""" Dump the full config to a file. Filename is 'config.py', it is saved in the current work_dir. """

filepath = os.path.join(config.work_dir, 'config.py')
@@ -271,7 +288,8 @@ def save_config_to_file(config: Config):
f.write(config_string)


def prepare_work_dir(config: Config) -> str:
@check_input_parameters_type()
def prepare_work_dir(config: Union[Config, ConfigDict]) -> str:
base_work_dir = config.work_dir
checkpoint_dirs = glob.glob(os.path.join(base_work_dir, "checkpoints_round_*"))
train_round_checkpoint_dir = os.path.join(base_work_dir, f"checkpoints_round_{len(checkpoint_dirs)}")
@@ -286,6 +304,7 @@ def prepare_work_dir(config: Config) -> str:
return train_round_checkpoint_dir


@check_input_parameters_type()
def set_distributed_mode(config: Config, distributed: bool):
if distributed:
return
@@ -311,6 +330,7 @@ def _replace_syncbn(_node, _norm_cfg):
_replace_syncbn(head, norm_cfg)


@check_input_parameters_type()
def set_data_classes(config: Config, label_names: List[str]):
# Save labels in data configs.
for subset in ('train', 'val', 'test'):
@@ -322,6 +342,7 @@ def set_data_classes(config: Config, label_names: List[str]):
config.data[subset].classes = label_names


@check_input_parameters_type()
def set_num_classes(config: Config, num_classes: int):
assert num_classes > 1

@@ -340,7 +361,8 @@ def set_num_classes(config: Config, num_classes: int):
head.num_classes = num_classes


def patch_color_conversion(pipeline):
@check_input_parameters_type()
def patch_color_conversion(pipeline: Sequence[dict]):
# Default data format for OTE is RGB, while mmseg uses BGR, so negate the color conversion flag.
for pipeline_step in pipeline:
if pipeline_step.type == 'Normalize':
@@ -353,6 +375,7 @@ def patch_color_conversion(pipeline):
patch_color_conversion(pipeline_step.transforms)


@check_input_parameters_type()
def patch_datasets(config: Config):
assert 'data' in config
for subset in ('train', 'val', 'test'):
@@ -375,7 +398,8 @@ def patch_datasets(config: Config):
patch_color_conversion(cfg.pipeline)


def remove_from_config(config, key: str):
@check_input_parameters_type()
def remove_from_config(config: Union[Config, ConfigDict], key: str):
if key in config:
if isinstance(config, Config):
del config._cfg_dict[key]

@@ -42,6 +42,10 @@
from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType, IExportTask
from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask
from ote_sdk.usecases.tasks.interfaces.unload_interface import IUnload
from ote_sdk.utils.argument_checks import (
DatasetParamTypeCheck,
check_input_parameters_type,
)


from mmseg.apis import export_model
@@ -61,6 +65,7 @@
class OTESegmentationInferenceTask(IInferenceTask, IExportTask, IEvaluationTask, IUnload):
task_environment: TaskEnvironment

@check_input_parameters_type()
def __init__(self, task_environment: TaskEnvironment):
""""
Task for training semantic segmentation models using OTESegmentation.
@@ -160,6 +165,7 @@ def _create_model(config: Config, from_scratch: bool = False):

return model

@check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def infer(self, dataset: DatasetEntity,
inference_parameters: Optional[InferenceParameters] = None) -> DatasetEntity:
""" Analyzes a dataset using the latest inference model. """
@@ -261,6 +267,7 @@ def _infer_segmentor(self,

self._add_predictions_to_dataset_item(result[0], repr_vector, dataset_item, save_mask_visualization)

@check_input_parameters_type()
def evaluate(self, output_result_set: ResultSetEntity, evaluation_metric: Optional[str] = None):
""" Computes performance on a resultset """

@@ -304,6 +311,7 @@ def unload(self):
logger.warning(f"Done unloading. "
f"Torch is still occupying {torch.cuda.memory_allocated()} bytes of GPU memory")

@check_input_parameters_type()
def export(self, export_type: ExportType, output_model: ModelEntity):
assert export_type == ExportType.OPENVINO


@@ -14,17 +14,20 @@

import cv2
import numpy as np
from typing import Any, Dict
from typing import Any, Dict, Optional

from openvino.model_zoo.model_api.models import SegmentationModel
from openvino.model_zoo.model_api.models.types import NumericalValue
from openvino.model_zoo.model_api.adapters.model_adapter import ModelAdapter
from ote_sdk.utils.argument_checks import check_input_parameters_type
from ote_sdk.utils.segmentation_utils import create_hard_prediction_from_soft_prediction


class BlurSegmentation(SegmentationModel):
__model__ = 'blur_segmentation'

def __init__(self, model_adapter, configuration=None, preload=False):
@check_input_parameters_type()
def __init__(self, model_adapter: ModelAdapter, configuration: Optional[dict] = None, preload: bool = False):
super().__init__(model_adapter, configuration, preload)

@classmethod
@@ -53,6 +56,7 @@ def _get_outputs(self):

return layer_name

@check_input_parameters_type()
def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]):
predictions = outputs[self.output_blob_name].squeeze()
soft_prediction = np.transpose(predictions, axes=(1, 2, 0))

@@ -38,6 +38,10 @@
from ote_sdk.usecases.tasks.interfaces.optimization_interface import IOptimizationTask
from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationParameters
from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType
from ote_sdk.utils.argument_checks import (
DatasetParamTypeCheck,
check_input_parameters_type,
)

from mmseg.apis import train_segmentor
from segmentation_tasks.apis.segmentation import OTESegmentationInferenceTask
@@ -57,6 +61,7 @@


class OTESegmentationNNCFTask(OTESegmentationInferenceTask, IOptimizationTask):
@check_input_parameters_type()
def __init__(self, task_environment: TaskEnvironment):
""""
Task for compressing object detection models using NNCF.
@@ -167,12 +172,13 @@ def _create_compressed_model(self, dataset, config):
dataloader_for_init=init_dataloader,
is_accuracy_aware=is_acc_aware_training_set)

@check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def optimize(
self,
optimization_type: OptimizationType,
dataset: DatasetEntity,
output_model: ModelEntity,
optimization_parameters: Optional[OptimizationParameters],
optimization_parameters: Optional[OptimizationParameters] = None,
):
if optimization_type is not OptimizationType.NNCF:
raise RuntimeError("NNCF is the only supported optimization")
@@ -219,6 +225,7 @@ def optimize(

self._is_training = False

@check_input_parameters_type()
def export(self, export_type: ExportType, output_model: ModelEntity):
if self._compression_ctrl is None:
super().export(export_type, output_model)
@@ -228,6 +235,7 @@ def export(self, export_type: ExportType, output_model: ModelEntity):
super().export(export_type, output_model)
self._model.enable_dynamic_graph_building()

@check_input_parameters_type()
def save_model(self, output_model: ModelEntity):
buffer = io.BytesIO()
hyperparams = self._task_environment.get_hyper_parameters(OTESegmentationConfig)

@@ -48,6 +48,10 @@
from ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask
from ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask
from ote_sdk.usecases.tasks.interfaces.optimization_interface import IOptimizationTask, OptimizationType
from ote_sdk.utils.argument_checks import (
DatasetParamTypeCheck,
check_input_parameters_type,
)

from compression.api import DataLoader
from compression.engines.ie_engine import IEEngine
@@ -67,6 +71,7 @@


class OpenVINOSegmentationInferencer(BaseInferencer):
@check_input_parameters_type()
def __init__(
self,
hparams: OTESegmentationConfig,
@@ -93,9 +98,11 @@ def __init__(
self.model = Model.create_model(hparams.postprocessing.class_name.value, model_adapter, self.configuration, preload=True)
self.converter = SegmentationToAnnotationConverter(label_schema)

@check_input_parameters_type()
def pre_process(self, image: np.ndarray) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
return self.model.preprocess(image)

@check_input_parameters_type()
def post_process(self, prediction: Dict[str, np.ndarray], metadata: Dict[str, Any]) -> AnnotationSceneEntity:
hard_prediction = self.model.postprocess(prediction, metadata)
soft_prediction = metadata['soft_predictions']
@@ -105,16 +112,19 @@ def post_process(self, prediction: Dict[str, np.ndarray], metadata: Dict[str, An

return predicted_scene, soft_prediction, feature_vector

@check_input_parameters_type()
def forward(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
return self.model.infer_sync(inputs)


class OTEOpenVinoDataLoader(DataLoader):
@check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def __init__(self, dataset: DatasetEntity, inferencer: BaseInferencer):
self.dataset = dataset
self.inferencer = inferencer

def __getitem__(self, index):
@check_input_parameters_type()
def __getitem__(self, index: int):
image = self.dataset[index].numpy
annotation = self.dataset[index].annotation_scene
inputs, metadata = self.inferencer.pre_process(image)
@@ -126,6 +136,7 @@ def __len__(self):


class OpenVINOSegmentationTask(IDeploymentTask, IInferenceTask, IEvaluationTask, IOptimizationTask):
@check_input_parameters_type()
def __init__(self,
task_environment: TaskEnvironment):
self.task_environment = task_environment
Expand All @@ -148,6 +159,7 @@ def load_inferencer(self) -> OpenVINOSegmentationInferencer:
self.model.get_data("openvino.xml"),
self.model.get_data("openvino.bin"))

@check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def infer(self,
dataset: DatasetEntity,
inference_parameters: Optional[InferenceParameters] = None) -> DatasetEntity:
@@ -190,6 +202,7 @@ def infer(self,

return dataset

@check_input_parameters_type()
def evaluate(self,
output_result_set: ResultSetEntity,
evaluation_metric: Optional[str] = None):
@@ -201,6 +214,7 @@ def evaluate(self,

output_result_set.performance = metrics.get_performance()

@check_input_parameters_type()
def deploy(self,
output_model: ModelEntity) -> None:
logger.info('Deploying the model')
@@ -233,11 +247,12 @@ def deploy(self,
output_model.exportable_code = zip_buffer.getvalue()
logger.info('Deploying completed')

@check_input_parameters_type({"dataset": DatasetParamTypeCheck})
def optimize(self,
optimization_type: OptimizationType,
dataset: DatasetEntity,
output_model: ModelEntity,
optimization_parameters: Optional[OptimizationParameters]):
optimization_parameters: Optional[OptimizationParameters] = None):
logger.info('Start POT optimization')

if optimization_type is not OptimizationType.POT: