diff --git a/CHANGELOG.md b/CHANGELOG.md index 59ab2823ddb..a98637c6094 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ All notable changes to this project will be documented in this file. - Enhance DetCon logic and SupCon for semantic segmentation() - Classification task refactoring () - Extend OTX explain CLI () +- Segmentation task refactoring () ### Bug fixes diff --git a/docs/source/guide/reference/algorithm/segmentation/tasks.rst b/docs/source/guide/reference/algorithm/segmentation/tasks.rst index 759ef328f1f..15c516fbe56 100644 --- a/docs/source/guide/reference/algorithm/segmentation/tasks.rst +++ b/docs/source/guide/reference/algorithm/segmentation/tasks.rst @@ -5,6 +5,6 @@ Tasks :maxdepth: 3 :caption: Contents: -.. automodule:: otx.algorithms.segmentation.tasks +.. automodule:: otx.algorithms.segmentation.task :members: :undoc-members: \ No newline at end of file diff --git a/otx/algorithms/common/adapters/mmcv/hooks/__init__.py b/otx/algorithms/common/adapters/mmcv/hooks/__init__.py index 4c48242bc35..f4d8c8c8c2d 100644 --- a/otx/algorithms/common/adapters/mmcv/hooks/__init__.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/__init__.py @@ -35,6 +35,7 @@ from .fp16_sam_optimizer_hook import Fp16SAMOptimizerHook from .ib_loss_hook import IBLossHook from .logger_hook import LoggerReplaceHook, OTXLoggerHook +from .lr_updater_hook import CustomstepLrUpdaterHook from .model_ema_v2_hook import ModelEmaV2Hook from .no_bias_decay_hook import NoBiasDecayHook from .progress_hook import OTXProgressHook @@ -55,6 +56,7 @@ "AdaptiveTrainSchedulingHook", "CancelInterfaceHook", "CancelTrainingHook", + "CustomstepLrUpdaterHook", "CheckpointHookWithValResults", "EnsureCorrectBestCheckpointHook", "ComposedDataLoadersHook", diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py b/otx/algorithms/common/adapters/mmcv/hooks/lr_updater_hook.py similarity index 98% rename from otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py rename to 
otx/algorithms/common/adapters/mmcv/hooks/lr_updater_hook.py index b7270e60354..69b60ddf87f 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/hooks.py +++ b/otx/algorithms/common/adapters/mmcv/hooks/lr_updater_hook.py @@ -1,4 +1,4 @@ -"""NNCF task related hooks.""" +"""Module for defining LrUpdaterHook and CustomstepLrUpdaterHook for self-supervised learning using mmseg.""" # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/otx/algorithms/segmentation/__init__.py b/otx/algorithms/segmentation/__init__.py index c1e757c9b4c..e389538a18a 100644 --- a/otx/algorithms/segmentation/__init__.py +++ b/otx/algorithms/segmentation/__init__.py @@ -2,3 +2,7 @@ # Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 + +from .task import OTXSegmentationTask + +__all__ = ["OTXSegmentationTask"] diff --git a/otx/algorithms/segmentation/adapters/mmseg/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/__init__.py index 4651aac0f2b..ff3a9626547 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/__init__.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions # and limitations under the License. 
- from .datasets import MPASegDataset from .models import ( ClassIncrEncoderDecoder, diff --git a/otx/algorithms/segmentation/adapters/mmseg/configurer.py b/otx/algorithms/segmentation/adapters/mmseg/configurer.py new file mode 100644 index 00000000000..e0315491501 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/configurer.py @@ -0,0 +1,638 @@ +"""Base configurer for mmseg config.""" +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import importlib +import json +import os +from typing import Any, Dict, List, Optional + +import numpy as np +import torch +from mmcv.runner import CheckpointLoader +from mmcv.utils import Config, ConfigDict +from torch import distributed as dist + +from otx.algorithms.common.adapters.mmcv.utils import ( + align_data_config_with_recipe, + build_dataloader, + build_dataset, + patch_adaptive_interval_training, + patch_default_config, + patch_early_stopping, + patch_fp16, + patch_persistent_workers, + patch_runner, +) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + recursively_update_cfg, + remove_custom_hook, + update_or_add_custom_hook, +) +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.mmseg.utils import ( + patch_datasets, + patch_evaluation, +) + +logger = get_logger() + + +# pylint: disable=too-many-public-methods +class SegmentationConfigurer: + """Patch config to support otx train.""" + + def __init__(self): + self.task_adapt_type: Optional[str] = None + self.task_adapt_op: str = "REPLACE" + self.org_model_classes: List[str] = [] + self.model_classes: List[str] = [] + self.data_classes: List[str] = [] + + # pylint: disable=too-many-arguments + def configure( + self, + cfg: Config, + model_ckpt: str, + data_cfg: Config, + training: bool = True, + subset: str = "train", + ir_options: Optional[Config] = None, + data_classes: Optional[List[str]] = None, + model_classes: Optional[List[str]] = None, + ) -> 
Config: + """Create MMCV-consumable config from given inputs.""" + logger.info(f"configure!: training={training}") + + self.configure_base(cfg, data_cfg, data_classes, model_classes) + self.configure_device(cfg, training) + self.configure_ckpt(cfg, model_ckpt) + self.configure_model(cfg, ir_options) + self.configure_data(cfg, training, data_cfg) + self.configure_task(cfg, training) + self.configure_hook(cfg) + self.configure_samples_per_gpu(cfg, subset) + self.configure_fp16_optimizer(cfg) + self.configure_compat_cfg(cfg) + return cfg + + def configure_base( + self, + cfg: Config, + data_cfg: Optional[Config], + data_classes: Optional[List[str]], + model_classes: Optional[List[str]], + ) -> None: + """Basic configuration work for recipe. + + Patches in this function were previously handled at the task level. + This function might need to be re-organized + """ + + options_for_patch_datasets = {"type": "MPASegDataset"} + + patch_default_config(cfg) + patch_runner(cfg) + patch_datasets( + cfg, + **options_for_patch_datasets, + ) # for OTX compatibility + patch_evaluation(cfg) # for OTX compatibility + patch_fp16(cfg) + patch_adaptive_interval_training(cfg) + patch_early_stopping(cfg) + patch_persistent_workers(cfg) + + if data_cfg is not None: + align_data_config_with_recipe(data_cfg, cfg) + + # update model config -> model label schema + cfg["model_classes"] = model_classes + if data_classes is not None: + train_data_cfg: Config = self.get_data_cfg(data_cfg, "train") + train_data_cfg["data_classes"] = data_classes + new_classes: List[str] = np.setdiff1d(data_classes, model_classes).tolist() + train_data_cfg["new_classes"] = new_classes + + def configure_model( + self, + cfg: Config, + ir_options: Optional[Config], + ) -> None: + """Patch config's model. 
+ + Change model type to super type + Patch for OMZ backbones + """ + if ir_options is None: + ir_options = {"ir_model_path": None, "ir_weight_path": None, "ir_weight_init": False} + + cfg.model_task = cfg.model.pop("task", "segmentation") + if cfg.model_task != "segmentation": + raise ValueError(f"Given cfg ({cfg.filename}) is not supported by segmentation recipe") + + super_type = cfg.model.pop("super_type", None) + if super_type: + cfg.model.arch_type = cfg.model.type + cfg.model.type = super_type + + # OV-plugin + ir_model_path = ir_options.get("ir_model_path") + if ir_model_path: + + def is_mmov_model(key: str, value: Any) -> bool: + if key == "type" and value.startswith("MMOV"): + return True + return False + + ir_weight_path = ir_options.get("ir_weight_path", None) + ir_weight_init = ir_options.get("ir_weight_init", False) + recursively_update_cfg( + cfg, + is_mmov_model, + {"model_path": ir_model_path, "weight_path": ir_weight_path, "init_weight": ir_weight_init}, + ) + + def configure_data( + self, + cfg: Config, + training: bool, + data_cfg: Optional[Config], + ) -> None: + """Patch cfg.data. 
+ + Merge cfg and data_cfg + Match cfg.data.train.type to super_type + Patch for unlabeled data path ==> This may be moved to SemiSegmentationConfigurer + """ + if data_cfg: + cfg.merge_from_dict(data_cfg) + + def configure_split(target: str) -> None: + def update_transform(opt: Config, pipeline: Config, idx: int, transform: Config) -> None: + if isinstance(opt, dict): + if "_delete_" in opt.keys() and opt.get("_delete_", False): + # if option include _delete_=True, remove this transform from pipeline + logger.info(f"configure_data: {transform['type']} is deleted") + del pipeline[idx] + return + logger.info(f"configure_data: {transform['type']} is updated with {opt}") + transform.update(**opt) + + # pylint: disable=too-many-branches, too-many-nested-blocks + def update_config(src: Config, pipeline_options: Config) -> None: + logger.info(f"update_config() {pipeline_options}") + if src.get("pipeline") is not None or ( + src.get("dataset") is not None and src.get("dataset").get("pipeline") is not None + ): + if src.get("pipeline") is not None: + pipeline = src.get("pipeline", None) + else: + pipeline = src.get("dataset").get("pipeline") + if isinstance(pipeline, list): + for idx, transform in enumerate(pipeline): + for opt_key, opt in pipeline_options.items(): + if transform["type"] == opt_key: + update_transform(opt, pipeline, idx, transform) + elif isinstance(pipeline, dict): + for _, pipe in pipeline.items(): + for idx, transform in enumerate(pipe): + for opt_key, opt in pipeline_options.items(): + if transform["type"] == opt_key: + update_transform(opt, pipe, idx, transform) + else: + raise NotImplementedError(f"pipeline type of {type(pipeline)} is not supported") + else: + logger.info("no pipeline in the data split") + + split = cfg.data.get(target) + if split is not None: + if isinstance(split, list): + for sub_item in split: + update_config(sub_item, pipeline_options) + elif isinstance(split, dict): + update_config(split, pipeline_options) + else: + 
logger.warning(f"type of split '{target}'' should be list or dict but {type(split)}") + + logger.info("configure_data()") + logger.debug(f"[args] {cfg.data}") + pipeline_options = cfg.data.pop("pipeline_options", None) + if pipeline_options is not None and isinstance(pipeline_options, dict): + configure_split("train") + configure_split("val") + if not training: + configure_split("test") + configure_split("unlabeled") + + train_data_cfg = self.get_data_cfg(cfg, "train") + for mode in ["train", "val", "test"]: + if train_data_cfg.type == "MPASegDataset" and cfg.data.get(mode, False): + if cfg.data[mode]["type"] != "MPASegDataset": + # Wrap original dataset config + org_type = cfg.data[mode]["type"] + cfg.data[mode]["type"] = "MPASegDataset" + cfg.data[mode]["org_type"] = org_type + + def configure_task( + self, + cfg: Config, + training: bool, + ) -> None: + """Patch config to support training algorithm.""" + if "task_adapt" in cfg: + logger.info(f"task config!!!!: training={training}") + cfg["task_adapt"].get("op", "REPLACE") + + # Task classes + self.configure_classes(cfg) + # Ignored mode + self.configure_ignore(cfg) + + def configure_ignore(self, cfg: Config) -> None: + """Change to incremental loss (ignore mode).""" + if cfg.get("ignore", False): + cfg_loss_decode = ConfigDict( + type="CrossEntropyLossWithIgnore", + use_sigmoid=False, + loss_weight=1.0, + ) + + if "decode_head" in cfg.model: + decode_head = cfg.model.decode_head + if decode_head.type == "FCNHead": + decode_head.type = "CustomFCNHead" + decode_head.loss_decode = cfg_loss_decode + + # pylint: disable=too-many-branches + def configure_classes(self, cfg: Config) -> None: + """Patch classes for model and dataset.""" + org_model_classes = self.get_model_classes(cfg) + data_classes = self.get_data_classes(cfg) + + if "background" not in org_model_classes: + org_model_classes = ["background"] + org_model_classes + if "background" not in data_classes: + data_classes = ["background"] + data_classes + + # 
Model classes + if self.task_adapt_op == "REPLACE": + if len(data_classes) == 1: # 'background' + model_classes = org_model_classes.copy() + else: + model_classes = data_classes.copy() + elif self.task_adapt_op == "MERGE": + model_classes = org_model_classes + [cls for cls in data_classes if cls not in org_model_classes] + else: + raise KeyError(f"{self.task_adapt_op} is not supported for task_adapt options!") + + cfg.task_adapt.final = model_classes + cfg.model.task_adapt = ConfigDict( + src_classes=org_model_classes, + dst_classes=model_classes, + ) + + # Model architecture + if "decode_head" in cfg.model: + decode_head = cfg.model.decode_head + if isinstance(decode_head, Config): + decode_head.num_classes = len(model_classes) + elif isinstance(decode_head, list): + for head in decode_head: + head.num_classes = len(model_classes) + + # For SupConDetCon + if "SupConDetCon" in cfg.model.type: + cfg.model.num_classes = len(model_classes) + + # Task classes + self.org_model_classes = org_model_classes + self.model_classes = model_classes + + # Functions below come from the base stage + def configure_ckpt(self, cfg: Config, model_ckpt: str) -> None: + """Patch checkpoint path for pretrained weight. 
+ + Replace cfg.load_from to model_ckpt + Replace cfg.load_from to pretrained + Replace cfg.resume_from to cfg.load_from + """ + if model_ckpt: + cfg.load_from = self.get_model_ckpt(model_ckpt) + if cfg.get("resume", False): + cfg.resume_from = cfg.load_from + if cfg.get("load_from", None) and cfg.model.backbone.get("pretrained", None): + cfg.model.backbone.pretrained = None + + @staticmethod + def get_model_ckpt(ckpt_path: str, new_path: Optional[str] = None) -> str: + """Get pytorch model weights.""" + ckpt = CheckpointLoader.load_checkpoint(ckpt_path, map_location="cpu") + if "model" in ckpt: + ckpt = ckpt["model"] + if not new_path: + new_path = ckpt_path[:-3] + "converted.pth" + torch.save(ckpt, new_path) + return new_path + return ckpt_path + + @staticmethod + def get_model_classes(cfg: Config) -> List[str]: + """Extract trained classes info from checkpoint file. + + MMCV-based models would save class info in ckpt['meta']['CLASSES'] + For other cases, try to get the info from cfg.model.classes (with pop()) + - Which means that model classes should be specified in model-cfg for + non-MMCV models (e.g. 
OMZ models) + """ + + def get_model_meta(cfg: Config) -> Config: + ckpt_path = cfg.get("load_from", None) + meta = {} + if ckpt_path: + ckpt = CheckpointLoader.load_checkpoint(ckpt_path, map_location="cpu") + meta = ckpt.get("meta", {}) + return meta + + def read_label_schema(ckpt_path, name_only=True, file_name="label_schema.json"): + serialized_label_schema = [] + if any(ckpt_path.endswith(extension) for extension in (".xml", ".bin", ".pth")): + label_schema_path = os.path.join(os.path.dirname(ckpt_path), file_name) + if os.path.exists(label_schema_path): + with open(label_schema_path, encoding="UTF-8") as read_file: + serialized_label_schema = json.load(read_file) + if serialized_label_schema: + if name_only: + all_classes = [labels["name"] for labels in serialized_label_schema["all_labels"].values()] + else: + all_classes = serialized_label_schema + else: + all_classes = [] + return all_classes + + classes: List[str] = [] + meta = get_model_meta(cfg) + # for MPA classification legacy compatibility + classes = meta.get("CLASSES", []) + classes = meta.get("classes", classes) + if classes is None: + classes = [] + + if len(classes) == 0: + ckpt_path = cfg.get("load_from", None) + if ckpt_path: + classes = read_label_schema(ckpt_path) + if len(classes) == 0: + classes = cfg.model.pop("classes", cfg.pop("model_classes", [])) + return classes + + def get_data_classes(self, cfg: Config) -> List[str]: + """Get data classes from train cfg.""" + data_classes: List[str] = [] + train_cfg = self.get_data_cfg(cfg, "train") + if "data_classes" in train_cfg: + data_classes = list(train_cfg.pop("data_classes", [])) + elif "classes" in train_cfg: + data_classes = list(train_cfg.classes) + return data_classes + + @staticmethod + def get_data_cfg(cfg: Config, subset: str) -> Config: + """Get subset's data cfg.""" + assert subset in ["train", "val", "test"], f"Unknown subset:{subset}" + if "dataset" in cfg.data[subset]: # Concat|RepeatDataset + dataset = cfg.data[subset].dataset + 
while hasattr(dataset, "dataset"): + dataset = dataset.dataset + return dataset + return cfg.data[subset] + + @staticmethod + def configure_hook(cfg: Config) -> None: + """Update cfg.custom_hooks based on cfg.custom_hook_options.""" + + def update_hook(opt: Config, custom_hooks: Any, idx: int, hook: Config) -> None: + """Delete or update a custom hook.""" + if isinstance(opt, dict): + if opt.get("_delete_", False): + # if option includes _delete_=True, remove this hook from custom_hooks + logger.info(f"configure_hook: {hook['type']} is deleted") + del custom_hooks[idx] + else: + logger.info(f"configure_hook: {hook['type']} is updated with {opt}") + hook.update(**opt) + + hook_cfg = ConfigDict(type="LoggerReplaceHook") + update_or_add_custom_hook(cfg, hook_cfg) + + custom_hook_options = cfg.pop("custom_hook_options", {}) + # logger.info(f"configure_hook() {cfg.get('custom_hooks', [])} <- {custom_hook_options}") + custom_hooks = cfg.get("custom_hooks", []) + for idx, hook in enumerate(custom_hooks): + for opt_key, opt in custom_hook_options.items(): + if hook["type"] == opt_key: + update_hook(opt, custom_hooks, idx, hook) + + def configure_device(self, cfg: Config, training: bool) -> None: + """Setting device for training and inference.""" + cfg.distributed = False + if torch.distributed.is_initialized(): + cfg.gpu_ids = [int(os.environ["LOCAL_RANK"])] + if training: # TODO multi GPU is available only in training. Evaluation needs to be supported later. 
+ cfg.distributed = True + self.configure_distributed(cfg) + elif "gpu_ids" not in cfg: + gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES") + logger.info(f"CUDA_VISIBLE_DEVICES = {gpu_ids}") + if gpu_ids is not None: + cfg.gpu_ids = range(len(gpu_ids.split(","))) + else: + cfg.gpu_ids = range(1) + + # consider "cuda" and "cpu" device only + if not torch.cuda.is_available(): + cfg.device = "cpu" + cfg.gpu_ids = range(-1, 0) + else: + cfg.device = "cuda" + + def configure_samples_per_gpu(self, cfg: Config, subset: str) -> None: + """Settings samples_per_gpu for training and inference.""" + + dataloader_cfg = cfg.data.get(f"{subset}_dataloader", ConfigDict()) + samples_per_gpu = dataloader_cfg.get("samples_per_gpu", cfg.data.get("samples_per_gpu", 1)) + + data_cfg = self.get_data_cfg(cfg, subset) + if data_cfg.get("otx_dataset") is not None: + dataset_len = len(data_cfg.otx_dataset) + + if getattr(cfg, "distributed", False): + dataset_len = dataset_len // dist.get_world_size() + + # set batch size as a total dataset + # if it is smaller than total dataset + if dataset_len < samples_per_gpu: + dataloader_cfg.samples_per_gpu = dataset_len + + # drop the last batch if the last batch size is 1 + # batch size of 1 is a runtime error for training batch normalization layer + if subset in ("train", "unlabeled") and dataset_len % samples_per_gpu == 1: + dataloader_cfg.drop_last = True + + cfg.data[f"{subset}_dataloader"] = dataloader_cfg + + @staticmethod + def configure_fp16_optimizer(cfg: Config) -> None: + """Configure Fp16OptimizerHook and Fp16SAMOptimizerHook.""" + fp16_config = cfg.pop("fp16", None) + if fp16_config is not None: + optim_type = cfg.optimizer_config.get("type", "OptimizerHook") + opts: Config = dict( + distributed=getattr(cfg, "distributed", False), + **fp16_config, + ) + if optim_type == "SAMOptimizerHook": + opts["type"] = "Fp16SAMOptimizerHook" + elif optim_type == "OptimizerHook": + opts["type"] = "Fp16OptimizerHook" + else: + # does not support 
optimizerhook type + # let mm library handle it + cfg.fp16 = fp16_config + opts = dict() + cfg.optimizer_config.update(opts) + + @staticmethod + def configure_distributed(cfg: Config) -> None: + """Patching for distributed training.""" + if hasattr(cfg, "dist_params") and cfg.dist_params.get("linear_scale_lr", False): + new_lr = len(cfg.gpu_ids) * cfg.optimizer.lr + logger.info( + f"enabled linear scaling rule to the learning rate. \ + changed LR from {cfg.optimizer.lr} to {new_lr}" + ) + cfg.optimizer.lr = new_lr + + @staticmethod + def configure_compat_cfg( + cfg: Config, + ): + """Modify config to keep the compatibility.""" + + def _configure_dataloader(cfg: Config) -> None: + global_dataloader_cfg: Dict[str, str] = {} + global_dataloader_cfg.update( + { + k: cfg.data.pop(k) + for k in list(cfg.data.keys()) + if k + not in [ + "train", + "val", + "test", + "unlabeled", + "train_dataloader", + "val_dataloader", + "test_dataloader", + "unlabeled_dataloader", + ] + } + ) + + for subset in ["train", "val", "test", "unlabeled"]: + if subset not in cfg.data: + continue + dataloader_cfg = cfg.data.get(f"{subset}_dataloader", None) + if dataloader_cfg is None: + raise AttributeError(f"{subset}_dataloader is not found in config.") + dataloader_cfg = {**global_dataloader_cfg, **dataloader_cfg} + cfg.data[f"{subset}_dataloader"] = dataloader_cfg + + _configure_dataloader(cfg) + + +class IncrSegmentationConfigurer(SegmentationConfigurer): + """Patch config to support incremental learning for semantic segmentation.""" + + def configure_task(self, cfg: ConfigDict, training: bool) -> None: + """Patch config to support incremental learning.""" + super().configure_task(cfg, training) + + new_classes: List[str] = np.setdiff1d(self.model_classes, self.org_model_classes).tolist() + + # Check if new classes are added + has_new_class: bool = len(new_classes) > 0 + + # Update TaskAdaptHook (use incremental sampler) + task_adapt_hook = ConfigDict( + type="TaskAdaptHook", + 
src_classes=self.org_model_classes, + dst_classes=self.model_classes, + model_type=cfg.model.type, + sampler_flag=has_new_class, + efficient_mode=cfg["task_adapt"].get("efficient_mode", False), + ) + update_or_add_custom_hook(cfg, task_adapt_hook) + + +class SemiSLSegmentationConfigurer(SegmentationConfigurer): + """Patch config to support semi supervised learning for semantic segmentation.""" + + def configure_data(self, cfg: ConfigDict, training: bool, data_cfg: ConfigDict) -> None: + """Patch cfg.data.""" + super().configure_data(cfg, training, data_cfg) + # Set unlabeled data hook + if training: + if cfg.data.get("unlabeled", False) and cfg.data.unlabeled.get("otx_dataset", False): + self.configure_unlabeled_dataloader(cfg) + + def configure_task(self, cfg: ConfigDict, training: bool, **kwargs: Any) -> None: + """Adjust settings for task adaptation.""" + super().configure_task(cfg, training, **kwargs) + + # Don't pass task_adapt arg to semi-segmentor + if cfg.model.type != "ClassIncrEncoderDecoder" and cfg.model.get("task_adapt", False): + cfg.model.pop("task_adapt") + + # Remove task adapt hook (set default torch random sampler) + remove_custom_hook(cfg, "TaskAdaptHook") + + @staticmethod + def configure_unlabeled_dataloader(cfg: ConfigDict) -> None: + """Patch for unlabeled dataloader.""" + + model_task: Dict[str, str] = { + "classification": "mmcls", + "detection": "mmdet", + "segmentation": "mmseg", + } # noqa + if "unlabeled" in cfg.data: + task_lib_module = importlib.import_module(f"{model_task[cfg.model_task]}.datasets") + dataset_builder = getattr(task_lib_module, "build_dataset") + dataloader_builder = getattr(task_lib_module, "build_dataloader") + + dataset = build_dataset(cfg, "unlabeled", dataset_builder, consume=True) + unlabeled_dataloader = build_dataloader( + dataset, + cfg, + "unlabeled", + dataloader_builder, + distributed=cfg.distributed, + consume=True, + ) + + custom_hooks = cfg.get("custom_hooks", []) + updated = False + for custom_hook in 
custom_hooks: + if custom_hook["type"] == "ComposedDataLoadersHook": + custom_hook["data_loaders"] = [*custom_hook["data_loaders"], unlabeled_dataloader] + updated = True + if not updated: + custom_hooks.append( + ConfigDict( + type="ComposedDataLoadersHook", + data_loaders=unlabeled_dataloader, + ) + ) + cfg.custom_hooks = custom_hooks diff --git a/otx/algorithms/segmentation/adapters/mmseg/datasets/dataset.py b/otx/algorithms/segmentation/adapters/mmseg/datasets/dataset.py index eb267d4fe12..97137a0d9f4 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/datasets/dataset.py +++ b/otx/algorithms/segmentation/adapters/mmseg/datasets/dataset.py @@ -118,6 +118,7 @@ def __init__( pipeline: Sequence[dict], classes: Optional[List[str]] = None, test_mode: bool = False, + **kwargs, ): self.otx_dataset = otx_dataset self.test_mode = test_mode diff --git a/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py index 1463ba35711..361ebcbb38a 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/__init__.py @@ -15,9 +15,7 @@ # and limitations under the License. 
from .builder import build_nncf_segmentor -from .hooks import CustomstepLrUpdaterHook __all__ = [ "build_nncf_segmentor", - "CustomstepLrUpdaterHook", ] diff --git a/otx/algorithms/segmentation/tasks/nncf.py b/otx/algorithms/segmentation/adapters/mmseg/nncf/task.py similarity index 78% rename from otx/algorithms/segmentation/tasks/nncf.py rename to otx/algorithms/segmentation/adapters/mmseg/nncf/task.py index 28693e19688..abfefa73595 100644 --- a/otx/algorithms/segmentation/tasks/nncf.py +++ b/otx/algorithms/segmentation/adapters/mmseg/nncf/task.py @@ -18,9 +18,10 @@ from typing import List, Optional import otx.algorithms.segmentation.adapters.mmseg.nncf.patches # noqa: F401 # pylint: disable=unused-import -from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask +from otx.algorithms.common.tasks.nncf_task import NNCFBaseTask from otx.algorithms.common.utils.logger import get_logger from otx.algorithms.segmentation.adapters.mmseg.nncf import build_nncf_segmentor +from otx.algorithms.segmentation.adapters.mmseg.task import MMSegmentationTask from otx.api.entities.datasets import DatasetEntity from otx.api.entities.metrics import ( CurveMetric, @@ -32,22 +33,31 @@ VisualizationInfo, VisualizationType, ) -from otx.api.entities.model import ModelEntity +from otx.api.entities.model import ( + ModelEntity, +) from otx.api.entities.optimization_parameters import OptimizationParameters - -from .inference import SegmentationInferenceTask +from otx.api.entities.task_environment import TaskEnvironment logger = get_logger() -class SegmentationNNCFTask(NNCFBaseTask, SegmentationInferenceTask): # pylint: disable=too-many-ancestors +class SegmentationNNCFTask(NNCFBaseTask, MMSegmentationTask): # pylint: disable=too-many-ancestors """SegmentationNNCFTask.""" - def _initialize_post_hook(self, options=None): - super()._initialize_post_hook(options) + def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] = None): + super().__init__() # type: ignore 
[call-arg] + super(NNCFBaseTask, self).__init__(task_environment, output_path) + self._set_attributes_by_hyperparams() + + def _init_task(self, export: bool = False): # noqa + super(NNCFBaseTask, self)._init_task(export) + self._prepare_optimize(export) - export = options.get("export", False) - options["model_builder"] = partial( + def _prepare_optimize(self, export=False): + super()._prepare_optimize() + + self.model_builder = partial( self.model_builder, nncf_model_builder=build_nncf_segmentor, return_compression_ctrl=False, @@ -59,12 +69,8 @@ def _optimize( dataset: DatasetEntity, optimization_parameters: Optional[OptimizationParameters] = None, ): - results = self._run_task( - "SegTrainer", - mode="train", - dataset=dataset, - parameters=optimization_parameters, - ) + results = self._train_model(dataset) + return results def _optimize_post_hook( diff --git a/otx/algorithms/segmentation/adapters/mmseg/task.py b/otx/algorithms/segmentation/adapters/mmseg/task.py new file mode 100644 index 00000000000..fee2f0548e7 --- /dev/null +++ b/otx/algorithms/segmentation/adapters/mmseg/task.py @@ -0,0 +1,563 @@ +"""Task of OTX Segmentation using mmsegmentation training backend.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. 
+ +import glob +import io +import os +import time +from contextlib import nullcontext +from copy import deepcopy +from typing import Any, Dict, Optional, Union + +import torch +from mmcv.runner import wrap_fp16_model +from mmcv.utils import Config, ConfigDict, get_git_hash +from mmseg import __version__ +from mmseg.apis import train_segmentor +from mmseg.datasets import build_dataloader, build_dataset +from mmseg.utils import collect_env + +from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( + BaseRecordingForwardHook, + FeatureVectorHook, +) +from otx.algorithms.common.adapters.mmcv.utils import ( + build_data_parallel, + get_configs_by_pairs, + patch_data_pipeline, + patch_from_hyperparams, +) +from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( + MPAConfig, + update_or_add_custom_hook, +) +from otx.algorithms.common.configs.training_base import TrainType +from otx.algorithms.common.utils import set_random_seed +from otx.algorithms.common.utils.data import get_dataset +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.mmseg.configurer import ( + IncrSegmentationConfigurer, + SegmentationConfigurer, + SemiSLSegmentationConfigurer, +) +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor +from otx.algorithms.segmentation.adapters.mmseg.utils.exporter import SegmentationExporter +from otx.algorithms.segmentation.task import OTXSegmentationTask + +# from otx.algorithms.segmentation.utils import get_det_model_api_configuration +from otx.api.configuration import cfg_helper +from otx.api.configuration.helper.utils import ids_to_strings +from otx.api.entities.datasets import DatasetEntity +from otx.api.entities.inference_parameters import InferenceParameters +from otx.api.entities.model import ( + ModelEntity, + ModelPrecision, +) +from otx.api.entities.subset import Subset +from otx.api.entities.task_environment import TaskEnvironment +from 
otx.api.serialization.label_mapper import label_schema_to_bytes +from otx.core.data import caching + +logger = get_logger() + +# TODO Remove unnecessary pylint disable +# pylint: disable=too-many-lines + + +class MMSegmentationTask(OTXSegmentationTask): + """Task class for OTX segmentation using mmsegmentation training backend.""" + + # pylint: disable=too-many-instance-attributes + def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] = None): + super().__init__(task_environment, output_path) + self._data_cfg: Optional[Config] = None + self._recipe_cfg: Optional[Config] = None + + # pylint: disable=too-many-locals, too-many-branches, too-many-statements + def _init_task(self, export: bool = False): # noqa + """Initialize task.""" + self._recipe_cfg = MPAConfig.fromfile(os.path.join(self._model_dir, "model.py")) + self._recipe_cfg.domain = self._task_type.domain + self._config = self._recipe_cfg + + set_random_seed(self._recipe_cfg.get("seed", 5), logger, self._recipe_cfg.get("deterministic", False)) + + # Belows may go to the configure function + patch_data_pipeline(self._recipe_cfg, self.data_pipeline_path) + + if not export: + patch_from_hyperparams(self._recipe_cfg, self._hyperparams) + + if "custom_hooks" in self.override_configs: + override_custom_hooks = self.override_configs.pop("custom_hooks") + for override_custom_hook in override_custom_hooks: + update_or_add_custom_hook(self._recipe_cfg, ConfigDict(override_custom_hook)) + if len(self.override_configs) > 0: + logger.info(f"before override configs merging = {self._recipe_cfg}") + self._recipe_cfg.merge_from_dict(self.override_configs) + logger.info(f"after override configs merging = {self._recipe_cfg}") + + # add Cancel training hook + update_or_add_custom_hook( + self._recipe_cfg, + ConfigDict(type="CancelInterfaceHook", init_callback=self.on_hook_initialized), + ) + if self._time_monitor is not None: + update_or_add_custom_hook( + self._recipe_cfg, + ConfigDict( + 
type="OTXProgressHook", + time_monitor=self._time_monitor, + verbose=True, + priority=71, + ), + ) + self._recipe_cfg.log_config.hooks.append({"type": "OTXLoggerHook", "curves": self._learning_curves}) + + # Update recipe with caching modules + self._update_caching_modules(self._recipe_cfg.data) + + logger.info("initialized.") + + # pylint: disable=too-many-arguments + def configure( + self, + training=True, + subset="train", + ir_options=None, + ): + """Patch mmcv configs for OTX segmentation settings.""" + + # deepcopy all configs to make sure + # changes under MPA and below does not take an effect to OTX for clear distinction + recipe_cfg = deepcopy(self._recipe_cfg) + data_cfg = deepcopy(self._data_cfg) + assert recipe_cfg is not None, "'recipe_cfg' is not initialized." + + if self._data_cfg is not None: + data_classes = [label.name for label in self._labels] + else: + data_classes = None + model_classes = [label.name for label in self._model_label_schema] + + recipe_cfg.work_dir = self._output_path + recipe_cfg.resume = self._resume + + if self._train_type == TrainType.Incremental: + configurer = IncrSegmentationConfigurer() + elif self._train_type == TrainType.Semisupervised: + configurer = SemiSLSegmentationConfigurer() + else: + configurer = SegmentationConfigurer() + cfg = configurer.configure( + recipe_cfg, self._model_ckpt, data_cfg, training, subset, ir_options, data_classes, model_classes + ) + self._config = cfg + return cfg + + def build_model( + self, + cfg: Config, + fp16: bool = False, + **kwargs, + ) -> torch.nn.Module: + """Build model from model_builder.""" + model_builder = getattr(self, "model_builder", build_segmentor) + model = model_builder(cfg, **kwargs) + if bool(fp16): + wrap_fp16_model(model) + return model + + def _infer_model( + self, + dataset: DatasetEntity, + inference_parameters: Optional[InferenceParameters] = None, + ): + """Main infer function.""" + self._data_cfg = ConfigDict( + data=ConfigDict( + train=ConfigDict( + 
otx_dataset=None, + labels=self._labels, + ), + test=ConfigDict( + otx_dataset=dataset, + labels=self._labels, + ), + ) + ) + + dump_features = True + + self._init_task() + + cfg = self.configure(False, "test", None) + logger.info("infer!") + + # FIXME: Currently segmentor does not support multi batch inference. + if "test" in cfg.data and "test_dataloader" in cfg.data: + cfg.data.test_dataloader["samples_per_gpu"] = 1 + + # Data loader + mm_dataset = build_dataset(cfg.data.test) + dataloader = build_dataloader( + mm_dataset, + samples_per_gpu=cfg.data.test_dataloader.get("samples_per_gpu", 1), + workers_per_gpu=cfg.data.test_dataloader.get("workers_per_gpu", 0), + num_gpus=len(cfg.gpu_ids), + dist=cfg.distributed, + seed=cfg.get("seed", None), + persistent_workers=False, + shuffle=False, + ) + + # Target classes + if "task_adapt" in cfg: + target_classes = cfg.task_adapt.final + if len(target_classes) < 1: + raise KeyError( + f"target_classes={target_classes} is empty check the metadata from model ckpt or recipe " + "configuration" + ) + else: + target_classes = mm_dataset.CLASSES + + # Model + model = self.build_model(cfg, fp16=cfg.get("fp16", False)) + model.CLASSES = target_classes + model.eval() + feature_model = model.model_s if self._train_type == TrainType.Semisupervised else model + model = build_data_parallel(model, cfg, distributed=False) + + # InferenceProgressCallback (Time Monitor enable into Infer task) + time_monitor = None + if cfg.get("custom_hooks", None): + time_monitor = [hook.time_monitor for hook in cfg.custom_hooks if hook.type == "OTXProgressHook"] + time_monitor = time_monitor[0] if time_monitor else None + if time_monitor is not None: + + # pylint: disable=unused-argument + def pre_hook(module, inp): + time_monitor.on_test_batch_begin(None, None) + + def hook(module, inp, outp): + time_monitor.on_test_batch_end(None, None) + + model.register_forward_pre_hook(pre_hook) + model.register_forward_hook(hook) + + eval_predictions = [] + 
feature_vectors = [] + + if not dump_features: + feature_vector_hook: Union[nullcontext, BaseRecordingForwardHook] = nullcontext() + else: + feature_vector_hook = FeatureVectorHook(feature_model) + + with feature_vector_hook: + for data in dataloader: + with torch.no_grad(): + result = model(return_loss=False, output_logits=True, **data) + eval_predictions.append(result) + if isinstance(feature_vector_hook, nullcontext): + feature_vectors = [None] * len(mm_dataset) + else: + feature_vectors = feature_vector_hook.records + + assert len(eval_predictions) == len(feature_vectors), ( + "Number of elements should be the same, however, number of outputs are ", + f"{len(eval_predictions)} and {len(feature_vectors)}", + ) + + outputs = dict( + classes=target_classes, + eval_predictions=eval_predictions, + feature_vectors=feature_vectors, + ) + return outputs + + # pylint: disable=too-many-branches, too-many-statements + def _train_model( + self, + dataset: DatasetEntity, + ): + """Train function in MMSegmentationTask.""" + logger.info("init data cfg.") + self._data_cfg = ConfigDict(data=ConfigDict()) + + for cfg_key, subset in zip( + ["train", "val", "unlabeled"], + [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED], + ): + subset = get_dataset(dataset, subset) + if subset and self._data_cfg is not None: + self._data_cfg.data[cfg_key] = ConfigDict( + otx_dataset=subset, + labels=self._labels, + ) + + self._is_training = True + + self._init_task() + + cfg = self.configure(True, "train", None) + logger.info("train!") + + timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) + + # Environment + logger.info(f"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}") + env_info_dict = collect_env() + env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) + dash_line = "-" * 60 + "\n" + logger.info(f"Environment info:\n{dash_line}{env_info}\n{dash_line}") + + # Data + datasets = [build_dataset(cfg.data.train)] + + # FIXME: Currently segmentor does 
not support multi batch evaluation. + # For the Self-SL case, there is no val data. So, need to check the + + if "val" in cfg.data and "val_dataloader" in cfg.data: + cfg.data.val_dataloader["samples_per_gpu"] = 1 + + # Target classes + if "task_adapt" in cfg: + target_classes = cfg.task_adapt.final + else: + target_classes = datasets[0].CLASSES + + # Metadata + meta = dict() + meta["env_info"] = env_info + meta["seed"] = cfg.seed + meta["exp_name"] = cfg.work_dir + if cfg.checkpoint_config is not None: + cfg.checkpoint_config.meta = dict( + mmseg_version=__version__ + get_git_hash()[:7], + CLASSES=target_classes, + ) + + # Model + model = self.build_model(cfg, fp16=cfg.get("fp16", False)) + model.train() + model.CLASSES = target_classes + + if cfg.distributed: + torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + if cfg.dist_params.get("linear_scale_lr", False): + new_lr = len(cfg.gpu_ids) * cfg.optimizer.lr + logger.info( + f"enabled linear scaling rule to the learning rate. \ + changed LR from {cfg.optimizer.lr} to {new_lr}" + ) + cfg.optimizer.lr = new_lr + + validate = bool(cfg.data.get("val", None)) + train_segmentor( + model, + datasets, + cfg, + distributed=cfg.distributed, + validate=validate, + timestamp=timestamp, + meta=meta, + ) + + # Save outputs + output_ckpt_path = os.path.join(cfg.work_dir, "latest.pth") + best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, "best_mDice_*.pth")) + if len(best_ckpt_path) > 0: + output_ckpt_path = best_ckpt_path[0] + best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, "best_mIoU_*.pth")) + if len(best_ckpt_path) > 0: + output_ckpt_path = best_ckpt_path[0] + return dict( + final_ckpt=output_ckpt_path, + ) + + def _explain_model(self): + """Explain function of OTX Segmentation Task.""" + raise NotImplementedError + + # pylint: disable=too-many-statements + def _export_model( + self, + precision: ModelPrecision = ModelPrecision.FP32, + dump_features: bool = True, + ): + """Export function of OTX Segmentation 
Task.""" + # copied from OTX inference_task.py + self._init_task(export=True) + + cfg = self.configure(False, "test", None) + + self._precision[0] = precision + export_options: Dict[str, Any] = {} + export_options["deploy_cfg"] = self._init_deploy_cfg() + if export_options.get("precision", None) is None: + assert len(self._precision) == 1 + export_options["precision"] = str(self._precision[0]) + + export_options["deploy_cfg"]["dump_features"] = dump_features + if dump_features: + output_names = export_options["deploy_cfg"]["ir_config"]["output_names"] + if "feature_vector" not in output_names: + output_names.append("feature_vector") + if export_options["deploy_cfg"]["codebase_config"]["task"] != "Segmentation": + if "saliency_map" not in output_names: + output_names.append("saliency_map") + export_options["model_builder"] = getattr(self, "model_builder", build_segmentor) + + if self._precision[0] == ModelPrecision.FP16: + export_options["deploy_cfg"]["backend_config"]["mo_options"]["flags"].append("--compress_to_fp16") + + exporter = SegmentationExporter() + results = exporter.run( + cfg, + **export_options, + ) + return results + + # This should moved somewhere + def _init_deploy_cfg(self) -> Union[Config, None]: + base_dir = os.path.abspath(os.path.dirname(self._task_environment.model_template.model_template_path)) + deploy_cfg_path = os.path.join(base_dir, "deployment.py") + deploy_cfg = None + if os.path.exists(deploy_cfg_path): + deploy_cfg = MPAConfig.fromfile(deploy_cfg_path) + + def patch_input_preprocessing(deploy_cfg): + normalize_cfg = get_configs_by_pairs( + self._recipe_cfg.data.test.pipeline, + dict(type="Normalize"), + ) + assert len(normalize_cfg) == 1 + normalize_cfg = normalize_cfg[0] + + options = dict(flags=[], args={}) + # NOTE: OTX loads image in RGB format + # so that `to_rgb=True` means a format change to BGR instead. + # Conventionally, OpenVINO IR expects a image in BGR format + # but OpenVINO IR under OTX assumes a image in RGB format. 
+ # + # `to_rgb=True` -> a model was trained with images in BGR format + # and a OpenVINO IR needs to reverse input format from RGB to BGR + # `to_rgb=False` -> a model was trained with images in RGB format + # and a OpenVINO IR does not need to do a reverse + if normalize_cfg.get("to_rgb", False): + options["flags"] += ["--reverse_input_channels"] + # value must be a list not a tuple + if normalize_cfg.get("mean", None) is not None: + options["args"]["--mean_values"] = list(normalize_cfg.get("mean")) + if normalize_cfg.get("std", None) is not None: + options["args"]["--scale_values"] = list(normalize_cfg.get("std")) + + # fill default + backend_config = deploy_cfg.backend_config + if backend_config.get("mo_options") is None: + backend_config.mo_options = ConfigDict() + mo_options = backend_config.mo_options + if mo_options.get("args") is None: + mo_options.args = ConfigDict() + if mo_options.get("flags") is None: + mo_options.flags = [] + + # already defiend options have higher priority + options["args"].update(mo_options.args) + mo_options.args = ConfigDict(options["args"]) + # make sure no duplicates + mo_options.flags.extend(options["flags"]) + mo_options.flags = list(set(mo_options.flags)) + + def patch_input_shape(deploy_cfg): + resize_cfg = get_configs_by_pairs( + self._recipe_cfg.data.test.pipeline, + dict(type="Resize"), + ) + assert len(resize_cfg) == 1 + resize_cfg = resize_cfg[0] + size = resize_cfg.size + if isinstance(size, int): + size = (size, size) + assert all(isinstance(i, int) and i > 0 for i in size) + # default is static shape to prevent an unexpected error + # when converting to OpenVINO IR + deploy_cfg.backend_config.model_inputs = [ConfigDict(opt_shapes=ConfigDict(input=[1, 3, *size]))] + + patch_input_preprocessing(deploy_cfg) + if not deploy_cfg.backend_config.get("model_inputs", []): + patch_input_shape(deploy_cfg) + + return deploy_cfg + + # This should be removed + def update_override_configurations(self, config): + """Update 
override_configs.""" + logger.info(f"update override config with: {config}") + config = ConfigDict(**config) + self.override_configs.update(config) + + def save_model(self, output_model: ModelEntity): + """Save best model weights in SegmentationTrainTask.""" + logger.info("called save_model") + buffer = io.BytesIO() + hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) + labels = {label.name: label.color.rgb_tuple for label in self._labels} + model_ckpt = torch.load(self._model_ckpt) + modelinfo = { + "model": model_ckpt, + "config": hyperparams_str, + "labels": labels, + "VERSION": 1, + } + + torch.save(modelinfo, buffer) + output_model.set_data("weights.pth", buffer.getvalue()) + output_model.set_data( + "label_schema.json", + label_schema_to_bytes(self._task_environment.label_schema), + ) + output_model.precision = self._precision + + # These need to be moved somewhere + def _update_caching_modules(self, data_cfg: Config) -> None: + def _find_max_num_workers(cfg: dict): + num_workers = [0] + for key, value in cfg.items(): + if key == "workers_per_gpu" and isinstance(value, int): + num_workers += [value] + elif isinstance(value, dict): + num_workers += [_find_max_num_workers(value)] + + return max(num_workers) + + def _get_mem_cache_size(): + if not hasattr(self._hyperparams.algo_backend, "mem_cache_size"): + return 0 + + return self._hyperparams.algo_backend.mem_cache_size + + max_num_workers = _find_max_num_workers(data_cfg) + mem_cache_size = _get_mem_cache_size() + + mode = "multiprocessing" if max_num_workers > 0 else "singleprocessing" + caching.MemCacheHandlerSingleton.create(mode, mem_cache_size) + + update_or_add_custom_hook( + self._recipe_cfg, + ConfigDict(type="MemCacheHook", priority="VERY_LOW"), + ) diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/__init__.py deleted file mode 100644 index 6197506e2f0..00000000000 --- 
a/otx/algorithms/segmentation/adapters/mmseg/tasks/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Initialize OTX Segmentation with MMSEG.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import otx.algorithms.common.adapters.mmcv.hooks -import otx.algorithms.segmentation.adapters.mmseg -import otx.algorithms.segmentation.adapters.mmseg.models -import otx.algorithms.segmentation.adapters.mmseg.models.schedulers -from otx.algorithms.segmentation.adapters.mmseg.tasks.incremental import ( - IncrSegInferrer, - IncrSegTrainer, -) -from otx.algorithms.segmentation.adapters.mmseg.tasks.semisl import ( - SemiSLSegExporter, - SemiSLSegInferrer, - SemiSLSegTrainer, -) - -# flake8: noqa -from . import exporter, inferrer, stage, trainer diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py deleted file mode 100644 index 3c2102526da..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Initialize OTX Segmentation with MMSEG.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from .inferrer import IncrSegInferrer -from .trainer import IncrSegTrainer - -__all__ = ["IncrSegInferrer", "IncrSegTrainer"] diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/inferrer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/inferrer.py deleted file mode 100644 index df95c6df7cb..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/inferrer.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Inference for OTX segmentation model with Incremental learning.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES -from otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer import SegInferrer - 
-from .stage import IncrSegStage - - -# pylint: disable=super-init-not-called -@STAGES.register_module() -class IncrSegInferrer(IncrSegStage, SegInferrer): - """Inference class for incremental learning.""" - - def __init__(self, **kwargs): - IncrSegStage.__init__(self, **kwargs) diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/stage.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/stage.py deleted file mode 100644 index 0f833bf1ff4..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/stage.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Stage for Incremental learning OTX segmentation with MMSEG.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import numpy as np -from mmcv import ConfigDict - -from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( - update_or_add_custom_hook, -) -from otx.algorithms.common.utils.logger import get_logger -from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage - -logger = get_logger() - - -class IncrSegStage(SegStage): - """Calss for incremental learning for segmentation.""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def configure_task(self, cfg, training): - """Adjust settings for task adaptation.""" - super().configure_task(cfg, training) - - new_classes = np.setdiff1d(self.model_classes, self.org_model_classes).tolist() - - # Check if new classes are added - has_new_class = len(new_classes) > 0 - - # Update TaskAdaptHook (use incremental sampler) - task_adapt_hook = ConfigDict( - type="TaskAdaptHook", - src_classes=self.org_model_classes, - dst_classes=self.model_classes, - model_type=cfg.model.type, - sampler_flag=has_new_class, - efficient_mode=cfg["task_adapt"].get("efficient_mode", False), - ) - update_or_add_custom_hook(cfg, task_adapt_hook) diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/trainer.py 
b/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/trainer.py deleted file mode 100644 index 6a95ca28b23..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/incremental/trainer.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Trainer for Incremental OTX Segmentation with MMSEG.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES -from otx.algorithms.common.utils.logger import get_logger -from otx.algorithms.segmentation.adapters.mmseg.tasks.trainer import SegTrainer - -from .stage import IncrSegStage - -logger = get_logger() - - -# pylint: disable=super-init-not-called -@STAGES.register_module() -class IncrSegTrainer(IncrSegStage, SegTrainer): - """Trainer for incremental segmentation.""" - - def __init__(self, **kwargs): - IncrSegStage.__init__(self, **kwargs) diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/inferrer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/inferrer.py deleted file mode 100644 index 3a5fc12d378..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/inferrer.py +++ /dev/null @@ -1,187 +0,0 @@ -"""OTX segmentation inference with MMSEG.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import copy # noqa: E402 -import warnings # noqa: E402 -from contextlib import nullcontext - -import torch -from mmcv.utils import Config, ConfigDict -from mmseg.datasets import build_dataloader as mmseg_build_dataloader -from mmseg.datasets import build_dataset as mmseg_build_dataset - -from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import ( - FeatureVectorHook, -) -from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES -from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage -from otx.algorithms.common.adapters.mmcv.utils import ( - build_data_parallel, - build_dataloader, - build_dataset, -) -from 
otx.algorithms.common.utils.logger import get_logger - -from .stage import SegStage - -logger = get_logger() - - -@STAGES.register_module() -class SegInferrer(SegStage): - """Inference class with MMSEG.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.dataset = None - - def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run inference stage for segmentation. - - - Configuration - - Environment setup - - Run inference via MMSegmentation -> MMCV - """ - self._init_logger() - mode = kwargs.get("mode", "train") - if mode not in self.mode: - logger.warning(f"Supported modes are {self.mode} but '{mode}' is given.") - return {} - - cfg = self.configure(model_cfg, model_ckpt, data_cfg, training=False, **kwargs) - logger.info("infer!") - - model_builder = kwargs.get("model_builder", None) - dump_features = kwargs.get("dump_features", False) - outputs = self.infer( - cfg, - model_builder=model_builder, - dump_features=dump_features, - ) - - return dict(outputs=outputs) - - # pylint: disable=too-many-locals, too-many-branches - def infer(self, cfg, model_builder=None, dump_features=False): - """Main inference function.""" - # TODO: distributed inference - - data_cfg = cfg.data.test.copy() - - # Input source - input_source = cfg.get("input_source", "test") - logger.info(f"Inferring on input source: data.{input_source}") - if input_source == "train": - src_data_cfg = Stage.get_data_cfg(cfg, "train") - else: - src_data_cfg = cfg.data[input_source] - - if "classes" in src_data_cfg: - data_cfg.classes = src_data_cfg.classes - - data_cfg = Config( - ConfigDict( - data=ConfigDict( - samples_per_gpu=cfg.data.get("samples_per_gpu", 1), - workers_per_gpu=cfg.data.get("workers_per_gpu", 0), - test=data_cfg, - test_dataloader=cfg.data.get("test_dataloader", {}).copy(), - ), - gpu_ids=cfg.gpu_ids, - seed=cfg.get("seed", None), - model_task=cfg.model_task, - ) - ) - self.configure_samples_per_gpu(data_cfg, "test", distributed=False) - 
self.configure_compat_cfg(data_cfg) - samples_per_gpu = data_cfg.data.test_dataloader.get("samples_per_gpu", 1) - if samples_per_gpu > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - data_cfg.data.test.pipeline = replace_ImageToTensor(data_cfg.data.test.pipeline) - - # Data loader - self.dataset = build_dataset(data_cfg, "test", mmseg_build_dataset) - test_dataloader = build_dataloader( - self.dataset, - data_cfg, - "test", - mmseg_build_dataloader, - distributed=False, - # segmentor does not support various sized batch images - samples_per_gpu=1, - ) - - # Target classes - if "task_adapt" in cfg: - target_classes = cfg.task_adapt.final - if len(target_classes) < 1: - raise KeyError( - f"target_classes={target_classes} is empty check the metadata from model ckpt or recipe " - "configuration" - ) - else: - target_classes = self.dataset.CLASSES - - # Model - cfg.model.pretrained = None - if cfg.model.get("neck"): - if isinstance(cfg.model.neck, list): - for neck_cfg in cfg.model.neck: - if neck_cfg.get("rfp_backbone"): - if neck_cfg.rfp_backbone.get("pretrained"): - neck_cfg.rfp_backbone.pretrained = None - elif cfg.model.neck.get("rfp_backbone"): - if cfg.model.neck.rfp_backbone.get("pretrained"): - cfg.model.neck.rfp_backbone.pretrained = None - cfg.model.test_cfg.return_repr_vector = True - model = self.build_model(cfg, model_builder, fp16=cfg.get("fp16", False)) - model.CLASSES = target_classes - model.eval() - feature_model = self._get_feature_module(model) - model = build_data_parallel(model, cfg, distributed=False) - - # InferenceProgressCallback (Time Monitor enable into Infer task) - self.set_inference_progress_callback(model, cfg) - - eval_predictions = [] - feature_vectors = [] - with FeatureVectorHook(feature_model) if dump_features else nullcontext() as fhook: - for data in test_dataloader: - with torch.no_grad(): - result = model(return_loss=False, output_logits=True, **data) - eval_predictions.append(result) - feature_vectors = fhook.records 
if dump_features else [None] * len(self.dataset) - - assert len(eval_predictions) == len(feature_vectors), ( - "Number of elements should be the same, however, number of outputs are ", - f"{len(eval_predictions)} and {len(feature_vectors)}", - ) - - outputs = dict( - classes=target_classes, - eval_predictions=eval_predictions, - feature_vectors=feature_vectors, - ) - return outputs - - -# pylint: disable=invalid-name -def replace_ImageToTensor(pipelines): - """Change ImageToTensor pipeline to DefaultFormatBundle.""" - pipelines = copy.deepcopy(pipelines) - for i, pipeline in enumerate(pipelines): - if pipeline["type"] == "MultiScaleFlipAug": - assert "transforms" in pipeline - pipeline["transforms"] = replace_ImageToTensor(pipeline["transforms"]) - elif pipeline["type"] == "ImageToTensor": - warnings.warn( - '"ImageToTensor" pipeline is replaced by ' - '"DefaultFormatBundle" for batch inference. It is ' - "recommended to manually replace it in the test " - "data pipeline in your config file.", - UserWarning, - ) - pipelines[i] = {"type": "DefaultFormatBundle"} - return pipelines diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py deleted file mode 100644 index cb937a180d0..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Initialize Semi-SL tasks for OTX segmentation.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from .exporter import SemiSLSegExporter -from .inferrer import SemiSLSegInferrer -from .trainer import SemiSLSegTrainer - -__all__ = ["SemiSLSegExporter", "SemiSLSegInferrer", "SemiSLSegTrainer"] diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/exporter.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/exporter.py deleted file mode 100644 index 42eb2ff0c09..00000000000 --- 
a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/exporter.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Export task for Semi-SL OTX Segmentation with MMSEG.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES -from otx.algorithms.common.utils.logger import get_logger -from otx.algorithms.segmentation.adapters.mmseg.tasks.exporter import SegExporter - -from .stage import SemiSLSegStage - -logger = get_logger() - - -@STAGES.register_module() -class SemiSLSegExporter(SemiSLSegStage, SegExporter): - """Exporter for semi-sl segmentation.""" - - def __init__(self, **kwargs): - SemiSLSegStage.__init__(self, **kwargs) - - def configure(self, model_cfg, model_ckpt, data_cfg, training=False, **kwargs): - """Patch config for semi-sl segmentation.""" - cfg = SemiSLSegStage.configure(self, model_cfg, model_ckpt, data_cfg, training=training, **kwargs) - - cfg.model.type = cfg.model.orig_type - cfg.model.pop("orig_type", False) - cfg.model.pop("unsup_weight", False) - cfg.model.pop("semisl_start_iter", False) - - return cfg diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/inferrer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/inferrer.py deleted file mode 100644 index 73d38ced93c..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/inferrer.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Inferenc for Semi-SL OTX classification with MMCLS.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES -from otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer import SegInferrer - -from .stage import SemiSLSegStage - - -# pylint: disable=super-init-not-called -@STAGES.register_module() -class SemiSLSegInferrer(SemiSLSegStage, SegInferrer): - """Inference class for Semi-SL.""" - - def __init__(self, **kwargs): - 
SemiSLSegStage.__init__(self, **kwargs) - - def configure(self, model_cfg, model_ckpt, data_cfg, training=False, **kwargs): - """Patch config for semi-sl classification.""" - cfg = SemiSLSegStage.configure(self, model_cfg, model_ckpt, data_cfg, training=training, **kwargs) - - cfg.model.type = cfg.model.orig_type - cfg.model.pop("orig_type", False) - cfg.model.pop("unsup_weight", False) - cfg.model.pop("semisl_start_iter", False) - - return cfg diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/stage.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/stage.py deleted file mode 100644 index f77feb68277..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/stage.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Stage for Semi-SL OTX Segmentation with MMSEG.""" -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.algorithms.common.adapters.mmcv.utils.config_utils import remove_custom_hook -from otx.algorithms.common.utils.logger import get_logger -from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage - -logger = get_logger() - - -class SemiSLSegStage(SegStage): - """Semi-SL stage for segmentation.""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def configure_data(self, cfg, training, data_cfg): - """Patch cfg.data.""" - super().configure_data(cfg, training, data_cfg) - # Set unlabeled data hook - if training: - if cfg.data.get("unlabeled", False) and cfg.data.unlabeled.get("otx_dataset", False): - self.configure_unlabeled_dataloader(cfg, self.distributed) - - def configure_task(self, cfg, training, **kwargs): - """Adjust settings for task adaptation.""" - super().configure_task(cfg, training, **kwargs) - - # Don't pass task_adapt arg to semi-segmentor - if cfg.model.type != "ClassIncrEncoderDecoder" and cfg.model.get("task_adapt", False): - cfg.model.pop("task_adapt") - - # Remove task adapt hook (set default torch random sampler) - 
remove_custom_hook(cfg, "TaskAdaptHook") diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/trainer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/trainer.py deleted file mode 100644 index 57931944990..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/semisl/trainer.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Train Semi-SL OTX Segmentation model with MMSEG.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES -from otx.algorithms.common.utils.logger import get_logger -from otx.algorithms.segmentation.adapters.mmseg.tasks.trainer import SegTrainer - -from .stage import SemiSLSegStage - -logger = get_logger() - - -# pylint: disable=super-init-not-called -@STAGES.register_module() -class SemiSLSegTrainer(SemiSLSegStage, SegTrainer): - """Class for semi-sl segmentation model train.""" - - def __init__(self, **kwargs): - SemiSLSegStage.__init__(self, **kwargs) diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/stage.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/stage.py deleted file mode 100644 index f6e6aeedde5..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/stage.py +++ /dev/null @@ -1,152 +0,0 @@ -"""Base stage for OTX Segmentation.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -from mmcv import ConfigDict - -from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage -from otx.algorithms.common.adapters.mmcv.utils.config_utils import ( - recursively_update_cfg, -) -from otx.algorithms.common.utils.logger import get_logger -from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor - -logger = get_logger() - - -class SegStage(Stage): - """Class for configuration of segmentation recipe.""" - - MODEL_BUILDER = build_segmentor - - def configure(self, model_cfg, model_ckpt, data_cfg, training=True, **kwargs): - 
"""Create MMCV-consumable config from given inputs.""" - logger.info(f"configure!: training={training}") - - cfg = self.cfg - self.configure_model(cfg, model_cfg, **kwargs) - self.configure_ckpt(cfg, model_ckpt, kwargs.get("pretrained", None)) - self.configure_data(cfg, training, data_cfg) - self.configure_task(cfg, training) - self.configure_hook(cfg) - - return cfg - - def configure_model(self, cfg, model_cfg, **kwargs): - """Patch model_cfg.""" - if model_cfg: - if hasattr(model_cfg, "model"): - cfg.merge_from_dict(model_cfg) - else: - raise ValueError( - "Unexpected config was passed through 'model_cfg'. " - "it should have 'model' attribute in the config" - ) - cfg.model_task = cfg.model.pop("task", "segmentation") - if cfg.model_task != "segmentation": - raise ValueError(f"Given model_cfg ({model_cfg.filename}) is not supported by segmentation recipe") - - # OV-plugin - ir_model_path = kwargs.get("ir_model_path") - if ir_model_path: - - def is_mmov_model(key, value): - if key == "type" and value.startswith("MMOV"): - return True - return False - - ir_weight_path = kwargs.get("ir_weight_path", None) - ir_weight_init = kwargs.get("ir_weight_init", False) - recursively_update_cfg( - cfg, - is_mmov_model, - {"model_path": ir_model_path, "weight_path": ir_weight_path, "init_weight": ir_weight_init}, - ) - - def configure_data(self, cfg, training, data_cfg): # noqa: C901 - """Patch data_cfg.""" - # Data - if data_cfg: - cfg.merge_from_dict(data_cfg) - - # Dataset - super().configure_data(cfg, training) - src_data_cfg = Stage.get_data_cfg(cfg, "train") - for mode in ["train", "val", "test"]: - if src_data_cfg.type == "MPASegDataset" and cfg.data.get(mode, False): - if cfg.data[mode]["type"] != "MPASegDataset": - # Wrap original dataset config - org_type = cfg.data[mode]["type"] - cfg.data[mode]["type"] = "MPASegDataset" - cfg.data[mode]["org_type"] = org_type - - def configure_task(self, cfg, training): - """Adjust settings for task adaptation.""" - if 
cfg.get("task_adapt", None): - logger.info(f"task config!!!!: training={training}") - task_adapt_op = cfg["task_adapt"].get("op", "REPLACE") - - # Task classes - self.configure_classes(cfg, task_adapt_op) - # Ignored mode - self.configure_ignore(cfg) - - def configure_classes(self, cfg, task_adapt_op): - """Patch model_classes and data_classes.""" - # Task classes - org_model_classes = self.get_model_classes(cfg) - data_classes = self.get_data_classes(cfg) - if "background" not in org_model_classes: - org_model_classes = ["background"] + org_model_classes - if "background" not in data_classes: - data_classes = ["background"] + data_classes - - # Model classes - if task_adapt_op == "REPLACE": - if len(data_classes) == 1: # 'background' - model_classes = org_model_classes.copy() - else: - model_classes = data_classes.copy() - elif task_adapt_op == "MERGE": - model_classes = org_model_classes + [cls for cls in data_classes if cls not in org_model_classes] - else: - raise KeyError(f"{task_adapt_op} is not supported for task_adapt options!") - - cfg.task_adapt.final = model_classes - cfg.model.task_adapt = ConfigDict( - src_classes=org_model_classes, - dst_classes=model_classes, - ) - - # Model architecture - if "decode_head" in cfg.model: - decode_head = cfg.model.decode_head - if isinstance(decode_head, dict): - decode_head.num_classes = len(model_classes) - elif isinstance(decode_head, list): - for head in decode_head: - head.num_classes = len(model_classes) - - # For SupConDetCon - if "SupConDetCon" in cfg.model.type: - cfg.model.num_classes = len(model_classes) - - # Task classes - self.org_model_classes = org_model_classes - self.model_classes = model_classes - - def configure_ignore(self, cfg): - """Change to incremental loss (ignore mode).""" - if cfg.get("ignore", False): - cfg_loss_decode = ConfigDict( - type="CrossEntropyLossWithIgnore", - use_sigmoid=False, - loss_weight=1.0, - ) - - if "decode_head" in cfg.model: - decode_head = cfg.model.decode_head - if 
decode_head.type == "FCNHead": - decode_head.type = "CustomFCNHead" - decode_head.loss_decode = cfg_loss_decode diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/trainer.py b/otx/algorithms/segmentation/adapters/mmseg/tasks/trainer.py deleted file mode 100644 index fa05d8277e9..00000000000 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/trainer.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Base Trainer for OTX segmentation with MMSEG.""" -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import glob -import os -import time - -from mmcv import get_git_hash -from mmseg import __version__ -from mmseg.apis import train_segmentor -from mmseg.datasets import build_dataset -from mmseg.utils import collect_env -from torch import nn - -from otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES -from otx.algorithms.common.utils.logger import get_logger - -from .stage import SegStage - -logger = get_logger() - - -@STAGES.register_module() -class SegTrainer(SegStage): - """Class for OTX segmentation train.""" - - # pylint: disable=too-many-locals - def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): - """Run training stage for segmentation. - - - Configuration - - Environment setup - - Run training via MMSegmentation -> MMCV - """ - self._init_logger() - mode = kwargs.get("mode", "train") - if mode not in self.mode: - logger.warning(f"Supported modes are {self.mode} but '{mode}' is given.") - return {} - - cfg = self.configure(model_cfg, model_ckpt, data_cfg, **kwargs) - logger.info("train!") - - # FIXME: what is this? Why do we need? 
- if cfg.runner.type == "IterBasedRunner": - cfg.runner = dict(type=cfg.runner.type, max_iters=cfg.runner.max_iters) - - timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) - - # Environment - logger.info(f"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {self.distributed}") - env_info_dict = collect_env() - env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) - dash_line = "-" * 60 + "\n" - logger.info(f"Environment info:\n{dash_line}{env_info}\n{dash_line}") - - # Data - datasets = [build_dataset(cfg.data.train)] - - # FIXME: Currently segmentor does not support multi batch evaluation. - # For the Self-SL case, there is no val data. So, need to check the - if "val" in cfg.data and "val_dataloader" in cfg.data: - cfg.data.val_dataloader.samples_per_gpu = 1 - - # Target classes - if "task_adapt" in cfg: - target_classes = cfg.task_adapt.final - else: - target_classes = datasets[0].CLASSES - - # Metadata - meta = dict() - meta["env_info"] = env_info - meta["seed"] = cfg.seed - meta["exp_name"] = cfg.work_dir - if cfg.checkpoint_config is not None: - cfg.checkpoint_config.meta = dict( - mmseg_version=__version__ + get_git_hash()[:7], - CLASSES=target_classes, - ) - - self.configure_samples_per_gpu(cfg, "train", self.distributed) - self.configure_fp16_optimizer(cfg, self.distributed) - - # Model - model_builder = kwargs.get("model_builder", None) - model = self.build_model(cfg, model_builder) - model.train() - model.CLASSES = target_classes - - if self.distributed: - self._modify_cfg_for_distributed(model, cfg) - - self.configure_compat_cfg(cfg) - - # Save config - # cfg.dump(osp.join(cfg.work_dir, 'config.py')) - # logger.info(f'Config:\n{cfg.pretty_text}') - - validate = "val" in cfg.data - train_segmentor( - model, - datasets, - cfg, - distributed=self.distributed, - validate=validate, - timestamp=timestamp, - meta=meta, - ) - - # Save outputs - output_ckpt_path = os.path.join(cfg.work_dir, "latest.pth") - best_ckpt_path = 
glob.glob(os.path.join(cfg.work_dir, "best_mDice_*.pth")) - if len(best_ckpt_path) > 0: - output_ckpt_path = best_ckpt_path[0] - best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, "best_mIoU_*.pth")) - if len(best_ckpt_path) > 0: - output_ckpt_path = best_ckpt_path[0] - return dict( - final_ckpt=output_ckpt_path, - ) - - def _modify_cfg_for_distributed(self, model, cfg): - nn.SyncBatchNorm.convert_sync_batchnorm(model) - - if cfg.dist_params.get("linear_scale_lr", False): - new_lr = len(cfg.gpu_ids) * cfg.optimizer.lr - logger.info( - f"enabled linear scaling rule to the learning rate. \ - changed LR from {cfg.optimizer.lr} to {new_lr}" - ) - cfg.optimizer.lr = new_lr diff --git a/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py b/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py index 690936587bd..fed92702c3f 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/__init__.py @@ -23,9 +23,11 @@ set_hyperparams, ) from .data_utils import get_valid_label_mask_per_batch, load_dataset_items +from .exporter import SegmentationExporter __all__ = [ "patch_config", + "SegmentationExporter", "patch_datasets", "patch_evaluation", "prepare_for_training", diff --git a/otx/algorithms/segmentation/adapters/mmseg/tasks/exporter.py b/otx/algorithms/segmentation/adapters/mmseg/utils/exporter.py similarity index 82% rename from otx/algorithms/segmentation/adapters/mmseg/tasks/exporter.py rename to otx/algorithms/segmentation/adapters/mmseg/utils/exporter.py index b9104ee7297..f5afb1d6f4e 100644 --- a/otx/algorithms/segmentation/adapters/mmseg/tasks/exporter.py +++ b/otx/algorithms/segmentation/adapters/mmseg/utils/exporter.py @@ -6,25 +6,24 @@ import numpy as np from mmcv.runner import wrap_fp16_model -from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin +from otx.algorithms.common.adapters.mmcv.tasks.exporter import Exporter from 
otx.algorithms.common.adapters.mmcv.tasks.registry import STAGES from otx.algorithms.common.adapters.mmdeploy.utils import sync_batchnorm_2_batchnorm from otx.algorithms.common.utils.logger import get_logger - -from .stage import SegStage +from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor logger = get_logger() @STAGES.register_module() -class SegExporter(ExporterMixin, SegStage): - """Class for segmentation model export.""" +class SegmentationExporter(Exporter): + """Exporter for OTX Segmentation using mmsegmentation training backend.""" - def run(self, model_cfg, model_ckpt, data_cfg, **kwargs): # noqa: C901 + def run(self, cfg, **kwargs): # noqa: C901 """Run exporter stage.""" precision = kwargs.get("precision", "FP32") - model_builder = kwargs.get("model_builder", self.MODEL_BUILDER) + model_builder = kwargs.get("model_builder", build_segmentor) def model_builder_helper(*args, **kwargs): model = model_builder(*args, **kwargs) @@ -42,7 +41,7 @@ def model_builder_helper(*args, **kwargs): kwargs["model_builder"] = model_builder_helper - return super().run(model_cfg, model_ckpt, data_cfg, **kwargs) + return super().run(cfg, **kwargs) @staticmethod def naive_export(output_dir, model_builder, precision, cfg, model_name="model"): diff --git a/otx/algorithms/segmentation/adapters/openvino/__init__.py b/otx/algorithms/segmentation/adapters/openvino/__init__.py index da9b8319b9e..dcb19d47029 100644 --- a/otx/algorithms/segmentation/adapters/openvino/__init__.py +++ b/otx/algorithms/segmentation/adapters/openvino/__init__.py @@ -2,3 +2,7 @@ # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 + +from .task import OpenVINOSegmentationInferencer, OpenVINOSegmentationTask, OTXOpenVinoDataLoader + +__all__ = ["OpenVINOSegmentationTask", "OpenVINOSegmentationInferencer", "OTXOpenVinoDataLoader"] diff --git a/otx/algorithms/segmentation/tasks/openvino.py b/otx/algorithms/segmentation/adapters/openvino/task.py similarity 
index 100% rename from otx/algorithms/segmentation/tasks/openvino.py rename to otx/algorithms/segmentation/adapters/openvino/task.py diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml index eefad88a0b0..4a214b5cb5b 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18/template.yaml @@ -12,9 +12,9 @@ framework: OTXSegmentation v0.14.0 # Task implementations. entrypoints: - base: otx.algorithms.segmentation.tasks.SegmentationTrainTask - openvino: otx.algorithms.segmentation.tasks.OpenVINOSegmentationTask - nncf: otx.algorithms.segmentation.tasks.SegmentationNNCFTask + base: otx.algorithms.segmentation.adapters.mmseg.task.MMSegmentationTask + openvino: otx.algorithms.segmentation.adapters.openvino.task.OpenVINOSegmentationTask + nncf: otx.algorithms.segmentation.adapters.mmseg.nncf.task.SegmentationNNCFTask # Capabilities. capabilities: diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/supcon/model.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/supcon/model.py index 7a3309e8ec6..009ed8fe4f9 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/supcon/model.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/supcon/model.py @@ -82,4 +82,4 @@ resume_from = None -fp16 = dict(_delete_=True, loss_scale=512.0) +fp16 = dict(loss_scale=512.0) diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml index c81686610b8..ca8bd75be0a 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_18_mod2/template.yaml @@ -12,9 +12,9 @@ framework: OTXSegmentation v0.14.0 # Task implementations. 
entrypoints: - base: otx.algorithms.segmentation.tasks.SegmentationTrainTask - openvino: otx.algorithms.segmentation.tasks.OpenVINOSegmentationTask - nncf: otx.algorithms.segmentation.tasks.SegmentationNNCFTask + base: otx.algorithms.segmentation.adapters.mmseg.task.MMSegmentationTask + openvino: otx.algorithms.segmentation.adapters.openvino.task.OpenVINOSegmentationTask + nncf: otx.algorithms.segmentation.adapters.mmseg.nncf.task.SegmentationNNCFTask # Capabilities. capabilities: diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/supcon/model.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/supcon/model.py index 7fa99e45051..eb025a041de 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/supcon/model.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/supcon/model.py @@ -91,4 +91,4 @@ resume_from = None -fp16 = dict(_delete_=True, loss_scale=512.0) +fp16 = dict(loss_scale=512.0) diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml index 1fc1ed6fff8..6e3de18b4f9 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_s_mod2/template.yaml @@ -12,9 +12,9 @@ framework: OTXSegmentation v0.14.0 # Task implementations. entrypoints: - base: otx.algorithms.segmentation.tasks.SegmentationTrainTask - openvino: otx.algorithms.segmentation.tasks.OpenVINOSegmentationTask - nncf: otx.algorithms.segmentation.tasks.SegmentationNNCFTask + base: otx.algorithms.segmentation.adapters.mmseg.task.MMSegmentationTask + openvino: otx.algorithms.segmentation.adapters.openvino.task.OpenVINOSegmentationTask + nncf: otx.algorithms.segmentation.adapters.mmseg.nncf.task.SegmentationNNCFTask # Capabilities. 
capabilities: diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/supcon/model.py b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/supcon/model.py index c250820edab..c6878a5d56b 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/supcon/model.py +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/supcon/model.py @@ -86,4 +86,4 @@ resume_from = None -fp16 = dict(_delete_=True, loss_scale=512.0) +fp16 = dict(loss_scale=512.0) diff --git a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml index 827ef4284d8..28efe84420d 100644 --- a/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml +++ b/otx/algorithms/segmentation/configs/ocr_lite_hrnet_x_mod3/template.yaml @@ -12,9 +12,9 @@ framework: OTXSegmentation v0.14.0 # Task implementations. entrypoints: - base: otx.algorithms.segmentation.tasks.SegmentationTrainTask - openvino: otx.algorithms.segmentation.tasks.OpenVINOSegmentationTask - nncf: otx.algorithms.segmentation.tasks.SegmentationNNCFTask + base: otx.algorithms.segmentation.adapters.mmseg.task.MMSegmentationTask + openvino: otx.algorithms.segmentation.adapters.openvino.task.OpenVINOSegmentationTask + nncf: otx.algorithms.segmentation.adapters.mmseg.nncf.task.SegmentationNNCFTask # Capabilities. capabilities: diff --git a/otx/algorithms/segmentation/task.py b/otx/algorithms/segmentation/task.py new file mode 100644 index 00000000000..5ec5b7e3afb --- /dev/null +++ b/otx/algorithms/segmentation/task.py @@ -0,0 +1,367 @@ +"""Task of OTX Segmentation.""" + +# Copyright (C) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions +# and limitations under the License. + +import io +import os +from abc import ABC, abstractmethod +from typing import List, Optional + +import numpy as np +import torch +from mmcv.utils import ConfigDict + +from otx.algorithms.common.configs.training_base import TrainType +from otx.algorithms.common.tasks.base_task import TRAIN_TYPE_DIR_PATH, OTXTask +from otx.algorithms.common.utils.callback import ( + InferenceProgressCallback, + TrainingProgressCallback, +) +from otx.algorithms.common.utils.logger import get_logger +from otx.algorithms.segmentation.adapters.openvino.model_wrappers.blur import ( + get_activation_map, +) +from otx.algorithms.segmentation.configs.base import SegmentationConfig +from otx.api.configuration import cfg_helper +from otx.api.configuration.helper.utils import ids_to_strings +from otx.api.entities.datasets import DatasetEntity +from otx.api.entities.explain_parameters import ExplainParameters +from otx.api.entities.inference_parameters import InferenceParameters +from otx.api.entities.metrics import ( + CurveMetric, + InfoMetric, + LineChartInfo, + MetricsGroup, + Performance, + ScoreMetric, + VisualizationInfo, + VisualizationType, +) +from otx.api.entities.model import ( + ModelEntity, + ModelFormat, + ModelOptimizationType, + ModelPrecision, +) +from otx.api.entities.result_media import ResultMediaEntity +from otx.api.entities.resultset import ResultSetEntity +from otx.api.entities.task_environment import TaskEnvironment +from otx.api.entities.tensor import TensorEntity +from otx.api.entities.train_parameters import TrainParameters, default_progress_callback +from 
otx.api.serialization.label_mapper import label_schema_to_bytes +from otx.api.usecases.evaluation.metrics_helper import MetricsHelper +from otx.api.usecases.tasks.interfaces.export_interface import ExportType +from otx.api.utils.segmentation_utils import ( + create_annotation_from_segmentation_map, + create_hard_prediction_from_soft_prediction, +) + +logger = get_logger() +RECIPE_TRAIN_TYPE = { + TrainType.Semisupervised: "semisl.py", + TrainType.Incremental: "incremental.py", + TrainType.Selfsupervised: "selfsl.py", +} + + +class OTXSegmentationTask(OTXTask, ABC): + """Task class for OTX segmentation.""" + + # pylint: disable=too-many-instance-attributes, too-many-locals + def __init__(self, task_environment: TaskEnvironment, output_path: Optional[str] = None): + super().__init__(task_environment, output_path) + self._task_config = SegmentationConfig + self._hyperparams: ConfigDict = task_environment.get_hyper_parameters(self._task_config) + self._model_name = task_environment.model_template.name + self._train_type = self._hyperparams.algo_backend.train_type + self.metric = "mDice" + self._label_dictionary = dict(enumerate(sorted(self._labels), 1)) + + self._model_dir = os.path.join( + os.path.abspath(os.path.dirname(self._task_environment.model_template.model_template_path)), + TRAIN_TYPE_DIR_PATH[self._train_type.name], + ) + if ( + self._train_type in RECIPE_TRAIN_TYPE + and self._train_type == TrainType.Incremental + and self._hyperparams.learning_parameters.enable_supcon + and not self._model_dir.endswith("supcon") + ): + self._model_dir = os.path.join(self._model_dir, "supcon") + + if task_environment.model is not None: + self._load_model() + + self.data_pipeline_path = os.path.join(self._model_dir, "data_pipeline.py") + + def _load_model_ckpt(self, model: Optional[ModelEntity]): + if model and "weights.pth" in model.model_adapters: + # If a model has been trained and saved for the task already, create empty model and load weights here + buffer = 
io.BytesIO(model.get_data("weights.pth")) + model_data = torch.load(buffer, map_location=torch.device("cpu")) + return model_data + return None + + def infer( + self, + dataset: DatasetEntity, + inference_parameters: Optional[InferenceParameters] = None, + ) -> DatasetEntity: + """Main infer function.""" + logger.info("infer()") + + update_progress_callback = default_progress_callback + if inference_parameters is not None: + update_progress_callback = inference_parameters.update_progress # type: ignore + + self._time_monitor = InferenceProgressCallback(len(dataset), update_progress_callback) + # If confidence threshold is adaptive then up-to-date value should be stored in the model + # and should not be changed during inference. Otherwise user-specified value should be taken. + + predictions = self._infer_model(dataset, InferenceParameters(is_evaluation=True)) + prediction_results = zip(predictions["eval_predictions"], predictions["feature_vectors"]) + self._add_predictions_to_dataset(prediction_results, dataset, dump_soft_prediction=False) + + logger.info("Inference completed") + return dataset + + def train( + self, dataset: DatasetEntity, output_model: ModelEntity, train_parameters: Optional[TrainParameters] = None + ): + """Train function for OTX segmentation task. + + Actual training is processed by _train_model fucntion + """ + logger.info("train()") + # Check for stop signal when training has stopped. + # If should_stop is true, training was cancelled and no new + if self._should_stop: # type: ignore + logger.info("Training cancelled.") + self._should_stop = False + self._is_training = False + return + + # Set OTX LoggerHook & Time Monitor + if train_parameters: + update_progress_callback = train_parameters.update_progress + else: + update_progress_callback = default_progress_callback + self._time_monitor = TrainingProgressCallback(update_progress_callback) + + results = self._train_model(dataset) + + # Check for stop signal when training has stopped. 
If should_stop is true, training was cancelled and no new + if self._should_stop: + logger.info("Training cancelled.") + self._should_stop = False + self._is_training = False + return + + # get output model + model_ckpt = results.get("final_ckpt") + if model_ckpt is None: + logger.error("cannot find final checkpoint from the results.") + # output_model.model_status = ModelStatus.FAILED + return + # update checkpoint to the newly trained model + self._model_ckpt = model_ckpt + + # get prediction on validation set + self._is_training = False + + # Get training metrics group from learning curves + training_metrics, best_score = self._generate_training_metrics(self._learning_curves) + performance = Performance( + score=ScoreMetric(value=best_score, name=self.metric), + dashboard_metrics=training_metrics, + ) + + logger.info(f"Final model performance: {str(performance)}") + # save resulting model + self.save_model(output_model) + output_model.performance = performance + self._is_training = False + logger.info("train done.") + + def export( + self, + export_type: ExportType, + output_model: ModelEntity, + precision: ModelPrecision = ModelPrecision.FP32, + dump_features: bool = True, + ): + """Export function of OTX Task.""" + logger.info("Exporting the model") + if export_type != ExportType.OPENVINO: + raise RuntimeError(f"not supported export type {export_type}") + output_model.model_format = ModelFormat.OPENVINO + output_model.optimization_type = ModelOptimizationType.MO + + results = self._export_model(precision, dump_features) + outputs = results.get("outputs") + logger.debug(f"results of run_task = {outputs}") + if outputs is None: + raise RuntimeError(results.get("msg")) + + bin_file = outputs.get("bin") + xml_file = outputs.get("xml") + + if xml_file is None or bin_file is None: + raise RuntimeError("invalid status of exporting. 
bin and xml should not be None") + with open(bin_file, "rb") as f: + output_model.set_data("openvino.bin", f.read()) + with open(xml_file, "rb") as f: + output_model.set_data("openvino.xml", f.read()) + output_model.precision = self._precision + output_model.optimization_methods = self._optimization_methods + output_model.has_xai = dump_features + output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) + logger.info("Exporting completed") + + def explain( + self, + dataset: DatasetEntity, + explain_parameters: Optional[ExplainParameters] = None, + ) -> DatasetEntity: + """Main explain function of OTX Task.""" + raise NotImplementedError + + def evaluate( + self, + output_resultset: ResultSetEntity, + evaluation_metric: Optional[str] = None, + ): + """Evaluate function of OTX Segmentation Task.""" + logger.info("called evaluate()") + if evaluation_metric is not None: + logger.warning( + f"Requested to use {evaluation_metric} metric, " "but parameter is ignored. Use mDice instead." + ) + metric = MetricsHelper.compute_dice_averaged_over_pixels(output_resultset) + logger.info(f"mDice after evaluation: {metric.overall_dice.value}") + output_resultset.performance = metric.get_performance() + logger.info("Evaluation completed") + + def _add_predictions_to_dataset(self, prediction_results, dataset, dump_soft_prediction): + """Loop over dataset again to assign predictions. 
Convert from MMSegmentation format to OTX format.""" + + for dataset_item, (prediction, feature_vector) in zip(dataset, prediction_results): + soft_prediction = np.transpose(prediction[0], axes=(1, 2, 0)) + hard_prediction = create_hard_prediction_from_soft_prediction( + soft_prediction=soft_prediction, + soft_threshold=self._hyperparams.postprocessing.soft_threshold, + blur_strength=self._hyperparams.postprocessing.blur_strength, + ) + annotations = create_annotation_from_segmentation_map( + hard_prediction=hard_prediction, + soft_prediction=soft_prediction, + label_map=self._label_dictionary, + ) + dataset_item.append_annotations(annotations=annotations) + + if feature_vector is not None: + active_score = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1)) + dataset_item.append_metadata_item(active_score, model=self._task_environment.model) + + if dump_soft_prediction: + for label_index, label in self._label_dictionary.items(): + if label_index == 0: + continue + current_label_soft_prediction = soft_prediction[:, :, label_index] + class_act_map = get_activation_map(current_label_soft_prediction) + result_media = ResultMediaEntity( + name=label.name, + type="soft_prediction", + label=label, + annotation_scene=dataset_item.annotation_scene, + roi=dataset_item.roi, + numpy=class_act_map, + ) + dataset_item.append_metadata_item(result_media, model=self._task_environment.model) + + def save_model(self, output_model: ModelEntity): + """Save best model weights in SegmentationTrainTask.""" + logger.info("called save_model") + buffer = io.BytesIO() + hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) + labels = {label.name: label.color.rgb_tuple for label in self._labels} + model_ckpt = torch.load(self._model_ckpt) + modelinfo = { + "model": model_ckpt, + "config": hyperparams_str, + "labels": labels, + "VERSION": 1, + } + + torch.save(modelinfo, buffer) + output_model.set_data("weights.pth", 
buffer.getvalue()) + output_model.set_data( + "label_schema.json", + label_schema_to_bytes(self._task_environment.label_schema), + ) + output_model.precision = self._precision + + def _generate_training_metrics(self, learning_curves): + """Get Training metrics (epochs & scores). + + Parses the mmsegmentation logs to get metrics from the latest training run + :return output List[MetricsGroup] + """ + output: List[MetricsGroup] = [] + # Model architecture + architecture = InfoMetric(name="Model architecture", value=self._model_name) + visualization_info_architecture = VisualizationInfo( + name="Model architecture", visualisation_type=VisualizationType.TEXT + ) + output.append( + MetricsGroup( + metrics=[architecture], + visualization_info=visualization_info_architecture, + ) + ) + # Learning curves + best_score = -1 + for key, curve in learning_curves.items(): + metric_curve = CurveMetric(xs=curve.x, ys=curve.y, name=key) + if key == f"val/{self.metric}": + best_score = max(curve.y) + visualization_info = LineChartInfo(name=key, x_axis_label="Epoch", y_axis_label=key) + output.append(MetricsGroup(metrics=[metric_curve], visualization_info=visualization_info)) + + return output, best_score + + @abstractmethod + def _train_model(self, dataset: DatasetEntity): + """Train model and return the results.""" + raise NotImplementedError + + @abstractmethod + def _infer_model( + self, + dataset: DatasetEntity, + inference_parameters: Optional[InferenceParameters] = None, + ): + """Get inference results from dataset.""" + raise NotImplementedError + + @abstractmethod + def _export_model(self, precision, dump_features): + """Export model and return the results.""" + raise NotImplementedError + + @abstractmethod + def _explain_model(self, dataset: DatasetEntity, explain_parameters: Optional[ExplainParameters]): + """Explain model and return the results.""" + raise NotImplementedError diff --git a/otx/algorithms/segmentation/tasks/__init__.py 
b/otx/algorithms/segmentation/tasks/__init__.py deleted file mode 100644 index 366dbb7cb2e..00000000000 --- a/otx/algorithms/segmentation/tasks/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -"""Task Initialization of OTX Detection.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -import otx.algorithms.common.adapters.mmcv.models as OTXBackbones -import otx.algorithms.segmentation.adapters.mmseg.tasks as MPASegmentation - -from .inference import SegmentationInferenceTask -from .nncf import SegmentationNNCFTask -from .openvino import OpenVINOSegmentationTask -from .train import SegmentationTrainTask - -__all__ = [ - "MPASegmentation", - "SegmentationInferenceTask", - "SegmentationTrainTask", - "SegmentationNNCFTask", - "OpenVINOSegmentationTask", - "OTXBackbones", -] diff --git a/otx/algorithms/segmentation/tasks/inference.py b/otx/algorithms/segmentation/tasks/inference.py deleted file mode 100644 index 21733188d02..00000000000 --- a/otx/algorithms/segmentation/tasks/inference.py +++ /dev/null @@ -1,288 +0,0 @@ -"""Inference Task of OTX Segmentation.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -import os -from typing import Dict, Optional - -import numpy as np -from mmcv.utils import ConfigDict - -from otx.algorithms.common.adapters.mmcv.utils import ( - patch_data_pipeline, - patch_default_config, - patch_runner, - remove_from_configs_by_type, -) -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.common.configs import TrainType -from otx.algorithms.common.tasks import BaseTask -from otx.algorithms.common.utils.callback import InferenceProgressCallback -from otx.algorithms.common.utils.logger import get_logger -from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor -from otx.algorithms.segmentation.adapters.mmseg.utils.config_utils import ( - patch_datasets, - patch_evaluation, -) -from otx.algorithms.segmentation.adapters.openvino.model_wrappers.blur import ( - get_activation_map, -) -from otx.algorithms.segmentation.configs.base import SegmentationConfig -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.inference_parameters import InferenceParameters -from otx.api.entities.inference_parameters import ( - default_progress_callback as default_infer_progress_callback, -) -from otx.api.entities.model import ( - ModelEntity, - ModelFormat, - ModelOptimizationType, - ModelPrecision, -) -from otx.api.entities.result_media import ResultMediaEntity -from otx.api.entities.resultset import ResultSetEntity -from otx.api.entities.task_environment import TaskEnvironment -from otx.api.entities.tensor import TensorEntity -from 
otx.api.serialization.label_mapper import label_schema_to_bytes -from otx.api.usecases.evaluation.metrics_helper import MetricsHelper -from otx.api.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask -from otx.api.usecases.tasks.interfaces.export_interface import ExportType, IExportTask -from otx.api.usecases.tasks.interfaces.inference_interface import IInferenceTask -from otx.api.usecases.tasks.interfaces.unload_interface import IUnload -from otx.api.utils.argument_checks import ( - DatasetParamTypeCheck, - check_input_parameters_type, -) -from otx.api.utils.segmentation_utils import ( - create_annotation_from_segmentation_map, - create_hard_prediction_from_soft_prediction, -) - -logger = get_logger() - - -RECIPE_TRAIN_TYPE = { - TrainType.Semisupervised: "semisl.py", - TrainType.Incremental: "incremental.py", - TrainType.Selfsupervised: "selfsl.py", -} - - -# pylint: disable=too-many-locals, too-many-instance-attributes, attribute-defined-outside-init -class SegmentationInferenceTask(BaseTask, IInferenceTask, IExportTask, IEvaluationTask, IUnload): - """Inference Task Implementation of OTX Segmentation.""" - - @check_input_parameters_type() - def __init__(self, task_environment: TaskEnvironment, **kwargs): - # self._should_stop = False - self.freeze = True - self.metric = "mDice" - self._label_dictionary: Dict = {} - - super().__init__(SegmentationConfig, task_environment, **kwargs) - self._label_dictionary = dict(enumerate(sorted(self._labels), 1)) - - @check_input_parameters_type({"dataset": DatasetParamTypeCheck}) - def infer( - self, dataset: DatasetEntity, inference_parameters: Optional[InferenceParameters] = None - ) -> DatasetEntity: - """Main infer function of OTX Segmentation.""" - logger.info("infer()") - - if inference_parameters is not None: - update_progress_callback = inference_parameters.update_progress - is_evaluation = inference_parameters.is_evaluation - else: - update_progress_callback = default_infer_progress_callback - 
is_evaluation = False - - self._time_monitor = InferenceProgressCallback(len(dataset), update_progress_callback) - - stage_module = "SegInferrer" - self._data_cfg = self._init_test_data_cfg(dataset) - - dump_features = True - - results = self._run_task( - stage_module, - mode="train", - dataset=dataset, - dump_features=dump_features, - ) - logger.debug(f"result of run_task {stage_module} module = {results}") - predictions = results["outputs"] - prediction_results = zip(predictions["eval_predictions"], predictions["feature_vectors"]) - self._add_predictions_to_dataset(prediction_results, dataset, dump_soft_prediction=not is_evaluation) - return dataset - - @check_input_parameters_type() - def evaluate(self, output_resultset: ResultSetEntity, evaluation_metric: Optional[str] = None): - """Evaluate function of OTX Segmentation Task.""" - logger.info("called evaluate()") - - if evaluation_metric is not None: - logger.warning( - f"Requested to use {evaluation_metric} metric, " "but parameter is ignored. Use mDice instead." 
- ) - logger.info("Computing mDice") - metrics = MetricsHelper.compute_dice_averaged_over_pixels(output_resultset) - logger.info(f"mDice after evaluation: {metrics.overall_dice.value}") - output_resultset.performance = metrics.get_performance() - - def unload(self): - """Unload the task.""" - self.cleanup() - - @check_input_parameters_type() - def export( - self, - export_type: ExportType, - output_model: ModelEntity, - precision: ModelPrecision = ModelPrecision.FP32, - dump_features: bool = False, - ): - """Export function of OTX Segmentation Task.""" - logger.info("Exporting the model") - if export_type != ExportType.OPENVINO: - raise RuntimeError(f"not supported export type {export_type}") - output_model.model_format = ModelFormat.OPENVINO - output_model.optimization_type = ModelOptimizationType.MO - - stage_module = "SegExporter" - results = self._run_task( - stage_module, - mode="train", - export=True, - dump_features=dump_features, - enable_fp16=(precision == ModelPrecision.FP16), - ) - outputs = results.get("outputs") - logger.debug(f"results of run_task = {outputs}") - if outputs is None: - raise RuntimeError(results.get("msg")) - - bin_file = outputs.get("bin") - xml_file = outputs.get("xml") - if xml_file is None or bin_file is None: - raise RuntimeError("invalid status of exporting. bin and xml should not be None") - with open(bin_file, "rb") as f: - output_model.set_data("openvino.bin", f.read()) - with open(xml_file, "rb") as f: - output_model.set_data("openvino.xml", f.read()) - output_model.precision = self._precision - output_model.optimization_methods = self._optimization_methods - output_model.has_xai = dump_features - output_model.set_data("label_schema.json", label_schema_to_bytes(self._task_environment.label_schema)) - logger.info("Exporting completed") - - def _init_recipe(self): - logger.info("called _init_recipe()") - # TODO: Need to remove the hard coding for supcon only. 
- if ( - self._train_type in RECIPE_TRAIN_TYPE - and self._train_type == TrainType.Incremental - and self._hyperparams.learning_parameters.enable_supcon - and not self._model_dir.endswith("supcon") - ): - self._model_dir = os.path.join(self._model_dir, "supcon") - - self._recipe_cfg = self._init_model_cfg() - options_for_patch_datasets = {"type": "MPASegDataset"} - patch_default_config(self._recipe_cfg) - patch_runner(self._recipe_cfg) - patch_data_pipeline(self._recipe_cfg, self.data_pipeline_path) - patch_datasets( - self._recipe_cfg, - self._task_type.domain, - **options_for_patch_datasets, - ) # for OTX compatibility - patch_evaluation(self._recipe_cfg) # for OTX compatibility - if self._recipe_cfg.get("evaluation", None): - self.metric = self._recipe_cfg.evaluation.metric - - if self._recipe_cfg.get("override_configs", None): - self.override_configs.update(self._recipe_cfg.override_configs) - - if not self.freeze: - remove_from_configs_by_type(self._recipe_cfg.custom_hooks, "FreezeLayers") - - def _update_stage_module(self, stage_module: str): - module_prefix = {TrainType.Semisupervised: "SemiSL", TrainType.Incremental: "Incr"} - if self._train_type == TrainType.Semisupervised and stage_module == "SegExporter": - stage_module = "SemiSLSegExporter" - elif self._train_type in module_prefix and stage_module in ["SegTrainer", "SegInferrer"]: - stage_module = module_prefix[self._train_type] + stage_module - - return stage_module - - def _init_model_cfg(self): - model_cfg = MPAConfig.fromfile(os.path.join(self._model_dir, "model.py")) - return model_cfg - - def _init_test_data_cfg(self, dataset: DatasetEntity): - data_cfg = ConfigDict( - data=ConfigDict( - train=ConfigDict( - otx_dataset=None, - labels=self._labels, - ), - test=ConfigDict( - otx_dataset=dataset, - labels=self._labels, - ), - ) - ) - return data_cfg - - def _add_predictions_to_dataset(self, prediction_results, dataset, dump_soft_prediction): - """Loop over dataset again to assign predictions. 
Convert from MMSegmentation format to OTX format.""" - - for dataset_item, (prediction, feature_vector) in zip(dataset, prediction_results): - soft_prediction = np.transpose(prediction[0], axes=(1, 2, 0)) - hard_prediction = create_hard_prediction_from_soft_prediction( - soft_prediction=soft_prediction, - soft_threshold=self._hyperparams.postprocessing.soft_threshold, - blur_strength=self._hyperparams.postprocessing.blur_strength, - ) - annotations = create_annotation_from_segmentation_map( - hard_prediction=hard_prediction, - soft_prediction=soft_prediction, - label_map=self._label_dictionary, - ) - dataset_item.append_annotations(annotations=annotations) - - if feature_vector is not None: - active_score = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1)) - dataset_item.append_metadata_item(active_score, model=self._task_environment.model) - - if dump_soft_prediction: - for label_index, label in self._label_dictionary.items(): - if label_index == 0: - continue - current_label_soft_prediction = soft_prediction[:, :, label_index] - class_act_map = get_activation_map(current_label_soft_prediction) - result_media = ResultMediaEntity( - name=label.name, - type="soft_prediction", - label=label, - annotation_scene=dataset_item.annotation_scene, - roi=dataset_item.roi, - numpy=class_act_map, - ) - dataset_item.append_metadata_item(result_media, model=self._task_environment.model) - - def _initialize_post_hook(self, options=None): - super()._initialize_post_hook(options) - options["model_builder"] = build_segmentor diff --git a/otx/algorithms/segmentation/tasks/train.py b/otx/algorithms/segmentation/tasks/train.py deleted file mode 100644 index 6f8ac4d183d..00000000000 --- a/otx/algorithms/segmentation/tasks/train.py +++ /dev/null @@ -1,200 +0,0 @@ -"""Train Task of OTX Segmentation.""" - -# Copyright (C) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance 
with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions -# and limitations under the License. - -import io -from typing import List, Optional - -import torch -from mmcv.utils import ConfigDict - -from otx.algorithms.common.utils.callback import TrainingProgressCallback -from otx.algorithms.common.utils.data import get_dataset -from otx.algorithms.common.utils.logger import get_logger -from otx.api.configuration import cfg_helper -from otx.api.configuration.helper.utils import ids_to_strings -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.metrics import ( - CurveMetric, - InfoMetric, - LineChartInfo, - MetricsGroup, - Performance, - ScoreMetric, - VisualizationInfo, - VisualizationType, -) -from otx.api.entities.model import ModelEntity -from otx.api.entities.subset import Subset -from otx.api.entities.train_parameters import TrainParameters, default_progress_callback -from otx.api.serialization.label_mapper import label_schema_to_bytes -from otx.api.usecases.tasks.interfaces.training_interface import ITrainingTask -from otx.api.utils.argument_checks import ( - DatasetParamTypeCheck, - check_input_parameters_type, -) - -from .inference import SegmentationInferenceTask - -logger = get_logger() - - -# pylint: disable=too-many-locals, too-many-instance-attributes, too-many-ancestors -class SegmentationTrainTask(SegmentationInferenceTask, ITrainingTask): - """Train Task Implementation of OTX Segmentation.""" - - @check_input_parameters_type() - def save_model(self, output_model: ModelEntity): - """Save best model weights in SegmentationTrainTask.""" - logger.info(f"called save_model: {self._model_ckpt}") - 
buffer = io.BytesIO() - hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True)) - labels = {label.name: label.color.rgb_tuple for label in self._labels} - model_ckpt = torch.load(self._model_ckpt) - modelinfo = { - "model": model_ckpt, - "config": hyperparams_str, - "labels": labels, - "VERSION": 1, - } - - torch.save(modelinfo, buffer) - output_model.set_data("weights.pth", buffer.getvalue()) - output_model.set_data( - "label_schema.json", - label_schema_to_bytes(self._task_environment.label_schema), - ) - output_model.precision = self._precision - - def cancel_training(self): - """Cancel training function in SegmentationTrainTask. - - Sends a cancel training signal to gracefully stop the optimizer. The signal consists of creating a - '.stop_training' file in the current work_dir. The runner checks for this file periodically. - The stopping mechanism allows stopping after each iteration, but validation will still be carried out. Stopping - will therefore take some time. - """ - logger.info("Cancel training requested.") - self._should_stop = True - if self.cancel_interface is not None: - self.cancel_interface.cancel() - else: - logger.info("but training was not started yet. reserved it to cancel") - self.reserved_cancel = True - - @check_input_parameters_type({"dataset": DatasetParamTypeCheck}) - def train( - self, - dataset: DatasetEntity, - output_model: ModelEntity, - train_parameters: Optional[TrainParameters] = None, - ): - """Train function in SegmentationTrainTask.""" - logger.info("train()") - # Check for stop signal between pre-eval and training. 
- # If training is cancelled at this point, - if self._should_stop: - logger.info("Training cancelled.") - self._should_stop = False - self._is_training = False - return - - # Set OTX LoggerHook & Time Monitor - if train_parameters is not None: - update_progress_callback = train_parameters.update_progress - else: - update_progress_callback = default_progress_callback - self._time_monitor = TrainingProgressCallback(update_progress_callback) - - self._data_cfg = self._init_train_data_cfg(dataset) - self._is_training = True - results = self._run_task("SegTrainer", mode="train", dataset=dataset, parameters=train_parameters) - - # Check for stop signal when training has stopped. - # If should_stop is true, training was cancelled and no new - if self._should_stop: - logger.info("Training cancelled.") - self._should_stop = False - self._is_training = False - return - - # get output model - model_ckpt = results.get("final_ckpt") - if model_ckpt is None: - logger.error("cannot find final checkpoint from the results.") - # output_model.model_status = ModelStatus.FAILED - return - # update checkpoint to the newly trained model - self._model_ckpt = model_ckpt - - # Get training metrics group from learning curves - training_metrics, best_score = self._generate_training_metrics_group(self._learning_curves) - performance = Performance( - score=ScoreMetric(value=best_score, name=self.metric), - dashboard_metrics=training_metrics, - ) - - logger.info(f"Final model performance: {str(performance)}") - # save resulting model - self.save_model(output_model) - output_model.performance = performance - # output_model.model_status = ModelStatus.SUCCESS - self._is_training = False - logger.info("train done.") - - def _init_train_data_cfg(self, dataset: DatasetEntity): - logger.info("init data cfg.") - data_cfg = ConfigDict(data=ConfigDict()) - - for cfg_key, subset in zip( - ["train", "val", "unlabeled"], - [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED], - ): - subset = 
get_dataset(dataset, subset) - if subset: - data_cfg.data[cfg_key] = ConfigDict( - otx_dataset=subset, - labels=self._labels, - ) - - return data_cfg - - def _generate_training_metrics_group(self, learning_curves): - """Get Training metrics (epochs & scores). - - Parses the mmsegmentation logs to get metrics from the latest training run - :return output List[MetricsGroup] - """ - output: List[MetricsGroup] = [] - # Model architecture - architecture = InfoMetric(name="Model architecture", value=self._model_name) - visualization_info_architecture = VisualizationInfo( - name="Model architecture", visualisation_type=VisualizationType.TEXT - ) - output.append( - MetricsGroup( - metrics=[architecture], - visualization_info=visualization_info_architecture, - ) - ) - # Learning curves - best_score = -1 - for key, curve in learning_curves.items(): - metric_curve = CurveMetric(xs=curve.x, ys=curve.y, name=key) - if key == f"val/{self.metric}": - best_score = max(curve.y) - visualization_info = LineChartInfo(name=key, x_axis_label="Epoch", y_axis_label=key) - output.append(MetricsGroup(metrics=[metric_curve], visualization_info=visualization_info)) - return output, best_score diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/nncf/test_mmseg_nncf_hooks.py b/tests/unit/algorithms/segmentation/adapters/mmseg/nncf/test_mmseg_nncf_hooks.py index ec5308bdd86..8352fba4bb2 100644 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/nncf/test_mmseg_nncf_hooks.py +++ b/tests/unit/algorithms/segmentation/adapters/mmseg/nncf/test_mmseg_nncf_hooks.py @@ -9,7 +9,7 @@ from torch.optim import SGD from otx.algorithms.common.adapters.mmcv.utils import build_data_parallel -from otx.algorithms.segmentation.adapters.mmseg.nncf.hooks import ( +from otx.algorithms.common.adapters.mmcv.hooks import ( CustomstepLrUpdaterHook, ) from tests.test_suite.e2e_test_system import e2e_pytest_unit diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/__init__.py 
b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/__init__.py deleted file mode 100644 index 1e19f1159d9..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py deleted file mode 100644 index 1e19f1159d9..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/test_seg_incremental_stage.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/test_seg_incremental_stage.py deleted file mode 100644 index 637b11524d1..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/incremental/test_seg_incremental_stage.py +++ /dev/null @@ -1,24 +0,0 @@ -import pytest - -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.stage import ( - IncrSegStage, -) -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import DEFAULT_RECIPE_CONFIG_PATH - - -class TestOTXSegStage: - @pytest.fixture(autouse=True) - def setup(self) -> None: - cfg = MPAConfig.fromfile(DEFAULT_RECIPE_CONFIG_PATH) - self.stage = IncrSegStage(name="", mode="train", config=cfg, common_cfg=None, index=0) - - @e2e_pytest_unit - def test_configure_task(self, mocker): - mock_update_hook = mocker.patch( - "otx.algorithms.segmentation.adapters.mmseg.tasks.incremental.stage.update_or_add_custom_hook" - ) - self.stage.configure_task(self.stage.cfg, True) - - 
mock_update_hook.assert_called_once() diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py deleted file mode 100644 index 1e19f1159d9..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_inferrer.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_inferrer.py deleted file mode 100644 index 48f955d54a8..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_inferrer.py +++ /dev/null @@ -1,29 +0,0 @@ -import os - -import pytest - -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.inferrer import ( - SemiSLSegInferrer, -) -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import DEFAULT_SEG_TEMPLATE_DIR - -SEMISL_RECIPE_CONFIG_PATH = "otx/recipes/stages/segmentation/semisl.py" - - -class TestOTXSegStage: - @pytest.fixture(autouse=True) - def setup(self) -> None: - cfg = MPAConfig.fromfile(SEMISL_RECIPE_CONFIG_PATH) - self.inferrer = SemiSLSegInferrer(name="", mode="train", config=cfg, common_cfg=None, index=0) - self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "semisl/model.py")) - self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "semisl/data_pipeline.py")) - - @e2e_pytest_unit - def test_configure(self): - updated_cfg = self.inferrer.configure(self.model_cfg, "", self.data_cfg) - - assert "orig_type" not in updated_cfg - assert "unsup_weight" not in updated_cfg - assert "semisl_start_iter" not in updated_cfg diff --git 
a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_stage.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_stage.py deleted file mode 100644 index 25b8ff7156b..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/semisl/test_seg_semisl_stage.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.stage import SemiSLSegStage -from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage -from tests.test_suite.e2e_test_system import e2e_pytest_unit - -SEMISL_RECIPE_CONFIG_PATH = "otx/recipes/stages/segmentation/semisl.py" - - -class TestOTXSegStage: - @pytest.fixture(autouse=True) - def setup(self) -> None: - cfg = MPAConfig.fromfile(SEMISL_RECIPE_CONFIG_PATH) - self.stage = SemiSLSegStage(name="", mode="train", config=cfg, common_cfg=None, index=0) - - @e2e_pytest_unit - def test_configure_data(self, mocker): - mock_ul_dataloader = mocker.patch.object(SegStage, "configure_unlabeled_dataloader") - fake_semisl_data_cfg = {"data": {"unlabeled": {"otx_dataset": "foo"}}} - self.stage.configure_data(self.stage.cfg, True, fake_semisl_data_cfg) - - mock_ul_dataloader.assert_called_once() - - @e2e_pytest_unit - def test_configure_task(self, mocker): - fake_model_cfg = {"model": {"type": "", "task_adapt": True}} - self.stage.cfg.merge_from_dict(fake_model_cfg) - mock_remove_hook = mocker.patch( - "otx.algorithms.segmentation.adapters.mmseg.tasks.semisl.stage.remove_custom_hook" - ) - self.stage.configure_task(self.stage.cfg, True) - - assert "task_adapt" not in self.stage.cfg.model - mock_remove_hook.assert_called_once() diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_exporter.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_exporter.py deleted file mode 100644 index 0d787740b4b..00000000000 --- 
a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_exporter.py +++ /dev/null @@ -1,39 +0,0 @@ -import os - -import pytest - -from otx.algorithms.common.adapters.mmcv.tasks.exporter_mixin import ExporterMixin -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.common.adapters.mmdeploy.apis import NaiveExporter -from otx.algorithms.segmentation.adapters.mmseg.tasks.exporter import SegExporter -from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import ( - DEFAULT_RECIPE_CONFIG_PATH, - DEFAULT_SEG_TEMPLATE_DIR, -) - - -class TestOTXSegExporter: - @pytest.fixture(autouse=True) - def setup(self) -> None: - cfg = MPAConfig.fromfile(DEFAULT_RECIPE_CONFIG_PATH) - self.exporter = SegExporter(name="", mode="train", config=cfg, common_cfg=None, index=0) - self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "model.py")) - self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "data_pipeline.py")) - - @e2e_pytest_unit - def test_run(self, mocker): - args = {"precision": "FP32", "model_builder": build_segmentor} - mocker.patch.object(ExporterMixin, "run", return_value=True) - returned_value = self.exporter.run(self.model_cfg, "", self.data_cfg, **args) - - assert "model_builder" in args - assert returned_value is True - - @e2e_pytest_unit - def test_naive_export(self, mocker): - mock_export_ov = mocker.patch.object(NaiveExporter, "export2openvino") - self.exporter.naive_export("", build_segmentor, "FP32", self.data_cfg) - - mock_export_ov.assert_called_once() diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_inferrer.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_inferrer.py deleted file mode 100644 index 0d58cb09b87..00000000000 --- 
a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_inferrer.py +++ /dev/null @@ -1,70 +0,0 @@ -import os - -import pytest - -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.segmentation.adapters.mmseg.tasks.inferrer import ( - SegInferrer, - replace_ImageToTensor, -) -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import ( - DEFAULT_RECIPE_CONFIG_PATH, - DEFAULT_SEG_TEMPLATE_DIR, -) - - -class TestOTXSegTrainer: - @pytest.fixture(autouse=True) - def setup(self) -> None: - cfg = MPAConfig.fromfile(DEFAULT_RECIPE_CONFIG_PATH) - self.inferrer = SegInferrer(name="", mode="train", config=cfg, common_cfg=None, index=0) - self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "model.py")) - self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "data_pipeline.py")) - - @e2e_pytest_unit - def test_run(self, mocker): - fake_output = {"classes": [1, 2], "eval_predictions": None, "feature_vectors": None} - mock_infer = mocker.patch.object(SegInferrer, "infer", return_value=fake_output) - - returned_value = self.inferrer.run(self.model_cfg, "", self.data_cfg) - mock_infer.assert_called_once() - assert returned_value == {"outputs": fake_output} - - @e2e_pytest_unit - def test_infer(self, mocker): - cfg = self.inferrer.configure(self.model_cfg, "", self.data_cfg, training=False) - mocker.patch.object(SegInferrer, "configure_samples_per_gpu") - mocker.patch.object(SegInferrer, "configure_compat_cfg") - mock_infer_callback = mocker.patch.object(SegInferrer, "set_inference_progress_callback") - - returned_value = self.inferrer.infer(cfg) - mock_infer_callback.assert_called_once() - - assert "classes" in returned_value - assert "eval_predictions" in returned_value - assert "feature_vectors" in returned_value - assert len(returned_value["eval_predictions"]) >= 0 - - -@e2e_pytest_unit -def 
test_replace_ImageToTensor(): - test_pipeline = [ - dict(type="LoadImageFromFile"), - dict( - type="MultiScaleFlipAug", - transforms=[ - dict(type="Resize", keep_ratio=False), - dict(type="ImageToTensor", keys=["img"]), - ], - ), - dict(type="ImageToTensor", keys=["img"]), - ] - returned_value = replace_ImageToTensor(test_pipeline) - - for pipeline in returned_value: - if "transforms" in pipeline: - values = [p["type"] for p in pipeline["transforms"]] - else: - values = [pipeline["type"]] - assert "ImageToTensor" not in values diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_stage.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_stage.py deleted file mode 100644 index 0b064cfabde..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_stage.py +++ /dev/null @@ -1,89 +0,0 @@ -import os - -import pytest - -from otx.algorithms.common.adapters.mmcv.tasks.stage import Stage -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.segmentation.adapters.mmseg.tasks.stage import SegStage -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import ( - DEFAULT_RECIPE_CONFIG_PATH, - DEFAULT_SEG_TEMPLATE_DIR, -) - - -class TestOTXSegStage: - @pytest.fixture(autouse=True) - def setup(self) -> None: - cfg = MPAConfig.fromfile(DEFAULT_RECIPE_CONFIG_PATH) - self.stage = SegStage(name="", mode="train", config=cfg, common_cfg=None, index=0) - self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "model.py")) - self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "data_pipeline.py")) - - @e2e_pytest_unit - def test_configure(self, mocker): - mock_cfg_model = mocker.patch.object(SegStage, "configure_model") - mock_cfg_ckpt = mocker.patch.object(SegStage, "configure_ckpt") - mock_cfg_data = mocker.patch.object(SegStage, "configure_data") - mock_cfg_task = 
mocker.patch.object(SegStage, "configure_task") - mock_cfg_hook = mocker.patch.object(SegStage, "configure_hook") - - fake_arg = {"pretrained": True, "foo": "bar"} - returned_value = self.stage.configure(self.model_cfg, "", self.data_cfg, True, **fake_arg) - mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, **fake_arg) - mock_cfg_ckpt.assert_called_once_with(self.stage.cfg, "", fake_arg.get("pretrained", None)) - mock_cfg_data.assert_called_once_with(self.stage.cfg, True, self.data_cfg) - mock_cfg_task.assert_called_once_with(self.stage.cfg, True) - mock_cfg_hook.assert_called_once_with(self.stage.cfg) - - assert returned_value == self.stage.cfg - - @e2e_pytest_unit - def test_configure_model(self): - fake_arg = {"ir_model_path": {"ir_weight_path": "", "ir_weight_init": ""}} - self.stage.configure_model(self.stage.cfg, self.model_cfg, **fake_arg) - - assert self.stage.cfg.model_task - - @e2e_pytest_unit - def test_configure_data(self, mocker): - mock_super_cfg_data = mocker.patch.object(Stage, "configure_data") - self.stage.configure_data(self.stage.cfg, True, self.data_cfg) - - mock_super_cfg_data.assert_called_once() - assert self.stage.cfg.data - assert self.stage.cfg.data.train - assert self.stage.cfg.data.val - - @e2e_pytest_unit - def test_configure_task(self, mocker): - mock_cfg_classes = mocker.patch.object(SegStage, "configure_classes") - mock_cfg_ignore = mocker.patch.object(SegStage, "configure_ignore") - self.stage.configure_task(self.stage.cfg, True) - - mock_cfg_classes.assert_called_once() - mock_cfg_ignore.assert_called_once() - - @e2e_pytest_unit - def test_configure_classes_replace(self, mocker): - mocker.patch.object(Stage, "get_data_classes", return_value=["foo", "bar"]) - self.stage.configure_classes(self.stage.cfg, "REPLACE") - - assert "background" in self.stage.model_classes - assert self.stage.model_classes == ["background", "foo", "bar"] - - @e2e_pytest_unit - def test_configure_classes_merge(self, mocker): - 
mocker.patch.object(Stage, "get_model_classes", return_value=["foo", "bar"]) - mocker.patch.object(Stage, "get_data_classes", return_value=["foo", "baz"]) - self.stage.configure_classes(self.stage.cfg, "MERGE") - - assert "background" in self.stage.model_classes - assert self.stage.model_classes == ["background", "foo", "bar", "baz"] - - @e2e_pytest_unit - def test_configure_ignore(self): - self.stage.configure_ignore(self.stage.cfg) - - if "decode_head" in self.stage.cfg.model: - assert self.stage.cfg.model.decode_head.loss_decode.type == "CrossEntropyLossWithIgnore" diff --git a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_trainer.py b/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_trainer.py deleted file mode 100644 index 8bdb6783529..00000000000 --- a/tests/unit/algorithms/segmentation/adapters/mmseg/tasks/test_seg_trainer.py +++ /dev/null @@ -1,43 +0,0 @@ -import os - -import pytest - -from otx.algorithms.common.adapters.mmcv.utils.config_utils import MPAConfig -from otx.algorithms.segmentation.adapters.mmseg.tasks.trainer import SegTrainer -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import ( - DEFAULT_RECIPE_CONFIG_PATH, - DEFAULT_SEG_TEMPLATE_DIR, -) - - -class TestOTXSegTrainer: - @pytest.fixture(autouse=True) - def setup(self) -> None: - cfg = MPAConfig.fromfile(DEFAULT_RECIPE_CONFIG_PATH) - self.trainer = SegTrainer(name="", mode="train", config=cfg, common_cfg=None, index=0) - self.model_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "model.py")) - self.data_cfg = MPAConfig.fromfile(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "data_pipeline.py")) - - @e2e_pytest_unit - def test_run(self, mocker): - mocker.patch.object(SegTrainer, "configure_samples_per_gpu") - mocker.patch.object(SegTrainer, "configure_fp16_optimizer") - mocker.patch.object(SegTrainer, "configure_compat_cfg") - mock_train_segmentor = 
mocker.patch("otx.algorithms.segmentation.adapters.mmseg.tasks.trainer.train_segmentor") - - self.trainer.run(self.model_cfg, "", self.data_cfg) - mock_train_segmentor.assert_called_once() - - @e2e_pytest_unit - def test_run_with_distributed(self, mocker): - self.trainer._distributed = True - mocker.patch.object(SegTrainer, "configure_samples_per_gpu") - mocker.patch.object(SegTrainer, "configure_fp16_optimizer") - mocker.patch.object(SegTrainer, "configure_compat_cfg") - spy_cfg_dist = mocker.spy(SegTrainer, "_modify_cfg_for_distributed") - mock_train_segmentor = mocker.patch("otx.algorithms.segmentation.adapters.mmseg.tasks.trainer.train_segmentor") - - self.trainer.run(self.model_cfg, "", self.data_cfg) - spy_cfg_dist.assert_called_once() - mock_train_segmentor.assert_called_once() diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_inference.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_inference.py deleted file mode 100644 index ca49c15a139..00000000000 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_inference.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import os - -import numpy as np -import pytest - -from otx.algorithms.common.tasks import BaseTask -from otx.algorithms.segmentation.tasks import SegmentationInferenceTask -from otx.api.configuration.helper import create -from otx.api.entities.annotation import Annotation -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.label import LabelEntity -from otx.api.entities.metrics import Performance, ScoreMetric -from otx.api.entities.model import ModelPrecision -from otx.api.entities.model_template import parse_model_template -from otx.api.entities.resultset import ResultSetEntity -from otx.api.entities.scored_label import ScoredLabel -from otx.api.entities.shapes.polygon import Point, Polygon -from otx.api.usecases.evaluation.metrics_helper import 
MetricsHelper -from otx.api.usecases.tasks.interfaces.export_interface import ExportType -from otx.api.utils.shape_factory import ShapeFactory -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import ( - DEFAULT_SEG_TEMPLATE_DIR, - generate_otx_dataset, - init_environment, -) - - -class TestOTXSegTaskInference: - @pytest.fixture(autouse=True) - def setup(self, otx_model, tmp_dir_path) -> None: - model_template = parse_model_template(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "template.yaml")) - hyper_parameters = create(model_template.hyper_parameters.data) - task_env = init_environment(hyper_parameters, model_template) - self.output_path = str(tmp_dir_path) - self.seg_train_task = SegmentationInferenceTask(task_env, output_path=self.output_path) - self.model = otx_model - - @e2e_pytest_unit - def test_infer(self, mocker): - dataset = generate_otx_dataset(5) - fake_output = {"outputs": {"eval_predictions": np.zeros((5, 1)), "feature_vectors": np.zeros((5, 1))}} - fake_annotation = [ - Annotation( - Polygon(points=[Point(0, 0)]), - id=0, - labels=[ScoredLabel(LabelEntity(name="fake", domain="SEGMENTATION"), probability=1.0)], - ) - ] - - mock_run_task = mocker.patch.object(BaseTask, "_run_task", return_value=fake_output) - mocker.patch("numpy.transpose") - mocker.patch("otx.algorithms.segmentation.tasks.inference.create_hard_prediction_from_soft_prediction") - mocker.patch( - "otx.algorithms.segmentation.tasks.inference.create_annotation_from_segmentation_map", - return_value=fake_annotation, - ) - mocker.patch("otx.algorithms.segmentation.tasks.inference.get_activation_map", return_value=np.zeros((1, 1))) - mocker.patch.object(ShapeFactory, "shape_produces_valid_crop", return_value=True) - - updated_dataset = self.seg_train_task.infer(dataset, None) - - mock_run_task.assert_called_once() - for updated in updated_dataset: - assert updated.annotation_scene.contains_any([LabelEntity(name="fake", 
domain="SEGMENTATION")]) - - @e2e_pytest_unit - def test_evaluate(self, mocker): - result_set = ResultSetEntity( - model=self.model, - ground_truth_dataset=DatasetEntity(), - prediction_dataset=DatasetEntity(), - ) - fake_metrics = mocker.patch("otx.api.usecases.evaluation.dice.DiceAverage", autospec=True) - fake_metrics.get_performance.return_value = Performance( - score=ScoreMetric(name="fake", value=0.1), dashboard_metrics="mDice" - ) - mocker.patch.object(MetricsHelper, "compute_dice_averaged_over_pixels", return_value=fake_metrics) - self.seg_train_task.evaluate(result_set) - - assert result_set.performance.score.value == 0.1 - - @pytest.mark.parametrize("precision", [ModelPrecision.FP16, ModelPrecision.FP32]) - @e2e_pytest_unit - def test_export(self, mocker, precision: ModelPrecision): - fake_output = {"outputs": {"bin": None, "xml": None}} - mock_run_task = mocker.patch.object(BaseTask, "_run_task", return_value=fake_output) - - with pytest.raises(RuntimeError): - self.seg_train_task.export(ExportType.OPENVINO, self.model, precision) - mock_run_task.assert_called_once() - - @pytest.mark.parametrize("precision", [ModelPrecision.FP16, ModelPrecision.FP32]) - @pytest.mark.parametrize("dump_features", [True, False]) - @e2e_pytest_unit - def test_export_with_model_files(self, mocker, precision: ModelPrecision, dump_features: bool): - with open(f"{self.output_path}/model.xml", "wb") as f: - f.write(b"foo") - with open(f"{self.output_path}/model.bin", "wb") as f: - f.write(b"bar") - - fake_output = {"outputs": {"bin": f"{self.output_path}/model.xml", "xml": f"{self.output_path}/model.bin"}} - mock_run_task = mocker.patch.object(BaseTask, "_run_task", return_value=fake_output) - self.seg_train_task.export(ExportType.OPENVINO, self.model, precision, dump_features) - - mock_run_task.assert_called_once() - assert self.model.get_data("openvino.bin") - assert self.model.get_data("openvino.xml") - assert self.model.has_xai == dump_features - - @e2e_pytest_unit - def 
test_unload(self, mocker): - mock_cleanup = mocker.patch.object(BaseTask, "cleanup") - self.seg_train_task.unload() - - mock_cleanup.assert_called_once() - - @e2e_pytest_unit - @pytest.mark.parametrize("model_dir", ["...", ".../supcon", ".../supcon/workspace"]) - def test_init_recipe_supcon(self, mocker, model_dir: str): - mocker.patch("otx.algorithms.segmentation.tasks.inference.SegmentationInferenceTask._init_model_cfg") - mocker.patch("otx.algorithms.segmentation.tasks.inference.patch_default_config") - mocker.patch("otx.algorithms.segmentation.tasks.inference.patch_runner") - mocker.patch("otx.algorithms.segmentation.tasks.inference.patch_data_pipeline") - mocker.patch("otx.algorithms.segmentation.tasks.inference.patch_datasets") - mocker.patch("otx.algorithms.segmentation.tasks.inference.patch_evaluation") - - self.seg_train_task._hyperparams.learning_parameters.enable_supcon = True - self.seg_train_task._model_dir = model_dir - - self.seg_train_task._init_recipe() - - assert self.seg_train_task._model_dir.endswith("supcon") - - self.seg_train_task._hyperparams.learning_parameters.enable_supcon = False - self.seg_train_task._hyperparams._model_dir = os.path.abspath(DEFAULT_SEG_TEMPLATE_DIR) diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_inference_task_params_validation.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_inference_task_params_validation.py deleted file mode 100644 index 226ed2775f4..00000000000 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_inference_task_params_validation.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2021-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import pytest - -from otx.algorithms.segmentation.tasks import SegmentationInferenceTask -from otx.api.configuration.configurable_parameters import ConfigurableParameters -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.inference_parameters import InferenceParameters -from 
otx.api.entities.label_schema import LabelSchemaEntity -from otx.api.entities.model import ModelConfiguration, ModelEntity -from otx.api.entities.resultset import ResultSetEntity -from otx.api.usecases.tasks.interfaces.export_interface import ExportType -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.api.parameters_validation.validation_helper import ( - check_value_error_exception_raised, -) - - -class MockSegmentationInferenceTask(SegmentationInferenceTask): - def __init__(self): - pass - - -class TestInferenceTaskInputParamsValidation: - @staticmethod - def model(): - model_configuration = ModelConfiguration( - configurable_parameters=ConfigurableParameters(header="header", description="description"), - label_schema=LabelSchemaEntity(), - ) - return ModelEntity(train_dataset=DatasetEntity(), configuration=model_configuration) - - @e2e_pytest_unit - def test_otx_segmentation_inference_task_init_params_validation(self): - """ - Description: - Check SegmentationInferenceTask object initialization parameters validation - - Input data: - "task_environment" non-TaskEnvironment object - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - SegmentationInferenceTask object initialization parameter - """ - with pytest.raises(ValueError): - SegmentationInferenceTask(task_environment="unexpected string") # type: ignore - - @e2e_pytest_unit - def test_otx_segmentation_inference_task_infer_params_validation(self): - """ - Description: - Check SegmentationInferenceTask object "infer" method input parameters validation - - Input data: - SegmentationInferenceTask object. 
"infer" method unexpected-type input parameters - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - input parameter for "infer" method - """ - task = MockSegmentationInferenceTask() - correct_values_dict = { - "dataset": DatasetEntity(), - "inference_parameters": InferenceParameters(), - } - unexpected_str = "unexpected string" - unexpected_values = [ - # Unexpected string is specified as "dataset" parameter - ("dataset", unexpected_str), - # Unexpected string is specified as "inference_parameters" parameter - ("inference_parameters", unexpected_str), - ] - - check_value_error_exception_raised( - correct_parameters=correct_values_dict, - unexpected_values=unexpected_values, - class_or_function=task.infer, - ) - - @e2e_pytest_unit - def test_otx_segmentation_inference_task_evaluate_params_validation(self): - """ - Description: - Check SegmentationInferenceTask object "evaluate" method input parameters validation - - Input data: - SegmentationInferenceTask object. 
"evaluate" method unexpected-type input parameters - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - input parameter for "evaluate" method - """ - task = MockSegmentationInferenceTask() - model = self.model() - result_set = ResultSetEntity( - model=model, - ground_truth_dataset=DatasetEntity(), - prediction_dataset=DatasetEntity(), - ) - correct_values_dict = { - "output_result_set": result_set, - "evaluation_metric": "metric", - } - unexpected_int = 1 - unexpected_values = [ - # Unexpected integer is specified as "output_result_set" parameter - ("output_result_set", unexpected_int), - # Unexpected integer is specified as "evaluation_metric" parameter - ("evaluation_metric", unexpected_int), - ] - - check_value_error_exception_raised( - correct_parameters=correct_values_dict, - unexpected_values=unexpected_values, - class_or_function=task.evaluate, - ) - - @e2e_pytest_unit - def test_otx_segmentation_inference_task_export_params_validation(self): - """ - Description: - Check SegmentationInferenceTask object "export" method input parameters validation - - Input data: - SegmentationInferenceTask object. 
"export" method unexpected-type input parameters - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - input parameter for "export" method - """ - task = MockSegmentationInferenceTask() - model = self.model() - correct_values_dict = { - "export_type": ExportType.OPENVINO, - "output_model": model, - } - unexpected_str = "unexpected string" - unexpected_values = [ - # Unexpected string is specified as "export_type" parameter - ("export_type", unexpected_str), - # Unexpected string is specified as "output_model" parameter - ("output_model", unexpected_str), - ] - - check_value_error_exception_raised( - correct_parameters=correct_values_dict, - unexpected_values=unexpected_values, - class_or_function=task.export, - ) diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_nncf.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_nncf.py index 99a78c036a9..5e9dfe9bfb7 100644 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_nncf.py +++ b/tests/unit/algorithms/segmentation/tasks/test_segmentation_nncf.py @@ -7,9 +7,7 @@ import pytest from mmcv.utils import Config -from otx.algorithms.common.tasks import BaseTask -from otx.algorithms.common.tasks.nncf_base import NNCFBaseTask -from otx.algorithms.segmentation.tasks import SegmentationNNCFTask +from otx.algorithms.segmentation.adapters.mmseg.nncf.task import SegmentationNNCFTask from otx.api.configuration.helper import create from otx.api.entities.metrics import NullPerformance from otx.api.entities.model_template import parse_model_template @@ -50,20 +48,17 @@ def test_optimize(self, mocker): mock_lcurve_val.x = [0, 1] mock_lcurve_val.y = [0.1, 0.2] - mock_run_task = mocker.patch.object(BaseTask, "_run_task", return_value={"final_ckpt": ""}) self.seg_nncf_task._learning_curves = {f"val/{self.seg_nncf_task.metric}": mock_lcurve_val} mocker.patch.object(SegmentationNNCFTask, "save_model") + 
mocker.patch.object(SegmentationNNCFTask, "_train_model") + mocker.patch( + "otx.algorithms.segmentation.adapters.mmseg.nncf.task.build_nncf_segmentor", + return_value=( + mocker.MagicMock(), + mocker.MagicMock(), + ), + ) self.seg_nncf_task.optimize(OptimizationType.NNCF, self.dataset, self.model) - mock_run_task.assert_called_once() assert self.model.performance != NullPerformance() assert self.model.performance.score.value == 0.2 - - @e2e_pytest_unit - def test_initialize(self, mocker): - """Test initialize method in OTXDetTaskNNCF.""" - options = {} - self.seg_nncf_task._initialize(options) - - assert "model_builder" in options - assert NNCFBaseTask.model_builder == options["model_builder"].func diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_nncf_task_params_validation.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_nncf_task_params_validation.py deleted file mode 100644 index 80a27af9891..00000000000 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_nncf_task_params_validation.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (C) 2021-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import pytest - -from otx.algorithms.segmentation.tasks import SegmentationNNCFTask -from otx.api.configuration.configurable_parameters import ConfigurableParameters -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.label_schema import LabelSchemaEntity -from otx.api.entities.model import ModelConfiguration, ModelEntity -from otx.api.usecases.tasks.interfaces.export_interface import ExportType -from otx.api.usecases.tasks.interfaces.optimization_interface import OptimizationType -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.api.parameters_validation.validation_helper import ( - check_value_error_exception_raised, -) - - -class MockNNCFTask(SegmentationNNCFTask): - def __init__(self): - pass - - -class TestNNCFTaskInputParamsValidation: - @staticmethod 
- def model(): - model_configuration = ModelConfiguration( - configurable_parameters=ConfigurableParameters(header="header", description="description"), - label_schema=LabelSchemaEntity(), - ) - return ModelEntity(train_dataset=DatasetEntity(), configuration=model_configuration) - - @e2e_pytest_unit - def test_nncf_segmentation_task_init_params_validation(self): - """ - Description: - Check SegmentationNNCFTask object initialization parameters validation - - Input data: - SegmentationNNCFTask object initialization parameter with unexpected type - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - SegmentationNNCFTask object initialization parameter - """ - with pytest.raises(ValueError): - SegmentationNNCFTask(task_environment="unexpected string") # type: ignore - - @e2e_pytest_unit - def test_nncf_segmentation_task_optimize_params_validation(self): - """ - Description: - Check SegmentationNNCFTask object "optimize" method input parameters validation - - Input data: - SegmentationNNCFTask object. 
"optimize" method unexpected-type input parameters - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - input parameter for "optimize" method - """ - task = MockNNCFTask() - correct_values_dict = { - "optimization_type": OptimizationType.NNCF, - "dataset": DatasetEntity(), - "output_model": self.model(), - } - unexpected_str = "unexpected string" - unexpected_values = [ - # Unexpected string is specified as "optimization_type" parameter - ("optimization_type", unexpected_str), - # Unexpected string is specified as "dataset" parameter - ("dataset", unexpected_str), - # Unexpected string is specified as "output_model" parameter - ("output_model", unexpected_str), - # Unexpected string is specified as "optimization_parameters" parameter - ("optimization_parameters", unexpected_str), - ] - check_value_error_exception_raised( - correct_parameters=correct_values_dict, - unexpected_values=unexpected_values, - class_or_function=task.optimize, - ) - - @e2e_pytest_unit - def test_nncf_segmentation_task_export_params_validation(self): - """ - Description: - Check SegmentationNNCFTask object "export" method input parameters validation - - Input data: - SegmentationNNCFTask object. 
"export" method unexpected-type input parameters - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - input parameter for "export" method - """ - task = MockNNCFTask() - correct_values_dict = { - "export_type": ExportType.OPENVINO, - "output_model": self.model(), - } - unexpected_str = "unexpected string" - unexpected_values = [ - # Unexpected string is specified as "export_type" parameter - ("export_type", unexpected_str), - # Unexpected string is specified as "output_model" parameter - ("output_model", unexpected_str), - ] - check_value_error_exception_raised( - correct_parameters=correct_values_dict, - unexpected_values=unexpected_values, - class_or_function=task.export, - ) - - @e2e_pytest_unit - def test_nncf_segmentation_task_save_model_params_validation(self): - """ - Description: - Check SegmentationNNCFTask object "save_model" method input parameters validation - - Input data: - SegmentationNNCFTask object, "output_model" non-ModelEntity object - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - input parameter for "save_model" method - """ - task = MockNNCFTask() - with pytest.raises(ValueError): - task.save_model(output_model="unexpected string") # type: ignore diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino.py index 2f674f6f14b..2fe0186b0be 100644 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino.py +++ b/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino.py @@ -9,9 +9,10 @@ import pytest from openvino.model_zoo.model_api.models import Model -import otx.algorithms.segmentation.tasks.openvino +import otx.algorithms.segmentation.adapters.openvino + from otx.algorithms.segmentation.configs.base import SegmentationConfig -from otx.algorithms.segmentation.tasks.openvino import ( +from 
otx.algorithms.segmentation.adapters.openvino import ( OpenVINOSegmentationInferencer, OpenVINOSegmentationTask, ) @@ -47,7 +48,7 @@ def setup(self, mocker) -> None: hyper_parameters = create(model_template.hyper_parameters.data) seg_params = SegmentationConfig(header=hyper_parameters.header) label_schema = generate_otx_label_schema() - mocker.patch("otx.algorithms.segmentation.tasks.openvino.OpenvinoAdapter") + mocker.patch("otx.algorithms.segmentation.adapters.openvino.task.OpenvinoAdapter") mocker.patch.object(Model, "create_model") self.seg_ov_inferencer = OpenVINOSegmentationInferencer(seg_params, label_schema, "") self.seg_ov_inferencer.model = mocker.patch("openvino.model_zoo.model_api.models.Model", autospec=True) @@ -103,7 +104,7 @@ def setup(self, mocker, otx_model) -> None: label_schema = generate_otx_label_schema() task_env = init_environment(hyper_parameters, model_template) seg_params = SegmentationConfig(header=hyper_parameters.header) - mocker.patch("otx.algorithms.segmentation.tasks.openvino.OpenvinoAdapter") + mocker.patch("otx.algorithms.segmentation.adapters.openvino.task.OpenvinoAdapter") mocker.patch.object(Model, "create_model") seg_ov_inferencer = OpenVINOSegmentationInferencer(seg_params, label_schema, "") @@ -126,7 +127,9 @@ def test_infer(self, mocker): mock_predict = mocker.patch.object( OpenVINOSegmentationInferencer, "predict", return_value=(fake_ann_scene, None, fake_input) ) - mocker.patch("otx.algorithms.segmentation.tasks.openvino.get_activation_map", return_value=np.zeros((5, 1))) + mocker.patch( + "otx.algorithms.segmentation.adapters.openvino.task.get_activation_map", return_value=np.zeros((5, 1)) + ) mocker.patch.object(ShapeFactory, "shape_produces_valid_crop", return_value=True) updated_dataset = self.seg_ov_task.infer(self.dataset) @@ -171,10 +174,10 @@ def patch_save_model(model, dir_path, model_name): output_model = copy.deepcopy(otx_model) self.seg_ov_task.model.set_data("openvino.bin", b"foo") 
self.seg_ov_task.model.set_data("openvino.xml", b"bar") - mocker.patch("otx.algorithms.segmentation.tasks.openvino.load_model", autospec=True) - mocker.patch("otx.algorithms.segmentation.tasks.openvino.create_pipeline", autospec=True) - mocker.patch("otx.algorithms.segmentation.tasks.openvino.save_model", new=patch_save_model) - spy_compress = mocker.spy(otx.algorithms.segmentation.tasks.openvino, "compress_model_weights") + mocker.patch("otx.algorithms.segmentation.adapters.openvino.task.load_model", autospec=True) + mocker.patch("otx.algorithms.segmentation.adapters.openvino.task.create_pipeline", autospec=True) + mocker.patch("otx.algorithms.segmentation.adapters.openvino.task.save_model", new=patch_save_model) + spy_compress = mocker.spy(otx.algorithms.segmentation.adapters.openvino.task, "compress_model_weights") self.seg_ov_task.optimize(OptimizationType.POT, dataset=dataset, output_model=output_model) spy_compress.assert_called_once() diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino_task_params_validation.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino_task_params_validation.py index 06d6e4f6cd2..af4112021e8 100644 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino_task_params_validation.py +++ b/tests/unit/algorithms/segmentation/tasks/test_segmentation_openvino_task_params_validation.py @@ -6,7 +6,7 @@ import pytest from otx.algorithms.segmentation.configs.base import SegmentationConfig -from otx.algorithms.segmentation.tasks.openvino import ( +from otx.algorithms.segmentation.adapters.openvino import ( OpenVINOSegmentationInferencer, OpenVINOSegmentationTask, OTXOpenVinoDataLoader, diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_train.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_train.py deleted file mode 100644 index 739da11c2d9..00000000000 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_train.py +++ /dev/null @@ 
-1,62 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import os - -import pytest - -from otx.algorithms.common.tasks import BaseTask -from otx.algorithms.segmentation.tasks import SegmentationTrainTask -from otx.api.configuration.helper import create -from otx.api.entities.metrics import NullPerformance -from otx.api.entities.model_template import parse_model_template -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.algorithms.segmentation.test_helpers import ( - DEFAULT_SEG_TEMPLATE_DIR, - generate_otx_dataset, - init_environment, -) - - -class TestOTXSegTaskTrain: - @pytest.fixture(autouse=True) - def setup(self, otx_model, tmp_dir_path) -> None: - model_template = parse_model_template(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "template.yaml")) - hyper_parameters = create(model_template.hyper_parameters.data) - task_env = init_environment(hyper_parameters, model_template) - self.model = otx_model - self.seg_train_task = SegmentationTrainTask(task_env, output_path=str(tmp_dir_path)) - - @e2e_pytest_unit - def test_save_model(self, mocker): - mocker.patch("torch.load", return_value="") - self.seg_train_task.save_model(self.model) - - assert self.model.get_data("weights.pth") - assert self.model.get_data("label_schema.json") - - @e2e_pytest_unit - def test_train(self, mocker): - from otx.algorithms.common.adapters.mmcv.hooks import OTXLoggerHook - - self.dataset = generate_otx_dataset() - - mock_lcurve_val = OTXLoggerHook.Curve() - mock_lcurve_val.x = [0, 1] - mock_lcurve_val.y = [0.1, 0.2] - - mock_run_task = mocker.patch.object(BaseTask, "_run_task", return_value={"final_ckpt": ""}) - self.seg_train_task._learning_curves = {f"val/{self.seg_train_task.metric}": mock_lcurve_val} - mocker.patch.object(SegmentationTrainTask, "save_model") - self.seg_train_task.train(self.dataset, self.model) - - mock_run_task.assert_called_once() - assert self.model.performance != NullPerformance() - assert 
self.model.performance.score.value == 0.2 - - @e2e_pytest_unit - def test_cancel_training(self): - self.seg_train_task.cancel_training() - - assert self.seg_train_task._should_stop is True diff --git a/tests/unit/algorithms/segmentation/tasks/test_segmentation_train_task_params_validation.py b/tests/unit/algorithms/segmentation/tasks/test_segmentation_train_task_params_validation.py deleted file mode 100644 index 31674169661..00000000000 --- a/tests/unit/algorithms/segmentation/tasks/test_segmentation_train_task_params_validation.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (C) 2021-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import pytest - -from otx.algorithms.segmentation.tasks import SegmentationTrainTask -from otx.api.configuration.configurable_parameters import ConfigurableParameters -from otx.api.entities.datasets import DatasetEntity -from otx.api.entities.label_schema import LabelSchemaEntity -from otx.api.entities.model import ModelConfiguration, ModelEntity -from tests.test_suite.e2e_test_system import e2e_pytest_unit -from tests.unit.api.parameters_validation.validation_helper import ( - check_value_error_exception_raised, -) - - -class MockSegmentationTrainingTask(SegmentationTrainTask): - def __init__(self): - pass - - -class TestSegmentationTrainTaskInputParamsValidation: - @staticmethod - def model(): - model_configuration = ModelConfiguration( - configurable_parameters=ConfigurableParameters(header="header", description="description"), - label_schema=LabelSchemaEntity(), - ) - return ModelEntity(train_dataset=DatasetEntity(), configuration=model_configuration) - - @e2e_pytest_unit - def test_train_task_train_input_params_validation(self): - """ - Description: - Check SegmentationTrainTask object "train" method input parameters validation - - Input data: - SegmentationTrainTask object, "train" method unexpected-type input parameters - - Expected results: - Test passes if ValueError exception is raised when unexpected type 
object is specified as - input parameter for "train" method - """ - task = MockSegmentationTrainingTask() - correct_values_dict = { - "dataset": DatasetEntity(), - "output_model": self.model(), - } - unexpected_str = "unexpected string" - unexpected_values = [ - # Unexpected string is specified as "dataset" parameter - ("dataset", unexpected_str), - # Unexpected string is specified as "output_model" parameter - ("output_model", unexpected_str), - # Unexpected string is specified as "train_parameters" parameter - ("train_parameters", unexpected_str), - ] - - check_value_error_exception_raised( - correct_parameters=correct_values_dict, - unexpected_values=unexpected_values, - class_or_function=task.train, - ) - - @e2e_pytest_unit - def test_train_task_save_model_input_params_validation(self): - """ - Description: - Check SegmentationTrainTask object "save_model" method input parameters validation - - Input data: - SegmentationTrainTask object, "model" non-ModelEntity object - - Expected results: - Test passes if ValueError exception is raised when unexpected type object is specified as - input parameter for "save_model" method - """ - task = MockSegmentationTrainingTask() - with pytest.raises(ValueError): - task.save_model("unexpected string") # type: ignore