diff --git a/src/otx/algo/callbacks/adaptive_train_scheduling.py b/src/otx/algo/callbacks/adaptive_train_scheduling.py index ae63040f056..afeeaa0bb35 100644 --- a/src/otx/algo/callbacks/adaptive_train_scheduling.py +++ b/src/otx/algo/callbacks/adaptive_train_scheduling.py @@ -104,7 +104,7 @@ def _revert_func(config: LRSchedulerConfig, saved_frequency: int) -> None: config.frequency = saved_frequency for config in lr_configs: - if hasattr(config, "frequency"): + if hasattr(config, "frequency") and hasattr(config, "interval") and config.interval == "epoch": msg = ( "The frequency of LRscheduler will be changed due to the effect of adaptive interval: " f"{config.frequency} --> {adaptive_interval}." diff --git a/src/otx/cli/cli.py b/src/otx/cli/cli.py index 2c7159f990a..764e84794a8 100644 --- a/src/otx/cli/cli.py +++ b/src/otx/cli/cli.py @@ -156,18 +156,19 @@ def engine_subcommand_parser(**kwargs) -> ArgumentParser: sub_configs=True, ) # Optimizer & Scheduler Settings - from lightning.pytorch.cli import LRSchedulerTypeTuple + from lightning.pytorch.cli import ReduceLROnPlateau from torch.optim import Optimizer + from torch.optim.lr_scheduler import LRScheduler optim_kwargs = {"instantiate": False, "fail_untyped": False, "skip": {"params"}} scheduler_kwargs = {"instantiate": False, "fail_untyped": False, "skip": {"optimizer"}} parser.add_subclass_arguments( - baseclass=(Optimizer,), + baseclass=(Optimizer, list), nested_key="optimizer", **optim_kwargs, ) parser.add_subclass_arguments( - baseclass=LRSchedulerTypeTuple, + baseclass=(LRScheduler, ReduceLROnPlateau, list), nested_key="scheduler", **scheduler_kwargs, ) @@ -341,11 +342,17 @@ def instantiate_model(self, model_config: Namespace) -> tuple: # Update self.config with model self.config[self.subcommand].update(Namespace(model=model_config)) - optimizer_kwargs = namespace_to_dict(self.get_config_value(self.config_init, "optimizer", Namespace())) - scheduler_kwargs = namespace_to_dict(self.get_config_value(self.config_init, "scheduler", Namespace())) from otx.core.utils.instantiators import partial_instantiate_class - return model, partial_instantiate_class(optimizer_kwargs), partial_instantiate_class(scheduler_kwargs) + optimizer_kwargs = self.get_config_value(self.config_init, "optimizer", {}) + optimizer_kwargs = optimizer_kwargs if isinstance(optimizer_kwargs, list) else [optimizer_kwargs] + optimizers = partial_instantiate_class([_opt for _opt in optimizer_kwargs if _opt]) + + scheduler_kwargs = self.get_config_value(self.config_init, "scheduler", {}) + scheduler_kwargs = scheduler_kwargs if isinstance(scheduler_kwargs, list) else [scheduler_kwargs] + schedulers = partial_instantiate_class([_sch for _sch in scheduler_kwargs if _sch]) + + return model, optimizers, schedulers def get_config_value(self, config: Namespace, key: str, default: Any = None) -> Any: # noqa: ANN401 """Retrieves the value of a configuration key from the given config object. @@ -357,8 +364,10 @@ def get_config_value(self, config: Namespace, key: str, default: Any = None) -> Returns: Any: The value of the configuration key, or the default value if the key is not found. + If the value is a Namespace, it is converted to a dictionary.
""" - return config.get(str(self.subcommand), config).get(key, default) + result = config.get(str(self.subcommand), config).get(key, default) + return namespace_to_dict(result) if isinstance(result, Namespace) else result def get_subcommand_parser(self, subcommand: str | None) -> ArgumentParser: """Returns the argument parser for the specified subcommand. diff --git a/src/otx/cli/utils/jsonargparse.py b/src/otx/cli/utils/jsonargparse.py index d16a1a238c6..8ea735e9d07 100644 --- a/src/otx/cli/utils/jsonargparse.py +++ b/src/otx/cli/utils/jsonargparse.py @@ -178,7 +178,7 @@ def list_override(configs: Namespace, key: str, overrides: list) -> None: ... ... ... ] """ - if key not in configs: + if key not in configs or configs[key] is None: return for target in overrides: class_path = target.get("class_path", None) diff --git a/src/otx/core/model/module/action_classification.py b/src/otx/core/model/module/action_classification.py index 867f7378283..cdc4d065982 100644 --- a/src/otx/core/model/module/action_classification.py +++ b/src/otx/core/model/module/action_classification.py @@ -28,8 +28,8 @@ def __init__( self, otx_model: OTXActionClsModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/model/module/action_detection.py b/src/otx/core/model/module/action_detection.py index cf9ff35baaf..3e5f0ba7d46 100644 --- a/src/otx/core/model/module/action_detection.py +++ b/src/otx/core/model/module/action_detection.py @@ -29,8 +29,8 @@ def __init__( self, otx_model: OTXActionDetModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/model/module/base.py b/src/otx/core/model/module/base.py index b0e90ef16c9..8dd4f0e5618 100644 --- a/src/otx/core/model/module/base.py +++ b/src/otx/core/model/module/base.py @@ -12,7 +12,6 @@ from lightning import LightningModule from torch import Tensor -from otx.algo.schedulers.warmup_schedulers import BaseWarmupScheduler from otx.core.data.entity.base import ( OTXBatchDataEntity, OTXBatchLossEntity, @@ -34,11 +33,13 @@ def __init__( self, optimizer: torch.optim.Optimizer, num_warmup_steps: int = 1000, + interval: str = "step", ): - if num_warmup_steps > 0: + if not num_warmup_steps > 0: msg = f"num_warmup_steps should be > 0, got {num_warmup_steps}" - ValueError(msg) + raise ValueError(msg) self.num_warmup_steps = num_warmup_steps + self.interval = interval super().__init__(optimizer, lambda step: min(step / num_warmup_steps, 1.0)) @@ -50,8 +51,8 @@ def __init__( *, otx_model: OTXModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = 
torch.optim.lr_scheduler.ConstantLR, ): super().__init__() @@ -110,7 +111,7 @@ def setup(self, stage: str) -> None: if self.torch_compile and stage == "fit": self.model = torch.compile(self.model) - def configure_optimizers(self) -> tuple[list[torch.optim.Optimizer], list[torch.optim.Optimizer]]: + def configure_optimizers(self) -> tuple[list[torch.optim.Optimizer], list[dict]]: """Choose what optimizers and learning-rate schedulers to use in your optimization. Normally you'd need one. But in the case of GANs or similar you might have multiple. @@ -120,34 +121,26 @@ def configure_optimizers(self) -> tuple[list[torch.optim.Optimizer], list[torch. :return: A dict containing the configured optimizers and learning-rate schedulers to be used for training. """ - optimizer = ( - self.hparams.optimizer(params=self.parameters()) - if callable(self.hparams.optimizer) - else self.hparams.optimizer - ) - - scheduler = ( - self.hparams.scheduler(optimizer=optimizer) if callable(self.hparams.scheduler) else self.hparams.scheduler - ) - - lr_scheduler_configs = [] - if isinstance(scheduler, BaseWarmupScheduler) and scheduler.warmup_steps > 0: - lr_scheduler_configs += [ - { - "scheduler": LinearWarmupScheduler(optimizer, num_warmup_steps=scheduler.warmup_steps), - "interval": "step", - }, - ] - lr_scheduler_configs += [ - { - "scheduler": scheduler, - "monitor": self.lr_scheduler_monitor_key, - "interval": "epoch", - "frequency": self.trainer.check_val_every_n_epoch, - }, + + def ensure_list(item: Any) -> list: # noqa: ANN401 + return item if isinstance(item, list) else [item] + + optimizers = [ + optimizer(params=self.parameters()) if callable(optimizer) else optimizer + for optimizer in ensure_list(self.hparams.optimizer) ] - return [optimizer], lr_scheduler_configs + lr_schedulers = [] + for scheduler_config in ensure_list(self.hparams.scheduler): + scheduler = scheduler_config(optimizers[0]) if callable(scheduler_config) else scheduler_config + lr_scheduler_config = {"scheduler": scheduler} + if hasattr(scheduler, "interval"): + lr_scheduler_config["interval"] = scheduler.interval + if hasattr(scheduler, "monitor"): + lr_scheduler_config["monitor"] = scheduler.monitor + lr_schedulers.append(lr_scheduler_config) + + return optimizers, lr_schedulers def register_load_state_dict_pre_hook(self, model_classes: list[str], ckpt_classes: list[str]) -> None: """Register self.model's load_state_dict_pre_hook. 
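A minimal, runnable sketch of the list-based optimizer/scheduler handling introduced in base.py above, assuming only torch is installed. LinearWarmupScheduler is re-declared locally to mirror the patched class, configure_optimizers is reduced to a free function, and the toy model is illustrative only:

```python
from functools import partial
from typing import Any

import torch
from torch.optim.lr_scheduler import LambdaLR


class LinearWarmupScheduler(LambdaLR):
    """Local mirror of the patched class: scales the LR linearly over the first num_warmup_steps steps."""

    def __init__(self, optimizer: torch.optim.Optimizer, num_warmup_steps: int = 1000, interval: str = "step"):
        if not num_warmup_steps > 0:
            msg = f"num_warmup_steps should be > 0, got {num_warmup_steps}"
            raise ValueError(msg)
        self.num_warmup_steps = num_warmup_steps
        self.interval = interval
        super().__init__(optimizer, lambda step: min(step / num_warmup_steps, 1.0))


def configure_optimizers(params, optimizer_cfg, scheduler_cfg):
    """Free-function rendition of the patched hook: single values and lists are both accepted."""

    def ensure_list(item: Any) -> list:
        return item if isinstance(item, list) else [item]

    optimizers = [opt(params=params) if callable(opt) else opt for opt in ensure_list(optimizer_cfg)]
    lr_schedulers = []
    for cfg in ensure_list(scheduler_cfg):
        # Every scheduler is attached to the first optimizer, as in the patch.
        scheduler = cfg(optimizers[0]) if callable(cfg) else cfg
        entry = {"scheduler": scheduler}
        if hasattr(scheduler, "interval"):  # e.g. LinearWarmupScheduler carries interval="step"
            entry["interval"] = scheduler.interval
        if hasattr(scheduler, "monitor"):  # e.g. Lightning's ReduceLROnPlateau wrapper carries a monitor key
            entry["monitor"] = scheduler.monitor
        lr_schedulers.append(entry)
    return optimizers, lr_schedulers


model = torch.nn.Linear(4, 2)
optimizers, schedulers = configure_optimizers(
    model.parameters(),
    partial(torch.optim.SGD, lr=0.01),
    [partial(LinearWarmupScheduler, num_warmup_steps=10), partial(torch.optim.lr_scheduler.ConstantLR)],
)
print(schedulers[0]["interval"])  # "step"; the ConstantLR entry carries neither interval nor monitor
```

This is why the recipes below can list a warmup scheduler and a ReduceLROnPlateau entry side by side: the warmup entry steps per iteration, while the plateau entry is driven by its monitored metric.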
diff --git a/src/otx/core/model/module/classification.py b/src/otx/core/model/module/classification.py index 7505824b48c..fd1f9cf5431 100644 --- a/src/otx/core/model/module/classification.py +++ b/src/otx/core/model/module/classification.py @@ -37,8 +37,8 @@ def __init__( self, otx_model: OTXMulticlassClsModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, @@ -130,8 +130,8 @@ def __init__( self, otx_model: OTXMultilabelClsModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, @@ -218,8 +218,8 @@ def __init__( self, otx_model: OTXHlabelClsModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/model/module/detection.py b/src/otx/core/model/module/detection.py index 3869a5a798f..f2d9938874a 100644 --- a/src/otx/core/model/module/detection.py +++ b/src/otx/core/model/module/detection.py @@ -29,8 +29,8 @@ def __init__( self, otx_model: ExplainableOTXDetModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/model/module/instance_segmentation.py b/src/otx/core/model/module/instance_segmentation.py index 4d27ece3a4c..40bf4fb4fb3 100644 --- a/src/otx/core/model/module/instance_segmentation.py +++ b/src/otx/core/model/module/instance_segmentation.py @@ -32,8 +32,8 @@ def __init__( self, otx_model: ExplainableOTXInstanceSegModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/model/module/rotated_detection.py b/src/otx/core/model/module/rotated_detection.py index 84275563b2e..12bfd84d5a7 100644 --- a/src/otx/core/model/module/rotated_detection.py +++ b/src/otx/core/model/module/rotated_detection.py @@ -25,8 +25,8 @@ def __init__( self, otx_model: OTXRotatedDetModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: 
LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/model/module/segmentation.py b/src/otx/core/model/module/segmentation.py index de028c5c40a..000b2cdea3d 100644 --- a/src/otx/core/model/module/segmentation.py +++ b/src/otx/core/model/module/segmentation.py @@ -29,8 +29,8 @@ def __init__( self, otx_model: OTXSegmentationModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/model/module/visual_prompting.py b/src/otx/core/model/module/visual_prompting.py index 599f2a06c79..68a62ef8398 100644 --- a/src/otx/core/model/module/visual_prompting.py +++ b/src/otx/core/model/module/visual_prompting.py @@ -36,8 +36,8 @@ def __init__( self, otx_model: OTXVisualPromptingModel, torch_compile: bool, - optimizer: OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), - scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, + optimizer: list[OptimizerCallable] | OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01), + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR, ): super().__init__( otx_model=otx_model, diff --git a/src/otx/core/utils/instantiators.py b/src/otx/core/utils/instantiators.py index b19f0105cea..5ca301b1163 100644 --- a/src/otx/core/utils/instantiators.py +++ b/src/otx/core/utils/instantiators.py @@ -66,24 +66,29 @@ def instantiate_loggers(logger_cfg: list | None) -> list[Logger]: return logger -def partial_instantiate_class(init: dict | None) -> partial | None: +def partial_instantiate_class(init: list | dict | None) -> list[partial] | None: """Partially instantiates a class with the given initialization arguments. Copy from lightning.pytorch.cli.instantiate_class and modify it to use partial. Args: - init (dict): A dictionary containing the initialization arguments. - It should have the following keys: + init (list | dict | None): A dictionary, or a list of dictionaries, containing the initialization arguments. + Each should have the following keys: - "init_args" (dict): A dictionary of keyword arguments to be passed to the class constructor. - "class_path" (str): The fully qualified path of the class to be instantiated. Returns: - partial: A partial object representing the partially instantiated class. + list[partial] | None: A list of partial objects representing the partially instantiated classes.
""" if not init: return None - kwargs = init.get("init_args", {}) - class_module, class_name = init["class_path"].rsplit(".", 1) - module = __import__(class_module, fromlist=[class_name]) - args_class = getattr(module, class_name) - return partial(args_class, **kwargs) + if not isinstance(init, list): + init = [init] + items: list[partial] = [] + for item in init: + kwargs = item.get("init_args", {}) + class_module, class_name = item["class_path"].rsplit(".", 1) + module = __import__(class_module, fromlist=[class_name]) + args_class = getattr(module, class_name) + items.append(partial(args_class, **kwargs)) + return items diff --git a/src/otx/engine/engine.py b/src/otx/engine/engine.py index 64e63fe2137..694a29827ac 100644 --- a/src/otx/engine/engine.py +++ b/src/otx/engine/engine.py @@ -83,8 +83,8 @@ def __init__( work_dir: PathLike = "./otx-workspace", datamodule: OTXDataModule | None = None, model: OTXModel | str | None = None, - optimizer: OptimizerCallable | None = None, - scheduler: LRSchedulerCallable | None = None, + optimizer: list[OptimizerCallable] | OptimizerCallable | None = None, + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable | None = None, checkpoint: PathLike | None = None, device: DeviceType = DeviceType.auto, **kwargs, @@ -97,9 +97,10 @@ def __init__( work_dir (PathLike, optional): Working directory for the engine. Defaults to "./otx-workspace". datamodule (OTXDataModule | None, optional): The data module for the engine. Defaults to None. model (OTXModel | str | None, optional): The model for the engine. Defaults to None. - optimizer (OptimizerCallable | None, optional): The optimizer for the engine. Defaults to None. - scheduler (LRSchedulerCallable | None, optional): The learning rate scheduler for the engine. + optimizer (list[OptimizerCallable] | OptimizerCallable | None, optional): The optimizer for the engine. Defaults to None. + scheduler (list[LRSchedulerCallable] | LRSchedulerCallable | None, optional): + The learning rate scheduler for the engine. Defaults to None. checkpoint (PathLike | None, optional): Path to the checkpoint file. Defaults to None. device (DeviceType, optional): The device type to use. Defaults to DeviceType.auto. **kwargs: Additional keyword arguments for pl.Trainer. @@ -132,10 +133,10 @@ def __init__( meta_info=self._datamodule.meta_info if self._datamodule is not None else None, ) ) - self.optimizer: OptimizerCallable | None = ( + self.optimizer: list[OptimizerCallable] | OptimizerCallable | None = ( optimizer if optimizer is not None else self._auto_configurator.get_optimizer() ) - self.scheduler: LRSchedulerCallable | None = ( + self.scheduler: list[LRSchedulerCallable] | LRSchedulerCallable | None = ( scheduler if scheduler is not None else self._auto_configurator.get_scheduler() ) @@ -667,15 +668,15 @@ def datamodule(self) -> OTXDataModule: def _build_lightning_module( self, model: OTXModel, - optimizer: OptimizerCallable, - scheduler: LRSchedulerCallable, + optimizer: list[OptimizerCallable] | OptimizerCallable | None, + scheduler: list[LRSchedulerCallable] | LRSchedulerCallable | None, ) -> OTXLitModule: """Builds a LightningModule for engine workflow. Args: model (OTXModel): The OTXModel instance. - optimizer (OptimizerCallable): The optimizer callable. - scheduler (LRSchedulerCallable): The learning rate scheduler callable. + optimizer (list[OptimizerCallable] | OptimizerCallable | None): The optimizer callable. + scheduler (list[LRSchedulerCallable] | LRSchedulerCallable | None): The learning rate scheduler callable. 
Returns: OTXLitModule: The built LightningModule instance. diff --git a/src/otx/engine/utils/auto_configurator.py b/src/otx/engine/utils/auto_configurator.py index 98f7f3506e1..edeceb9ce50 100644 --- a/src/otx/engine/utils/auto_configurator.py +++ b/src/otx/engine/utils/auto_configurator.py @@ -249,21 +249,21 @@ def get_model(self, model_name: str | None = None, meta_info: LabelInfo | None = logger.warning(f"Set Default Model: {self.config['model']}") return instantiate_class(args=(), init=self.config["model"]) - def get_optimizer(self) -> OptimizerCallable | None: + def get_optimizer(self) -> list[OptimizerCallable] | None: """Returns the optimizer callable based on the configuration. Returns: - OptimizerCallable | None: The optimizer callable. + list[OptimizerCallable] | None: The list of optimizer callables. """ optimizer_config = self.config.get("optimizer", None) logger.warning(f"Set Default Optimizer: {optimizer_config}") return partial_instantiate_class(init=optimizer_config) - def get_scheduler(self) -> LRSchedulerCallable | None: + def get_scheduler(self) -> list[LRSchedulerCallable] | None: """Returns the instantiated scheduler based on the configuration. Returns: - LRSchedulerCallable | None: The instantiated scheduler. + list[LRSchedulerCallable] | None: The list of instantiated schedulers. """ scheduler_config = self.config.get("scheduler", None) logger.warning(f"Set Default Scheduler: {scheduler_config}") diff --git a/src/otx/recipe/action/action_classification/x3d.yaml b/src/otx/recipe/action/action_classification/x3d.yaml index 98fa0340eae..e43d0b2c1e9 100644 --- a/src/otx/recipe/action/action_classification/x3d.yaml +++ b/src/otx/recipe/action/action_classification/x3d.yaml @@ -10,13 +10,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 2 - monitor: val/accuracy + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 2 + monitor: val/accuracy engine: task: ACTION_CLASSIFICATION diff --git a/src/otx/recipe/action/action_detection/x3d_fastrcnn.yaml b/src/otx/recipe/action/action_detection/x3d_fastrcnn.yaml index f297429cce6..36c51f4247e 100644 --- a/src/otx/recipe/action/action_detection/x3d_fastrcnn.yaml +++ b/src/otx/recipe/action/action_detection/x3d_fastrcnn.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.00001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 2 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 2 + monitor: val/map_50 engine: task: ACTION_DETECTION diff --git a/src/otx/recipe/classification/h_label_cls/efficientnet_b0_light.yaml b/src/otx/recipe/classification/h_label_cls/efficientnet_b0_light.yaml index 6717202c843..a6b696b1497 100644 --- a/src/otx/recipe/classification/h_label_cls/efficientnet_b0_light.yaml +++ b/src/otx/recipe/classification/h_label_cls/efficientnet_b0_light.yaml @@ -11,9 +11,8 @@ optimizer: lr: 0.0049 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau + class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - warmup_steps: 0 mode: max factor: 0.5 patience: 1 @@ -27,6 +26,11 @@
callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: H_LABEL_CLS config: diff --git a/src/otx/recipe/classification/h_label_cls/efficientnet_v2_light.yaml b/src/otx/recipe/classification/h_label_cls/efficientnet_v2_light.yaml index 6f63839aee3..9a12f9005e9 100644 --- a/src/otx/recipe/classification/h_label_cls/efficientnet_v2_light.yaml +++ b/src/otx/recipe/classification/h_label_cls/efficientnet_v2_light.yaml @@ -13,9 +13,8 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau + class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - warmup_steps: 0 mode: max factor: 0.5 patience: 1 @@ -29,6 +28,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: H_LABEL_CLS config: diff --git a/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large_light.yaml b/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large_light.yaml index fb4e34bc725..12f731da739 100644 --- a/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large_light.yaml +++ b/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large_light.yaml @@ -13,13 +13,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 10 - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 10 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 1 + monitor: val/accuracy engine: task: H_LABEL_CLS @@ -29,6 +31,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: H_LABEL_CLS config: diff --git a/src/otx/recipe/classification/h_label_cls/otx_deit_tiny.yaml b/src/otx/recipe/classification/h_label_cls/otx_deit_tiny.yaml index d837293b0a3..a6d2e62b6a3 100644 --- a/src/otx/recipe/classification/h_label_cls/otx_deit_tiny.yaml +++ b/src/otx/recipe/classification/h_label_cls/otx_deit_tiny.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.05 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 10 - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 10 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 1 + monitor: val/accuracy engine: task: H_LABEL_CLS @@ -28,6 +30,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: H_LABEL_CLS config: diff --git a/src/otx/recipe/classification/multi_class_cls/efficientnet_b0_light.yaml b/src/otx/recipe/classification/multi_class_cls/efficientnet_b0_light.yaml index ec9309a707c..7c7511b134b 100644 --- a/src/otx/recipe/classification/multi_class_cls/efficientnet_b0_light.yaml +++ b/src/otx/recipe/classification/multi_class_cls/efficientnet_b0_light.yaml @@ -12,9 
+12,8 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau + class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - warmup_steps: 0 mode: max factor: 0.5 patience: 1 @@ -27,3 +26,10 @@ engine: callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml + +overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 diff --git a/src/otx/recipe/classification/multi_class_cls/efficientnet_v2_light.yaml b/src/otx/recipe/classification/multi_class_cls/efficientnet_v2_light.yaml index eb50774b193..c4402ac4810 100644 --- a/src/otx/recipe/classification/multi_class_cls/efficientnet_v2_light.yaml +++ b/src/otx/recipe/classification/multi_class_cls/efficientnet_v2_light.yaml @@ -12,9 +12,8 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau + class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - warmup_steps: 0 mode: max factor: 0.5 patience: 1 @@ -28,6 +27,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: config: train_subset: diff --git a/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large_light.yaml b/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large_light.yaml index 200fe8b08cc..080cc830be7 100644 --- a/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large_light.yaml +++ b/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large_light.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 10 - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 10 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 1 + monitor: val/accuracy engine: task: MULTI_CLASS_CLS @@ -28,6 +30,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: config: train_subset: diff --git a/src/otx/recipe/classification/multi_class_cls/otx_deit_tiny.yaml b/src/otx/recipe/classification/multi_class_cls/otx_deit_tiny.yaml index 5c3373ce98f..da0c5522854 100644 --- a/src/otx/recipe/classification/multi_class_cls/otx_deit_tiny.yaml +++ b/src/otx/recipe/classification/multi_class_cls/otx_deit_tiny.yaml @@ -10,13 +10,15 @@ optimizer: weight_decay: 0.05 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 10 - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 10 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 1 + monitor: val/accuracy engine: task: MULTI_CLASS_CLS @@ -25,3 +27,10 @@ engine: callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml + +overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 diff --git a/src/otx/recipe/classification/multi_class_cls/otx_dino_v2.yaml 
b/src/otx/recipe/classification/multi_class_cls/otx_dino_v2.yaml index e911b5e06a2..e69f1e1aa8e 100644 --- a/src/otx/recipe/classification/multi_class_cls/otx_dino_v2.yaml +++ b/src/otx/recipe/classification/multi_class_cls/otx_dino_v2.yaml @@ -34,6 +34,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: config: train_subset: diff --git a/src/otx/recipe/classification/multi_class_cls/otx_dino_v2_linear_probe.yaml b/src/otx/recipe/classification/multi_class_cls/otx_dino_v2_linear_probe.yaml index 4ea1f2d67bf..3aaba0b25bd 100644 --- a/src/otx/recipe/classification/multi_class_cls/otx_dino_v2_linear_probe.yaml +++ b/src/otx/recipe/classification/multi_class_cls/otx_dino_v2_linear_probe.yaml @@ -36,6 +36,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: config: train_subset: diff --git a/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_b0.yaml b/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_b0.yaml index 8526dc42b0d..678cf451556 100644 --- a/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_b0.yaml +++ b/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_b0.yaml @@ -14,10 +14,10 @@ optimizer: scheduler: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - mode: min + mode: max factor: 0.5 patience: 1 - monitor: train/loss + monitor: val/accuracy engine: task: MULTI_CLASS_CLS @@ -26,3 +26,10 @@ engine: callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml + +overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 diff --git a/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_v2.yaml b/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_v2.yaml index 1dee8cd1331..e0a2db7f69b 100644 --- a/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_v2.yaml +++ b/src/otx/recipe/classification/multi_class_cls/otx_efficientnet_v2.yaml @@ -14,10 +14,10 @@ optimizer: scheduler: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - mode: min + mode: max factor: 0.5 patience: 1 - monitor: train/loss + monitor: val/accuracy engine: task: MULTI_CLASS_CLS @@ -27,6 +27,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: config: train_subset: diff --git a/src/otx/recipe/classification/multi_class_cls/otx_mobilenet_v3_large.yaml b/src/otx/recipe/classification/multi_class_cls/otx_mobilenet_v3_large.yaml index 5c280d6b397..7058f87da0e 100644 --- a/src/otx/recipe/classification/multi_class_cls/otx_mobilenet_v3_large.yaml +++ b/src/otx/recipe/classification/multi_class_cls/otx_mobilenet_v3_large.yaml @@ -12,12 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau - init_args: - mode: min - factor: 0.5 - patience: 1 - monitor: train/loss + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 10 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 1 + monitor: val/accuracy engine: task: 
MULTI_CLASS_CLS @@ -27,6 +30,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: config: train_subset: diff --git a/src/otx/recipe/classification/multi_label_cls/efficientnet_b0_light.yaml b/src/otx/recipe/classification/multi_label_cls/efficientnet_b0_light.yaml index 7b055bb74d7..e904e4bfe12 100644 --- a/src/otx/recipe/classification/multi_label_cls/efficientnet_b0_light.yaml +++ b/src/otx/recipe/classification/multi_label_cls/efficientnet_b0_light.yaml @@ -9,9 +9,8 @@ optimizer: lr: 0.0049 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau + class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - warmup_steps: 0 mode: max factor: 0.5 patience: 1 @@ -25,6 +24,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: MULTI_LABEL_CLS config: diff --git a/src/otx/recipe/classification/multi_label_cls/efficientnet_v2_light.yaml b/src/otx/recipe/classification/multi_label_cls/efficientnet_v2_light.yaml index 7517e625995..ab0c328d6b2 100644 --- a/src/otx/recipe/classification/multi_label_cls/efficientnet_v2_light.yaml +++ b/src/otx/recipe/classification/multi_label_cls/efficientnet_v2_light.yaml @@ -11,9 +11,8 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau + class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: - warmup_steps: 0 mode: max factor: 0.5 patience: 1 @@ -27,6 +26,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: MULTI_LABEL_CLS config: diff --git a/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large_light.yaml b/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large_light.yaml index 78f34991e0d..5f9f82ae0f8 100644 --- a/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large_light.yaml +++ b/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large_light.yaml @@ -11,13 +11,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 10 - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 10 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 1 + monitor: val/accuracy engine: task: MULTI_LABEL_CLS @@ -27,6 +29,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: MULTI_LABEL_CLS config: diff --git a/src/otx/recipe/classification/multi_label_cls/otx_deit_tiny.yaml b/src/otx/recipe/classification/multi_label_cls/otx_deit_tiny.yaml index 2c1b64e8452..9e66ece0fbf 100644 --- a/src/otx/recipe/classification/multi_label_cls/otx_deit_tiny.yaml +++ b/src/otx/recipe/classification/multi_label_cls/otx_deit_tiny.yaml @@ -10,13 +10,15 @@ optimizer: weight_decay: 0.05 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 10 - mode: max - 
factor: 0.5 - patience: 1 - monitor: val/accuracy + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 10 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 1 + monitor: val/accuracy engine: task: MULTI_LABEL_CLS @@ -26,6 +28,11 @@ callback_monitor: val/accuracy data: ../../_base_/data/mmpretrain_base.yaml overrides: + max_epochs: 90 + callbacks: + - class_path: lightning.pytorch.callbacks.EarlyStopping + init_args: + patience: 3 data: task: MULTI_LABEL_CLS config: diff --git a/src/otx/recipe/detection/atss_mobilenetv2.yaml b/src/otx/recipe/detection/atss_mobilenetv2.yaml index c35d5129bda..69d1cd52c7d 100644 --- a/src/otx/recipe/detection/atss_mobilenetv2.yaml +++ b/src/otx/recipe/detection/atss_mobilenetv2.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/atss_r50_fpn.yaml b/src/otx/recipe/detection/atss_r50_fpn.yaml index 0f005a3b18d..43a20e292f4 100644 --- a/src/otx/recipe/detection/atss_r50_fpn.yaml +++ b/src/otx/recipe/detection/atss_r50_fpn.yaml @@ -10,13 +10,15 @@ optimizer: weight_decay: 0.0 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/atss_resnext101.yaml b/src/otx/recipe/detection/atss_resnext101.yaml index f46baa1c21f..bb0a7b939f9 100644 --- a/src/otx/recipe/detection/atss_resnext101.yaml +++ b/src/otx/recipe/detection/atss_resnext101.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/ssd_mobilenetv2.yaml b/src/otx/recipe/detection/ssd_mobilenetv2.yaml index 84350f1e232..09b10bc4eea 100644 --- a/src/otx/recipe/detection/ssd_mobilenetv2.yaml +++ b/src/otx/recipe/detection/ssd_mobilenetv2.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_l.yaml b/src/otx/recipe/detection/yolox_l.yaml index 01c83b61e1d..690f7bfd4f4 
100644 --- a/src/otx/recipe/detection/yolox_l.yaml +++ b/src/otx/recipe/detection/yolox_l.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_l_tile.yaml b/src/otx/recipe/detection/yolox_l_tile.yaml index e49efa21de6..af45f977f3b 100644 --- a/src/otx/recipe/detection/yolox_l_tile.yaml +++ b/src/otx/recipe/detection/yolox_l_tile.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_s.yaml b/src/otx/recipe/detection/yolox_s.yaml index c2b5878ad1d..0bf3a268446 100644 --- a/src/otx/recipe/detection/yolox_s.yaml +++ b/src/otx/recipe/detection/yolox_s.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_s_tile.yaml b/src/otx/recipe/detection/yolox_s_tile.yaml index 21da346c6b9..5f44cbe8964 100644 --- a/src/otx/recipe/detection/yolox_s_tile.yaml +++ b/src/otx/recipe/detection/yolox_s_tile.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_tiny.yaml b/src/otx/recipe/detection/yolox_tiny.yaml index fafc40e6bfc..9997a0022e8 100644 --- a/src/otx/recipe/detection/yolox_tiny.yaml +++ b/src/otx/recipe/detection/yolox_tiny.yaml @@ -11,13 +11,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_tiny_tile.yaml b/src/otx/recipe/detection/yolox_tiny_tile.yaml index f3633769e81..54d37914817 100644 --- 
a/src/otx/recipe/detection/yolox_tiny_tile.yaml +++ b/src/otx/recipe/detection/yolox_tiny_tile.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_x.yaml b/src/otx/recipe/detection/yolox_x.yaml index cb3496f9e2a..68162a0164d 100644 --- a/src/otx/recipe/detection/yolox_x.yaml +++ b/src/otx/recipe/detection/yolox_x.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/detection/yolox_x_tile.yaml b/src/otx/recipe/detection/yolox_x_tile.yaml index d7d23780abf..f5c74413cf8 100644 --- a/src/otx/recipe/detection/yolox_x_tile.yaml +++ b/src/otx/recipe/detection/yolox_x_tile.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.0001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 3 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 3 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: DETECTION diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml index 4a207a8f4b6..bdba41b089f 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: INSTANCE_SEGMENTATION @@ -28,6 +30,7 @@ callback_monitor: val/map_50 data: ../_base_/data/mmdet_base.yaml overrides: + max_epochs: 100 data: task: INSTANCE_SEGMENTATION config: diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml index aab00bcca18..c795668081c 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: 
otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: INSTANCE_SEGMENTATION @@ -28,6 +30,7 @@ callback_monitor: val/map_50 data: ../_base_/data/mmdet_base.yaml overrides: + max_epochs: 100 gradient_clip_val: 35.0 data: task: INSTANCE_SEGMENTATION diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml index 56cdb8cd1d2..2ea4a57884f 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: INSTANCE_SEGMENTATION @@ -28,6 +30,7 @@ callback_monitor: val/map_50 data: ../_base_/data/mmdet_base.yaml overrides: + max_epochs: 100 gradient_clip_val: 35.0 data: task: INSTANCE_SEGMENTATION diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml index 9375bfff69e..398943e1744 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml @@ -12,13 +12,15 @@ optimizer: weight_decay: 0.001 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: INSTANCE_SEGMENTATION @@ -28,6 +30,7 @@ callback_monitor: val/map_50 data: ../_base_/data/mmdet_base.yaml overrides: + max_epochs: 100 gradient_clip_val: 35.0 data: task: INSTANCE_SEGMENTATION diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml index 5a8a7ddf3e8..8806f46b905 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml @@ -10,13 +10,15 @@ optimizer: weight_decay: 0.05 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/map_50 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/map_50 engine: task: INSTANCE_SEGMENTATION @@ -26,6 +28,7 @@ callback_monitor: val/map_50 data: ../_base_/data/mmdet_base.yaml overrides: + max_epochs: 100 data: task: INSTANCE_SEGMENTATION config: diff --git a/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml b/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml index 64e89e0850a..b5f5a9a8cfe 100644 --- a/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml +++ 
b/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml @@ -12,12 +12,15 @@ optimizer: weight_decay: 0.001 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau - init_args: - mode: min - factor: 0.1 - patience: 10 - monitor: train/loss + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.1 + patience: 10 + monitor: val/map_50 engine: task: ROTATED_DETECTION @@ -27,6 +30,7 @@ callback_monitor: val/map_50 data: ../_base_/data/mmdet_base.yaml overrides: + max_epochs: 100 data: task: ROTATED_DETECTION config: diff --git a/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml b/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml index 20bdc6c4ec2..415e11b66fa 100644 --- a/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml +++ b/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml @@ -12,12 +12,15 @@ optimizer: weight_decay: 0.001 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau - init_args: - mode: min - factor: 0.1 - patience: 10 - monitor: train/loss + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.1 + patience: 10 + monitor: val/map_50 engine: task: ROTATED_DETECTION @@ -27,6 +30,7 @@ callback_monitor: val/map_50 data: ../_base_/data/mmdet_base.yaml overrides: + max_epochs: 100 data: task: ROTATED_DETECTION config: diff --git a/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml b/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml index 8956be79ecd..99995b831da 100644 --- a/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml +++ b/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml @@ -14,13 +14,15 @@ optimizer: weight_decay: 0.0 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/Dice + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/Dice engine: task: SEMANTIC_SEGMENTATION @@ -29,3 +31,6 @@ engine: callback_monitor: val/Dice data: ../_base_/data/mmseg_base.yaml + +overrides: + max_epochs: 300 diff --git a/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml b/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml index aae8197a084..fdf93f5734c 100644 --- a/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml +++ b/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml @@ -14,13 +14,15 @@ optimizer: weight_decay: 0.0 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/Dice + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/Dice engine: task: SEMANTIC_SEGMENTATION @@ -29,3 +31,6 @@ engine: callback_monitor: val/Dice data: ../_base_/data/mmseg_base.yaml + +overrides: + max_epochs: 300 diff --git a/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml b/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml index ec6fa516248..3df08bb4eb3 100644 --- a/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml +++ 
b/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml @@ -14,13 +14,15 @@ optimizer: weight_decay: 0.0 scheduler: - class_path: otx.algo.schedulers.WarmupReduceLROnPlateau - init_args: - warmup_steps: 100 - mode: max - factor: 0.5 - patience: 5 - monitor: val/Dice + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 100 + - class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/Dice engine: task: SEMANTIC_SEGMENTATION @@ -29,3 +31,6 @@ engine: callback_monitor: val/Dice data: ../_base_/data/mmseg_base.yaml + +overrides: + max_epochs: 300 diff --git a/src/otx/recipe/semantic_segmentation/segnext_b.yaml b/src/otx/recipe/semantic_segmentation/segnext_b.yaml index 2501234f6b3..f2330302006 100644 --- a/src/otx/recipe/semantic_segmentation/segnext_b.yaml +++ b/src/otx/recipe/semantic_segmentation/segnext_b.yaml @@ -14,12 +14,14 @@ optimizer: weight_decay: 0.01 scheduler: - class_path: otx.algo.schedulers.warmup_schedulers.WarmupPolynomialLR - init_args: - warmup_steps: 20 - total_iters: 100 - power: 0.9 - last_epoch: -1 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 20 + - class_path: torch.optim.lr_scheduler.PolynomialLR + init_args: + total_iters: 100 + power: 0.9 + last_epoch: -1 engine: task: SEMANTIC_SEGMENTATION @@ -29,6 +31,7 @@ callback_monitor: val/Dice data: ../_base_/data/mmseg_base.yaml overrides: + max_epochs: 170 data: config: train_subset: diff --git a/src/otx/recipe/semantic_segmentation/segnext_s.yaml b/src/otx/recipe/semantic_segmentation/segnext_s.yaml index e319bc5ada7..7f814f34119 100644 --- a/src/otx/recipe/semantic_segmentation/segnext_s.yaml +++ b/src/otx/recipe/semantic_segmentation/segnext_s.yaml @@ -14,12 +14,14 @@ optimizer: weight_decay: 0.01 scheduler: - class_path: otx.algo.schedulers.warmup_schedulers.WarmupPolynomialLR - init_args: - warmup_steps: 20 - total_iters: 100 - power: 0.9 - last_epoch: -1 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 20 + - class_path: torch.optim.lr_scheduler.PolynomialLR + init_args: + total_iters: 100 + power: 0.9 + last_epoch: -1 engine: task: SEMANTIC_SEGMENTATION @@ -29,6 +31,7 @@ callback_monitor: val/Dice data: ../_base_/data/mmseg_base.yaml overrides: + max_epochs: 170 data: config: train_subset: diff --git a/src/otx/recipe/semantic_segmentation/segnext_t.yaml b/src/otx/recipe/semantic_segmentation/segnext_t.yaml index e8f657de00d..3de98141813 100644 --- a/src/otx/recipe/semantic_segmentation/segnext_t.yaml +++ b/src/otx/recipe/semantic_segmentation/segnext_t.yaml @@ -14,12 +14,14 @@ optimizer: weight_decay: 0.01 scheduler: - class_path: otx.algo.schedulers.warmup_schedulers.WarmupPolynomialLR - init_args: - warmup_steps: 20 - total_iters: 100 - power: 0.9 - last_epoch: -1 + - class_path: otx.core.model.module.base.LinearWarmupScheduler + init_args: + num_warmup_steps: 20 + - class_path: torch.optim.lr_scheduler.PolynomialLR + init_args: + total_iters: 100 + power: 0.9 + last_epoch: -1 engine: task: SEMANTIC_SEGMENTATION @@ -29,6 +31,7 @@ callback_monitor: val/Dice data: ../_base_/data/mmseg_base.yaml overrides: + max_epochs: 170 data: config: train_subset: diff --git a/tests/unit/algo/callbacks/test_adaptive_train_scheduling.py b/tests/unit/algo/callbacks/test_adaptive_train_scheduling.py index 0229ea0c2fa..9d019be0f0a 100644 --- a/tests/unit/algo/callbacks/test_adaptive_train_scheduling.py +++ 
b/tests/unit/algo/callbacks/test_adaptive_train_scheduling.py @@ -32,6 +32,7 @@ def test_callback(self, caplog) -> None: mock_lr_scheduler_config = MagicMock(spec=LRSchedulerConfig) mock_lr_scheduler_config.frequency = 1 + mock_lr_scheduler_config.interval = "epoch" mock_trainer.lr_scheduler_configs = [mock_lr_scheduler_config] with caplog.at_level(log.WARNING): diff --git a/tests/unit/core/model/module/test_base.py b/tests/unit/core/model/module/test_base.py index 7436a53a721..bf9961da165 100644 --- a/tests/unit/core/model/module/test_base.py +++ b/tests/unit/core/model/module/test_base.py @@ -8,8 +8,8 @@ from unittest.mock import MagicMock, create_autospec import pytest +from lightning.pytorch.cli import ReduceLROnPlateau from lightning.pytorch.trainer import Trainer -from otx.algo.schedulers.warmup_schedulers import WarmupReduceLROnPlateau from otx.core.model.entity.base import OTXModel from otx.core.model.module.base import LinearWarmupScheduler, OTXLitModule from torch.optim import Optimizer @@ -33,14 +33,19 @@ def optimizer_factory(*args, **kargs) -> Optimizer: # noqa: ARG001 return optimizer_factory @pytest.fixture() - def mock_scheduler(self) -> WarmupReduceLROnPlateau: - scheduler = MagicMock(spec=WarmupReduceLROnPlateau) - scheduler.warmup_steps = 10 + def mock_scheduler(self) -> list[LinearWarmupScheduler | ReduceLROnPlateau]: + scheduler_object_1 = MagicMock() + warmup_scheduler = MagicMock(spec=LinearWarmupScheduler) + warmup_scheduler.num_warmup_steps = 10 + warmup_scheduler.interval = "step" + scheduler_object_1.return_value = warmup_scheduler - def scheduler_factory(*args, **kargs) -> WarmupReduceLROnPlateau: # noqa: ARG001 - return scheduler + scheduler_object_2 = MagicMock() + lr_scheduler = MagicMock(spec=ReduceLROnPlateau) + lr_scheduler.monitor = "val/loss" + scheduler_object_2.return_value = lr_scheduler - return scheduler_factory + return [scheduler_object_1, scheduler_object_2] def test_configure_optimizers(self, mock_otx_model, mock_optimizer, mock_scheduler) -> None: module = OTXLitModule( @@ -61,7 +66,3 @@ def test_configure_optimizers(self, mock_otx_model, mock_optimizer, mock_schedul assert "scheduler" in lr_schedulers[1] assert "monitor" in lr_schedulers[1] - assert "interval" in lr_schedulers[1] - assert "frequency" in lr_schedulers[1] - - assert lr_schedulers[1]["frequency"] == 2 diff --git a/tests/unit/engine/utils/test_auto_configurator.py b/tests/unit/engine/utils/test_auto_configurator.py index aa65edf0a80..0abe3a77207 100644 --- a/tests/unit/engine/utils/test_auto_configurator.py +++ b/tests/unit/engine/utils/test_auto_configurator.py @@ -124,9 +124,19 @@ def test_get_model(self) -> None: def test_get_optimizer(self) -> None: task = OTXTaskType.SEMANTIC_SEGMENTATION auto_configurator = AutoConfigurator(task=task) - assert callable(auto_configurator.get_optimizer()) + optimizer = auto_configurator.get_optimizer() + if isinstance(optimizer, list): + for opt in optimizer: + assert callable(opt) + else: + assert callable(optimizer) def test_get_scheduler(self) -> None: task = OTXTaskType.INSTANCE_SEGMENTATION auto_configurator = AutoConfigurator(task=task) - assert callable(auto_configurator.get_scheduler()) + scheduler = auto_configurator.get_scheduler() + if isinstance(scheduler, list): + for sch in scheduler: + assert callable(sch) + else: + assert callable(scheduler)
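As a closing reference, a standalone sketch of the list-aware partial_instantiate_class exercised by the tests above; the torch class path is used only so the snippet runs outside OTX:

```python
from __future__ import annotations

from functools import partial


def partial_instantiate_class(init: list | dict | None) -> list[partial] | None:
    """Accepts a single {class_path, init_args} dict or a list of them, as in the patch."""
    if not init:
        return None
    if not isinstance(init, list):
        init = [init]
    items: list[partial] = []
    for item in init:
        kwargs = item.get("init_args", {})
        class_module, class_name = item["class_path"].rsplit(".", 1)
        module = __import__(class_module, fromlist=[class_name])
        items.append(partial(getattr(module, class_name), **kwargs))
    return items


# A scheduler list shaped like the recipe YAMLs above after parsing.
schedulers = partial_instantiate_class(
    [{"class_path": "torch.optim.lr_scheduler.ConstantLR", "init_args": {"factor": 0.5}}],
)
print(schedulers[0])  # functools.partial(<class 'torch.optim.lr_scheduler.ConstantLR'>, factor=0.5)
```

Since instantiate_model filters out empty entries before calling this helper, an omitted optimizer or scheduler section resolves to None rather than a list containing an empty partial.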