Commit 2095074: add unit test
eunwoosh committed Jul 23, 2024 (1 parent: b740b23)
Showing 4 changed files with 55 additions and 9 deletions.
src/otx/core/data/transform_libs/torchvision.py (20 changes: 11 additions & 9 deletions)
@@ -635,7 +635,7 @@ class RandomResizedCrop(tvt_v2.Transform, NumpytoTVTensorMixin):
     is made. This crop is finally resized to given size.

     Args:
-        scale (sequence | int): Desired output scale of the crop. If size is an
+        scale (Sequence[int] | int): Desired output scale of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        crop_ratio_range (tuple): Range of the random size of the cropped
@@ -654,7 +654,7 @@ class RandomResizedCrop(tvt_v2.Transform, NumpytoTVTensorMixin):

     def __init__(
         self,
-        scale: Sequence | int,
+        scale: Sequence[int] | int,
         crop_ratio_range: tuple[float, float] = (0.08, 1.0),
         aspect_ratio_range: tuple[float, float] = (3.0 / 4.0, 4.0 / 3.0),
         max_attempts: int = 10,
@@ -2342,7 +2342,7 @@ class RandomCrop(tvt_v2.Transform, NumpytoTVTensorMixin):
    The absolute `crop_size` is sampled based on `crop_type` and `image_size`, then the cropped results are generated.

    Args:
-        crop_size (tuple): The relative ratio or absolute pixels of
+        crop_size (tuple[int, int]): The relative ratio or absolute pixels of
            (height, width).
        crop_type (str, optional): One of "relative_range", "relative",
            "absolute", "absolute_range". "relative" randomly crops
@@ ... @@ class RandomCrop(tvt_v2.Transform, NumpytoTVTensorMixin):

     def __init__(
         self,
-        crop_size: tuple,  # (H, W)
+        crop_size: tuple[int, int],  # (H, W)
         crop_type: str = "absolute",
         cat_max_ratio: int | float = 1,
         allow_negative_crop: bool = False,
@@ -3122,9 +3122,7 @@ def generate(cls, config: SubsetConfig) -> Compose:

         transforms = []
         for cfg_transform in config.transforms:
-            for val in cfg_transform["init_args"].values():
-                if isinstance(val, str) and "^{input_size}" in val:
-                    cls._eval_input_size(cfg_transform, config.input_size)
+            cls._eval_input_size(cfg_transform, config.input_size)
             transform = cls._dispatch_transform(cfg_transform)
             transforms.append(transform)

@@ -3143,6 +3141,8 @@ def _eval_input_size(cls, cfg_transform: dict[str, Any], input_size: int | tuple
             return
         if isinstance(input_size, int):
             input_size = (input_size, input_size)
+        else:
+            input_size = tuple(input_size)

         def check_type(value, expected_type) -> bool:
             try:
@@ ... @@

             available_types = typing.get_type_hints(model_cls.__init__).get(key)
             if available_types is None or check_type(input_size, available_types):  # pass tuple[int, int]
-                cfg_transform["init_args"][key] = eval(val.replace("^{input_size}", "np.array(input_size)")).tolist()
+                cfg_transform["init_args"][key] = tuple(
+                    eval(val.replace("^{input_size}", "np.array(input_size)")).round().astype(np.int32).tolist()
+                )
             elif check_type(input_size[0], available_types):  # pass int
-                cfg_transform["init_args"][key] = eval(val.replace("^{input_size}", "input_size[0]"))
+                cfg_transform["init_args"][key] = round(eval(val.replace("^{input_size}", "input_size[0]")))
             else:
                 msg = f"{key} argument should be able to get int or tuple[int, int], but it can get {available_types}"
                 raise RuntimeError(msg)
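Editor's note: with this change, generate() delegates to _eval_input_size for every transform config instead of pre-scanning init_args for the placeholder, and evaluated sizes are now rounded to integers. A minimal standalone sketch of the "^{input_size}" evaluation (illustrative names, not the exact OTX code):

    import numpy as np

    def eval_input_size_expr(expr: str, input_size: tuple[int, int]) -> tuple[int, ...]:
        """Substitute the placeholder, evaluate the arithmetic, round to ints."""
        # e.g. "^{input_size} * 0.5" becomes "np.array(input_size) * 0.5"
        result = eval(expr.replace("^{input_size}", "np.array(input_size)"))
        return tuple(np.asarray(result).round().astype(np.int32).tolist())

    assert eval_input_size_expr("^{input_size} * 0.5", (224, 224)) == (112, 112)
    assert eval_input_size_expr("^{input_size} * 1.1", (224, 224)) == (246, 246)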
tests/unit/core/data/test_module.py (8 changes: 8 additions & 0 deletions)
@@ -46,18 +46,21 @@ def fxt_config(self) -> DictConfig:
         )
         train_subset.num_workers = 0
         train_subset.batch_size = 4
+        train_subset.input_size = None
         val_subset = MagicMock(spec=SubsetConfig)
         val_subset.sampler = DictConfig(
             {"class_path": "torch.utils.data.RandomSampler", "init_args": {"num_samples": 3}},
         )
         val_subset.num_workers = 0
         val_subset.batch_size = 3
+        val_subset.input_size = None
         test_subset = MagicMock(spec=SubsetConfig)
         test_subset.sampler = DictConfig(
             {"class_path": "torch.utils.data.RandomSampler", "init_args": {"num_samples": 3}},
         )
         test_subset.num_workers = 0
         test_subset.batch_size = 1
+        test_subset.input_size = None
         unlabeled_subset = MagicMock(spec=UnlabeledDataConfig)
         unlabeled_subset.data_root = None
         tile_config = MagicMock(spec=TileConfig)
@@ -101,6 +104,7 @@ def test_init(
         fxt_config.train_subset.subset_name = "train_1"
         fxt_config.val_subset.subset_name = "val_1"
         fxt_config.test_subset.subset_name = "test_1"
+        input_size = (512, 512)

         # Dataset will have "train_0", "train_1", "val_0", ..., "test_1" subsets
         mock_dm_subsets = {f"{name}_{idx}": MagicMock() for name in ["train", "val", "test"] for idx in range(2)}
@@ -115,13 +119,17 @@ def test_init(
             train_subset=fxt_config.train_subset,
             val_subset=fxt_config.val_subset,
             test_subset=fxt_config.test_subset,
+            input_size=input_size,
         )

         assert module.train_dataloader().batch_size == 4
         assert module.val_dataloader().batch_size == 3
         assert module.test_dataloader().batch_size == 1
         assert module.predict_dataloader().batch_size == 1
         assert mock_otx_dataset_factory.create.call_count == 3
+        assert fxt_config.train_subset.input_size == input_size
+        assert fxt_config.val_subset.input_size == input_size
+        assert fxt_config.test_subset.input_size == input_size

     @patch("otx.core.data.module.OTXDatasetFactory")
     @patch("otx.core.data.module.DmDataset.import_from")
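Editor's note: the fixture now sets input_size = None on each subset mock explicitly. A MagicMock auto-creates attributes on access, so an unset input_size would come back as a child mock rather than None and silently defeat any "is None" check in the code under test. A quick illustration (not taken from the OTX test suite):

    from unittest.mock import MagicMock

    mock = MagicMock()
    print(mock.input_size is None)  # False: access returns an auto-created child mock
    mock.input_size = None
    print(mock.input_size is None)  # True: explicit sentinel the datamodule can check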
tests/unit/core/data/test_transform_libs.py (30 changes: 30 additions & 0 deletions)
@@ -186,6 +186,36 @@ def test_transform(
         item = dataset[0]
         assert isinstance(item, data_entity_cls)

+    @pytest.fixture
+    def fxt_config_w_input_size(self) -> list[dict[str, Any]]:
+        cfg = f"""
+        input_size:
+          - 224
+          - 224
+        transforms:
+          - class_path: otx.core.data.transform_libs.torchvision.ResizetoLongestEdge
+            init_args:
+              size: ^{{input_size}} * 2
+          - class_path: otx.core.data.transform_libs.torchvision.RandomResize
+            init_args:
+              scale: ^{{input_size}} * 0.5
+          - class_path: otx.core.data.transform_libs.torchvision.RandomCrop
+            init_args:
+              crop_size: ^{{input_size}}
+          - class_path: otx.core.data.transform_libs.torchvision.RandomResize
+            init_args:
+              scale: ^{{input_size}} * 1.1
+        """
+        return OmegaConf.create(cfg)
+
+    def test_eval_input_size(self, fxt_config_w_input_size):
+        transform = TorchVisionTransformLib.generate(fxt_config_w_input_size)
+        assert isinstance(transform, v2.Compose)
+        assert transform.transforms[0].size == 448  # ResizetoLongestEdge gets an integer
+        assert transform.transforms[1].scale == (112, 112)  # RandomResize gets sequence of integer
+        assert transform.transforms[2].crop_size == (224, 224)  # RandomCrop gets sequence of integer
+        assert transform.transforms[3].scale == (round(224 * 1.1), round(224 * 1.1))  # check round
+
     @pytest.fixture(params=["RGB", "BGR"])
     def fxt_image_color_channel(self, request) -> ImageColorChannel:
         return ImageColorChannel(request.param)
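Editor's note: the fixture builds its YAML inside an f-string, so the doubled braces in ^{{input_size}} are f-string escapes that render as a literal ^{input_size} token, which the transform library later substitutes. A two-line illustration:

    expr = f"scale: ^{{input_size}} * 0.5"
    print(expr)  # scale: ^{input_size} * 0.5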
tests/unit/core/utils/test_utils.py (6 changes: 6 additions & 0 deletions)
@@ -11,6 +11,7 @@
     get_mean_std_from_data_processing,
     is_ckpt_for_finetuning,
     is_ckpt_from_otx_v1,
+    get_obj_from_str,
 )

@@ -113,3 +114,8 @@ def test_get_idx_list_per_classes(fxt_dm_dataset):
     expected_result["0"] = list(range(100))
     expected_result["1"] = list(range(100, 108))
     assert result == expected_result
+
+def test_get_obj_from_str():
+    obj_path = "otx.core.utils.utils.get_mean_std_from_data_processing"
+    obj = get_obj_from_str(obj_path)
+    assert obj == get_mean_std_from_data_processing
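Editor's note: the implementation of get_obj_from_str is not part of this diff. A plausible sketch, assuming the conventional importlib pattern of splitting the dotted path at the last dot (the real OTX helper may differ):

    import importlib
    import os.path

    def get_obj_from_str_sketch(obj_path: str):
        """Resolve a dotted path like 'pkg.module.attr' to the attribute object."""
        module_path, _, obj_name = obj_path.rpartition(".")
        return getattr(importlib.import_module(module_path), obj_name)

    assert get_obj_from_str_sketch("os.path.join") is os.path.join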
