Commit
Merge pull request #1078 from openvinotoolkit/sstrehlk/add_instance_segmentation_training_tests

Add Instance Segmentation (object counting) training tests.
goodsong81 authored and sstrehlk committed May 2, 2022
1 parent 91463b3 commit 2cf0444
Showing 2 changed files with 231 additions and 97 deletions.
@@ -349,3 +349,47 @@
: "metrics.accuracy.f-measure":
"base": "nncf_evaluation.metrics.accuracy.f-measure"
"max_diff": 0.06

? "ACTION-training_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"target_value": 0.92
"max_diff_if_less_threshold": 0.06
"max_diff_if_greater_threshold": 0.06
? "ACTION-export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": "training_evaluation.metrics.accuracy.f-measure"
"max_diff": 0.01
? "ACTION-pot_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": "training_evaluation.metrics.accuracy.f-measure"
"max_diff": 0.01
? "ACTION-nncf_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": "training_evaluation.metrics.accuracy.f-measure"
"max_diff_if_less_threshold": 0.01
? "ACTION-nncf_export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": "nncf_evaluation.metrics.accuracy.f-measure"
"max_diff": 0.01

? "ACTION-training_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"target_value": 0.92
"max_diff_if_less_threshold": 0.06
"max_diff_if_greater_threshold": 0.06
? "ACTION-export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": "training_evaluation.metrics.accuracy.f-measure"
"max_diff": 0.01
? "ACTION-pot_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": "training_evaluation.metrics.accuracy.f-measure"
"max_diff": 0.01
? "ACTION-nncf_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": "training_evaluation.metrics.accuracy.f-measure"
"max_diff_if_less_threshold": 0.01
? "ACTION-nncf_export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
: "metrics.accuracy.f-measure":
"base": 'nncf_evaluation.metrics.accuracy.f-measure'
"max_diff": 0.01
284 changes: 187 additions & 97 deletions external/mmdetection/tests/test_ote_training.py
@@ -70,26 +70,26 @@ def _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_na
return params


def _create_object_detection_dataset_and_labels_schema(dataset_params):
def _create_object_detection_dataset_and_labels_schema(dataset_params, domain: Domain):
logger.debug(f'Using for train annotation file {dataset_params.annotations_train}')
logger.debug(f'Using for val annotation file {dataset_params.annotations_val}')
labels_list = []
items = load_dataset_items_coco_format(
ann_file_path=dataset_params.annotations_train,
data_root_dir=dataset_params.images_train_dir,
domain=Domain.DETECTION,
domain=domain,
subset=Subset.TRAINING,
labels_list=labels_list)
items.extend(load_dataset_items_coco_format(
ann_file_path=dataset_params.annotations_val,
data_root_dir=dataset_params.images_val_dir,
domain=Domain.DETECTION,
domain=domain,
subset=Subset.VALIDATION,
labels_list=labels_list))
items.extend(load_dataset_items_coco_format(
ann_file_path=dataset_params.annotations_test,
data_root_dir=dataset_params.images_test_dir,
domain=Domain.DETECTION,
domain=domain,
subset=Subset.TESTING,
labels_list=labels_list))
dataset = DatasetEntity(items=items)
@@ -98,7 +98,6 @@ def _create_object_detection_dataset_and_labels_schema(dataset_params):


class ObjectDetectionTrainingTestParameters(DefaultOTETestCreationParametersInterface):

def test_bunches(self) -> List[Dict[str, Any]]:
test_bunches = [
dict(
@@ -133,6 +132,32 @@ def test_bunches(self) -> List[Dict[str, Any]]:
return deepcopy(test_bunches)


class InstanceSegmentationTrainingTestParameters(DefaultOTETestCreationParametersInterface):
def test_bunches(self) -> List[Dict[str, Any]]:
test_bunches = [
dict(
model_name=[
'Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50',
'Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B',
],
dataset_name='aeromonas_short',
usecase='precommit',
),
dict(
model_name=[
'Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50',
'Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B',
],
dataset_name='aeromonas',
num_training_iters=KEEP_CONFIG_FIELD_VALUE,
batch_size=KEEP_CONFIG_FIELD_VALUE,
usecase=REALLIFE_USECASE_CONSTANT,
),

]
return deepcopy(test_bunches)


def get_dummy_compressed_model(task):
"""
Return compressed model without initialization
@@ -156,6 +181,128 @@ def get_dummy_compressed_model(task):
return compressed_model


@pytest.fixture
def params_factories_for_test_actions_fx(current_test_parameters_fx,
dataset_definitions_fx, template_paths_fx,
ote_current_reference_dir_fx) -> Dict[str, Callable[[], Dict]]:
logger.debug('params_factories_for_test_actions_fx: begin')

test_parameters = deepcopy(current_test_parameters_fx)
dataset_definitions = deepcopy(dataset_definitions_fx)
template_paths = deepcopy(template_paths_fx)

def _training_params_factory() -> Dict:
if dataset_definitions is None:
pytest.skip('The parameter "--dataset-definitions" is not set')

model_name = test_parameters['model_name']
if "Custom_Object_Detection" in model_name:
domain = Domain.DETECTION
elif "Custom_Counting_Instance_Segmentation" in model_name:
domain = Domain.INSTANCE_SEGMENTATION
else:
domain = None
dataset_name = test_parameters['dataset_name']
num_training_iters = test_parameters['num_training_iters']
batch_size = test_parameters['batch_size']

dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)

if model_name not in template_paths:
raise ValueError(f'Model {model_name} is absent in template_paths, '
f'template_paths.keys={list(template_paths.keys())}')
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])

logger.debug('training params factory: Before creating dataset and labels_schema')
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(
dataset_params, domain)
logger.debug('training params factory: After creating dataset and labels_schema')

return {
'dataset': dataset,
'labels_schema': labels_schema,
'template_path': template_path,
'num_training_iters': num_training_iters,
'batch_size': batch_size,
}

def _nncf_graph_params_factory() -> Dict:
if dataset_definitions is None:
pytest.skip('The parameter "--dataset-definitions" is not set')

model_name = test_parameters['model_name']
if "Custom_Object_Detection" in model_name:
domain = Domain.DETECTION
elif "Custom_Counting_Instance_Segmentation" in model_name:
domain = Domain.INSTANCE_SEGMENTATION
else:
domain = None
dataset_name = test_parameters['dataset_name']

dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)

if model_name not in template_paths:
raise ValueError(f'Model {model_name} is absent in template_paths, '
f'template_paths.keys={list(template_paths.keys())}')
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])

logger.debug('training params factory: Before creating dataset and labels_schema')
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(
dataset_params, domain)
logger.debug('training params factory: After creating dataset and labels_schema')

return {
'dataset': dataset,
'labels_schema': labels_schema,
'template_path': template_path,
'reference_dir': ote_current_reference_dir_fx,
'fn_get_compressed_model': get_dummy_compressed_model,
}

params_factories_for_test_actions = {
'training': _training_params_factory,
'nncf_graph': _nncf_graph_params_factory,
}
logger.debug('params_factories_for_test_actions_fx: end')
return params_factories_for_test_actions


# TODO(lbeynens): move to common fixtures
@pytest.fixture
def data_collector_fx(request) -> DataCollector:
setup = deepcopy(request.node.callspec.params)
setup['environment_name'] = os.environ.get('TT_ENVIRONMENT_NAME', 'no-env')
setup['test_type'] = os.environ.get('TT_TEST_TYPE', 'no-test-type') # TODO: get from e2e test type
setup['scenario'] = 'api' # TODO(lbeynens): get from a fixture!
setup['test'] = request.node.name
setup['project'] = 'ote'
if 'test_parameters' in setup:
assert isinstance(setup['test_parameters'], dict)
if 'dataset_name' not in setup:
setup['dataset_name'] = setup['test_parameters'].get('dataset_name')
if 'model_name' not in setup:
setup['model_name'] = setup['test_parameters'].get('model_name')
if 'test_stage' not in setup:
setup['test_stage'] = setup['test_parameters'].get('test_stage')
if 'usecase' not in setup:
setup['usecase'] = setup['test_parameters'].get('usecase')
model_name = setup['test_parameters'].get('model_name')
if "Custom_Object_Detection" in model_name:
subject = 'custom-object-detection'
elif "Custom_Counting_Instance_Segmentation" in model_name:
subject = 'custom-counting-instance-seg'
else:
subject = None
setup['subject'] = subject
logger.info(f'creating DataCollector: setup=\n{pformat(setup, width=140)}')
data_collector = DataCollector(name='TestOTEIntegration',
setup=setup)
with data_collector:
logger.info('data_collector is created')
yield data_collector
logger.info('data_collector is released')


class TestOTEReallifeObjectDetection(OTETrainingTestInterface):
"""
The main class for running the object detection tests in this file.
@@ -172,74 +319,45 @@ def get_list_of_tests(cls, usecase: Optional[str] = None):
return cls.helper.get_list_of_tests(usecase)

@pytest.fixture
def params_factories_for_test_actions_fx(self, current_test_parameters_fx,
dataset_definitions_fx, template_paths_fx,
ote_current_reference_dir_fx) -> Dict[str,Callable[[], Dict]]:
logger.debug('params_factories_for_test_actions_fx: begin')

test_parameters = deepcopy(current_test_parameters_fx)
dataset_definitions = deepcopy(dataset_definitions_fx)
template_paths = deepcopy(template_paths_fx)
def _training_params_factory() -> Dict:
if dataset_definitions is None:
pytest.skip('The parameter "--dataset-definitions" is not set')

model_name = test_parameters['model_name']
dataset_name = test_parameters['dataset_name']
num_training_iters = test_parameters['num_training_iters']
batch_size = test_parameters['batch_size']

dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)

if model_name not in template_paths:
raise ValueError(f'Model {model_name} is absent in template_paths, '
f'template_paths.keys={list(template_paths.keys())}')
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])

logger.debug('training params factory: Before creating dataset and labels_schema')
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(dataset_params)
logger.debug('training params factory: After creating dataset and labels_schema')

return {
'dataset': dataset,
'labels_schema': labels_schema,
'template_path': template_path,
'num_training_iters': num_training_iters,
'batch_size': batch_size,
}

def _nncf_graph_params_factory() -> Dict:
if dataset_definitions is None:
pytest.skip('The parameter "--dataset-definitions" is not set')

model_name = test_parameters['model_name']
dataset_name = test_parameters['dataset_name']

dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)
def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_actions_fx):
"""
This fixture returns the test case class OTEIntegrationTestCase that should be used for the current test.
Note that the cache in the test helper makes it possible to store the instance of the class
between the tests.
If the main parameters used for this test are the same as the main parameters used for the previous test,
the instance of the test case class is kept and re-used. This is helpful for tests that can
re-use the results of operations (model training, model optimization, etc.) performed for the previous tests,
when these operations are time-consuming.
If the main parameters used for this test differ from those of the previous test, a new instance of
the test case class is created.
"""
test_case = type(self).helper.get_test_case(current_test_parameters_fx,
params_factories_for_test_actions_fx)
return test_case

if model_name not in template_paths:
raise ValueError(f'Model {model_name} is absent in template_paths, '
f'template_paths.keys={list(template_paths.keys())}')
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])
@e2e_pytest_performance
def test(self,
test_parameters,
test_case_fx, data_collector_fx,
cur_test_expected_metrics_callback_fx):
test_case_fx.run_stage(test_parameters['test_stage'], data_collector_fx,
cur_test_expected_metrics_callback_fx)

logger.debug('training params factory: Before creating dataset and labels_schema')
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(dataset_params)
logger.debug('training params factory: After creating dataset and labels_schema')

return {
'dataset': dataset,
'labels_schema': labels_schema,
'template_path': template_path,
'reference_dir': ote_current_reference_dir_fx,
'fn_get_compressed_model': get_dummy_compressed_model,
}
class TestInstanceSegmentation(OTETrainingTestInterface):
"""
The main class for running the instance segmentation tests in this file.
"""
PERFORMANCE_RESULTS = None # it is required for e2e system
helper = OTETestHelper(InstanceSegmentationTrainingTestParameters())

params_factories_for_test_actions = {
'training': _training_params_factory,
'nncf_graph': _nncf_graph_params_factory,
}
logger.debug('params_factories_for_test_actions_fx: end')
return params_factories_for_test_actions
@classmethod
def get_list_of_tests(cls, usecase: Optional[str] = None):
"""
This method should be a classmethod. It is called before fixture initialization, during
test discovery.
"""
return cls.helper.get_list_of_tests(usecase)

@pytest.fixture
def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_actions_fx):
@@ -258,34 +376,6 @@ def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_act
params_factories_for_test_actions_fx)
return test_case

# TODO(lbeynens): move to common fixtures
@pytest.fixture
def data_collector_fx(self, request) -> DataCollector:
setup = deepcopy(request.node.callspec.params)
setup['environment_name'] = os.environ.get('TT_ENVIRONMENT_NAME', 'no-env')
setup['test_type'] = os.environ.get('TT_TEST_TYPE', 'no-test-type') # TODO: get from e2e test type
setup['scenario'] = 'api' # TODO(lbeynens): get from a fixture!
setup['test'] = request.node.name
setup['subject'] = 'custom-object-detection'
setup['project'] = 'ote'
if 'test_parameters' in setup:
assert isinstance(setup['test_parameters'], dict)
if 'dataset_name' not in setup:
setup['dataset_name'] = setup['test_parameters'].get('dataset_name')
if 'model_name' not in setup:
setup['model_name'] = setup['test_parameters'].get('model_name')
if 'test_stage' not in setup:
setup['test_stage'] = setup['test_parameters'].get('test_stage')
if 'usecase' not in setup:
setup['usecase'] = setup['test_parameters'].get('usecase')
logger.info(f'creating DataCollector: setup=\n{pformat(setup, width=140)}')
data_collector = DataCollector(name='TestOTEIntegration',
setup=setup)
with data_collector:
logger.info('data_collector is created')
yield data_collector
logger.info('data_collector is released')

@e2e_pytest_performance
def test(self,
test_parameters,

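For context, a hypothetical local invocation of the newly added instance segmentation tests is sketched below. It assumes the suite's conftest registers the "--dataset-definitions" option referenced by the skip messages in the diff above; the dataset-definitions file path is a placeholder, not a real path from this repository.

# Hypothetical invocation sketch; the dataset-definitions path is a placeholder.
import pytest

pytest.main([
    "external/mmdetection/tests/test_ote_training.py",
    "-k", "TestInstanceSegmentation",
    "--dataset-definitions", "/path/to/dataset_definitions.yml",
])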