[UT] reduce memory usage while running unit test #291

Merged 1 commit on Apr 27, 2022
23 changes: 10 additions & 13 deletions tests/test_models/test_algorithms/test_byol.py
@@ -8,15 +8,15 @@
 
 backbone = dict(
     type='ResNet',
-    depth=50,
+    depth=18,
     in_channels=3,
     out_indices=[4],  # 0: conv-1, x: stage-x
     norm_cfg=dict(type='BN'))
 neck = dict(
     type='NonLinearNeck',
-    in_channels=2048,
-    hid_channels=4,
-    out_channels=4,
+    in_channels=512,
+    hid_channels=2,
+    out_channels=2,
     with_bias=True,
     with_last_bn=False,
     with_avg_pool=True,
@@ -25,9 +25,9 @@
     type='LatentPredictHead',
     predictor=dict(
         type='NonLinearNeck',
-        in_channels=4,
-        hid_channels=4,
-        out_channels=4,
+        in_channels=2,
+        hid_channels=2,
+        out_channels=2,
         with_bias=True,
         with_last_bn=False,
         with_avg_pool=False,
@@ -42,15 +42,12 @@ def test_byol():
         alg = BYOL(backbone=backbone, neck=neck, head=None)
 
     alg = BYOL(backbone=backbone, neck=neck, head=head)
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     fake_backbone_out = alg.extract_feat(fake_input)
-    assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7])
+    assert fake_backbone_out[0].size() == torch.Size([2, 512, 7, 7])
     with pytest.raises(AssertionError):
         fake_out = alg.forward_train(fake_input)
 
-    fake_input = [
-        torch.randn((16, 3, 224, 224)),
-        torch.randn((16, 3, 224, 224))
-    ]
+    fake_input = [torch.randn((2, 3, 224, 224)), torch.randn((2, 3, 224, 224))]
     fake_out = alg.forward_train(fake_input)
     assert fake_out['loss'].item() > -4
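The pattern this PR repeats across the test suite starts here: the backbone drops from ResNet-50 to ResNet-18 and the fake batch from 16 to 2. ResNet-18's last stage produces 512-channel feature maps where ResNet-50's bottleneck blocks produce 2048, so every neck or head `in_channels` that read 2048 has to become 512 once `depth=18`. A minimal sketch of that channel assumption (not part of the PR; it uses torchvision backbones rather than mmselfsup's `ResNet` config, so treat the exact API as illustrative):

```python
# Sanity check for the config change: ResNet-18 vs ResNet-50 final-stage channels.
# Assumes torchvision is available; mmselfsup's ResNet behaves the same way here.
import torch
import torchvision


def feature_extractor(model: torch.nn.Module) -> torch.nn.Sequential:
    """Drop the avgpool and fc layers, keeping only the convolutional stages."""
    return torch.nn.Sequential(*list(model.children())[:-2])


x = torch.randn(2, 3, 224, 224)  # batch of 2, matching the updated tests
print(feature_extractor(torchvision.models.resnet18())(x).shape)  # [2, 512, 7, 7]
print(feature_extractor(torchvision.models.resnet50())(x).shape)  # [2, 2048, 7, 7]
```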
14 changes: 7 additions & 7 deletions tests/test_models/test_algorithms/test_classification.py
@@ -13,23 +13,23 @@ def test_classification():
     with_sobel = True,
     backbone = dict(
         type='ResNet',
-        depth=50,
+        depth=18,
         in_channels=2,
         out_indices=[4],  # 0: conv-1, x: stage-x
         norm_cfg=dict(type='BN'),
         frozen_stages=4)
     head = dict(
-        type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=4)
+        type='ClsHead', with_avg_pool=True, in_channels=512, num_classes=4)
 
     alg = Classification(backbone=backbone, with_sobel=with_sobel, head=head)
     assert hasattr(alg, 'sobel_layer')
     assert hasattr(alg, 'head')
 
-    fake_input = torch.randn((16, 3, 224, 224))
-    fake_labels = torch.ones(16, dtype=torch.long)
+    fake_input = torch.randn((2, 3, 224, 224))
+    fake_labels = torch.ones(2, dtype=torch.long)
     fake_out = alg.forward_test(fake_input)
     assert 'head4' in fake_out
-    assert fake_out['head4'].size() == torch.Size([16, 4])
+    assert fake_out['head4'].size() == torch.Size([2, 4])
 
     fake_out = alg.forward_train(fake_input, fake_labels)
     assert fake_out['loss'].item() > 0
@@ -51,7 +51,7 @@ def test_classification():
     alg = Classification(backbone=backbone, head=head)
     assert alg.with_head
 
-    fake_input = torch.randn((16, 3, 224, 224))
-    fake_labels = torch.ones(16, dtype=torch.long)
+    fake_input = torch.randn((2, 3, 224, 224))
+    fake_labels = torch.ones(2, dtype=torch.long)
     fake_out = alg.forward_train(fake_input, fake_labels)
     assert fake_out['loss'].item() > 0
10 changes: 5 additions & 5 deletions tests/test_models/test_algorithms/test_deepcluster.py
@@ -10,15 +10,15 @@
 with_sobel = True,
 backbone = dict(
     type='ResNet',
-    depth=50,
+    depth=18,
     in_channels=2,
     out_indices=[4],  # 0: conv-1, x: stage-x
     norm_cfg=dict(type='BN'))
 neck = dict(type='AvgPool2dNeck')
 head = dict(
     type='ClsHead',
     with_avg_pool=False,  # already has avgpool in the neck
-    in_channels=2048,
+    in_channels=512,
     num_classes=num_classes)
 
 
@@ -34,11 +34,11 @@ def test_deepcluster():
     assert hasattr(alg, 'neck')
     assert hasattr(alg, 'head')
 
-    fake_input = torch.randn((16, 3, 224, 224))
-    fake_labels = torch.ones(16, dtype=torch.long)
+    fake_input = torch.randn((2, 3, 224, 224))
+    fake_labels = torch.ones(2, dtype=torch.long)
     fake_out = alg.forward(fake_input, mode='test')
     assert 'head0' in fake_out
-    assert fake_out['head0'].size() == torch.Size([16, num_classes])
+    assert fake_out['head0'].size() == torch.Size([2, num_classes])
 
     fake_out = alg.forward_train(fake_input, fake_labels)
     alg.set_reweight(fake_labels)
20 changes: 10 additions & 10 deletions tests/test_models/test_algorithms/test_densecl.py
@@ -9,20 +9,20 @@
 from mmselfsup.models.algorithms import DenseCL
 
 queue_len = 32
-feat_dim = 4
+feat_dim = 2
 momentum = 0.999
 loss_lambda = 0.5
 backbone = dict(
     type='ResNet',
-    depth=50,
+    depth=18,
     in_channels=3,
     out_indices=[4],  # 0: conv-1, x: stage-x
     norm_cfg=dict(type='BN'))
 neck = dict(
     type='DenseCLNeck',
-    in_channels=2048,
-    hid_channels=4,
-    out_channels=4,
+    in_channels=512,
+    hid_channels=2,
+    out_channels=2,
     num_grid=None)
 head = dict(type='ContrastiveHead', temperature=0.2)
 
@@ -57,14 +57,14 @@ def test_densecl():
     assert alg.queue.size() == torch.Size([feat_dim, queue_len])
     assert alg.queue2.size() == torch.Size([feat_dim, queue_len])
 
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     with pytest.raises(AssertionError):
         fake_out = alg.forward_train(fake_input)
 
     fake_out = alg.forward_test(fake_input)
     assert fake_out[0] is None
     assert fake_out[2] is None
-    assert fake_out[1].size() == torch.Size([16, 2048, 49])
+    assert fake_out[1].size() == torch.Size([2, 512, 49])
 
     mmselfsup.models.algorithms.densecl.batch_shuffle_ddp = MagicMock(
         side_effect=mock_batch_shuffle_ddp)
@@ -75,10 +75,10 @@
     fake_loss = alg.forward_train([fake_input, fake_input])
     assert fake_loss['loss_single'] > 0
     assert fake_loss['loss_dense'] > 0
-    assert alg.queue_ptr.item() == 16
-    assert alg.queue2_ptr.item() == 16
+    assert alg.queue_ptr.item() == 2
+    assert alg.queue2_ptr.item() == 2
 
     # test train step with 2 keys in loss
     fake_outputs = alg.train_step(dict(img=[fake_input, fake_input]), None)
     assert fake_outputs['loss'].item() > -1
-    assert fake_outputs['num_samples'] == 16
+    assert fake_outputs['num_samples'] == 2
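The `queue_ptr` and `num_samples` asserts drop from 16 to 2 for the same reason: a MoCo/DenseCL-style feature queue advances its write pointer by exactly one batch per training step, so after a single step the pointer equals the batch size. A minimal ring-buffer sketch of that bookkeeping (an illustration of the mechanism, not the DenseCL source; `dequeue_and_enqueue` is a hypothetical stand-in):

```python
# Ring-buffer bookkeeping assumed by the test: the pointer moves by one batch per step.
import torch

queue_len, feat_dim, batch_size = 32, 2, 2
queue = torch.randn(feat_dim, queue_len)
queue_ptr = torch.zeros(1, dtype=torch.long)


def dequeue_and_enqueue(keys: torch.Tensor) -> None:
    """Write a batch of key features into the queue and advance the pointer."""
    ptr = int(queue_ptr)
    queue[:, ptr:ptr + keys.shape[0]] = keys.T  # queue_len is a multiple of batch_size
    queue_ptr[0] = (ptr + keys.shape[0]) % queue_len


dequeue_and_enqueue(torch.randn(batch_size, feat_dim))
print(int(queue_ptr))  # 2, matching `alg.queue_ptr.item() == 2` above
```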
4 changes: 2 additions & 2 deletions tests/test_models/test_algorithms/test_mae.py
@@ -30,8 +30,8 @@ def test_mae():
         alg = MAE(backbone=None, neck=neck, head=head)
     alg = MAE(backbone=backbone, neck=neck, head=head)
 
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     fake_loss = alg.forward_train(fake_input)
     fake_feature = alg.extract_feat(fake_input)
     assert isinstance(fake_loss['loss'].item(), float)
-    assert list(fake_feature[0].shape) == [16, 50, 768]
+    assert list(fake_feature[0].shape) == [2, 50, 768]
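Only the batch dimension of the MAE feature assertion changes: with the usual MAE setup (ViT-B/16 on 224x224 inputs and a 0.75 mask ratio, an assumption here rather than something stated in the diff), the encoder sees 49 visible patches plus one class token, i.e. 50 tokens of width 768, regardless of batch size. Rough arithmetic under those assumptions:

```python
# Where [2, 50, 768] comes from, assuming ViT-B/16, 224x224 inputs, 75% masking.
img_size, patch_size, mask_ratio, embed_dim, batch = 224, 16, 0.75, 768, 2
num_patches = (img_size // patch_size) ** 2      # 14 * 14 = 196
visible = int(num_patches * (1 - mask_ratio))    # 49 patches survive masking
tokens = visible + 1                             # plus the class token -> 50
print([batch, tokens, embed_dim])                # [2, 50, 768]
```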
tests/test_models/test_algorithms/test_mmcls_classifier_wrapper.py
@@ -21,7 +21,7 @@ def test_mmcls_classifier_wrapper():
         neck=dict(type='mmcls.GlobalAveragePooling'),
         head=dict(
             type='mmcls.LinearClsHead',
-            num_classes=1000,
+            num_classes=2,
             in_channels=1024,
             init_cfg=None,  # suppress the default init_cfg of LinearClsHead.
             loss=dict(
@@ -34,8 +34,8 @@
             dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
         ],
         train_cfg=dict(augments=[
-            dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
-            dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+            dict(type='BatchMixup', alpha=0.8, num_classes=2, prob=0.5),
+            dict(type='BatchCutMix', alpha=1.0, num_classes=2, prob=0.5)
         ]))
     model = ALGORITHMS.build(model_config)
     fake_inputs = torch.rand((2, 3, 192, 192))
@@ -47,7 +47,7 @@
 
     # test mode
     outputs = model(fake_inputs, mode='test')
-    assert list(outputs['head3'].shape) == [2, 1000]
+    assert list(outputs['head3'].shape) == [2, 2]
 
     # extract mode
     outputs = model(fake_inputs, mode='extract')
16 changes: 8 additions & 8 deletions tests/test_models/test_algorithms/test_moco.py
@@ -9,19 +9,19 @@
 from mmselfsup.models.algorithms import MoCo
 
 queue_len = 32
-feat_dim = 4
+feat_dim = 2
 momentum = 0.999
 backbone = dict(
     type='ResNet',
-    depth=50,
+    depth=18,
     in_channels=3,
     out_indices=[4],  # 0: conv-1, x: stage-x
     norm_cfg=dict(type='BN'))
 neck = dict(
     type='MoCoV2Neck',
-    in_channels=2048,
-    hid_channels=4,
-    out_channels=4,
+    in_channels=512,
+    hid_channels=2,
+    out_channels=2,
     with_avg_pool=True)
 head = dict(type='ContrastiveHead', temperature=0.2)
 
@@ -54,9 +54,9 @@ def test_moco():
         momentum=momentum)
     assert alg.queue.size() == torch.Size([feat_dim, queue_len])
 
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     fake_backbone_out = alg.extract_feat(fake_input)
-    assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7])
+    assert fake_backbone_out[0].size() == torch.Size([2, 512, 7, 7])
     with pytest.raises(AssertionError):
         fake_backbone_out = alg.forward_train(fake_input)
 
@@ -68,4 +68,4 @@ def test_moco():
         side_effect=mock_concat_all_gather)
     fake_loss = alg.forward_train([fake_input, fake_input])
     assert fake_loss['loss'] > 0
-    assert alg.queue_ptr.item() == 16
+    assert alg.queue_ptr.item() == 2
16 changes: 8 additions & 8 deletions tests/test_models/test_algorithms/test_mocov3.py
@@ -15,8 +15,8 @@
 neck = dict(
     type='NonLinearNeck',
     in_channels=384,
-    hid_channels=8,
-    out_channels=8,
+    hid_channels=2,
+    out_channels=2,
     num_layers=2,
     with_bias=False,
     with_last_bn=True,
@@ -28,9 +28,9 @@
     type='MoCoV3Head',
     predictor=dict(
         type='NonLinearNeck',
-        in_channels=8,
-        hid_channels=8,
-        out_channels=8,
+        in_channels=2,
+        hid_channels=2,
+        out_channels=2,
         num_layers=2,
         with_bias=False,
         with_last_bn=True,
@@ -51,7 +51,7 @@ def test_mocov3():
     alg.init_weights()
     alg.momentum_update()
 
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     fake_backbone_out = alg.forward(fake_input, mode='extract')
-    assert fake_backbone_out[0][0].size() == torch.Size([16, 384, 14, 14])
-    assert fake_backbone_out[0][1].size() == torch.Size([16, 384])
+    assert fake_backbone_out[0][0].size() == torch.Size([2, 384, 14, 14])
+    assert fake_backbone_out[0][1].size() == torch.Size([2, 384])
10 changes: 5 additions & 5 deletions tests/test_models/test_algorithms/test_npid.py
@@ -8,14 +8,14 @@
 
 backbone = dict(
     type='ResNet',
-    depth=50,
+    depth=18,
     in_channels=3,
     out_indices=[4],  # 0: conv-1, x: stage-x
     norm_cfg=dict(type='BN'))
 neck = dict(
-    type='LinearNeck', in_channels=2048, out_channels=4, with_avg_pool=True)
+    type='LinearNeck', in_channels=512, out_channels=2, with_avg_pool=True)
 head = dict(type='ContrastiveHead', temperature=0.07)
-memory_bank = dict(type='SimpleMemory', length=8, feat_dim=4, momentum=0.5)
+memory_bank = dict(type='SimpleMemory', length=8, feat_dim=2, momentum=0.5)
 
 
 @pytest.mark.skipif(
@@ -30,6 +30,6 @@ def test_npid():
 
     alg = NPID(
         backbone=backbone, neck=neck, head=head, memory_bank=memory_bank)
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     fake_backbone_out = alg.extract_feat(fake_input)
-    assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7])
+    assert fake_backbone_out[0].size() == torch.Size([2, 512, 7, 7])
16 changes: 8 additions & 8 deletions tests/test_models/test_algorithms/test_odc.py
@@ -9,26 +9,26 @@
 num_classes = 5
 backbone = dict(
     type='ResNet',
-    depth=50,
+    depth=18,
     in_channels=3,
     out_indices=[4],  # 0: conv-1, x: stage-x
     norm_cfg=dict(type='BN'))
 neck = dict(
     type='ODCNeck',
-    in_channels=2048,
-    hid_channels=4,
-    out_channels=4,
+    in_channels=512,
+    hid_channels=2,
+    out_channels=2,
     norm_cfg=dict(type='BN1d'),
     with_avg_pool=True)
 head = dict(
     type='ClsHead',
     with_avg_pool=False,
-    in_channels=4,
+    in_channels=2,
     num_classes=num_classes)
 memory_bank = dict(
     type='ODCMemory',
     length=8,
-    feat_dim=4,
+    feat_dim=2,
     momentum=0.5,
     num_classes=num_classes,
     min_cluster=2,
@@ -48,7 +48,7 @@ def test_odc():
     alg = ODC(backbone=backbone, neck=neck, head=head, memory_bank=memory_bank)
     alg.set_reweight()
 
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     fake_out = alg.forward_test(fake_input)
     assert 'head0' in fake_out
-    assert fake_out['head0'].size() == torch.Size([16, num_classes])
+    assert fake_out['head0'].size() == torch.Size([2, num_classes])
12 changes: 6 additions & 6 deletions tests/test_models/test_algorithms/test_relative_loc.py
@@ -8,16 +8,16 @@
 
 backbone = dict(
     type='ResNet',
-    depth=50,
+    depth=18,
     in_channels=3,
     out_indices=[4],  # 0: conv-1, x: stage-x
     norm_cfg=dict(type='BN'))
 neck = dict(
     type='RelativeLocNeck',
-    in_channels=2048,
-    out_channels=4,
+    in_channels=512,
+    out_channels=2,
     with_avg_pool=True)
-head = dict(type='ClsHead', with_avg_pool=False, in_channels=4, num_classes=8)
+head = dict(type='ClsHead', with_avg_pool=False, in_channels=2, num_classes=8)
 
 
 @pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
@@ -49,6 +49,6 @@ def test_relative_loc():
     assert 'head4' in fake_out
 
     # extract
-    fake_input = torch.randn((16, 3, 224, 224))
+    fake_input = torch.randn((2, 3, 224, 224))
     fake_backbone_out = alg.forward(fake_input, mode='extract')
-    assert fake_backbone_out[0].size() == torch.Size([16, 2048, 7, 7])
+    assert fake_backbone_out[0].size() == torch.Size([2, 512, 7, 7])