diff --git a/examples/model_compress/model_prune_torch.py b/examples/model_compress/model_prune_torch.py
index 5402fec9de..5f79ba104a 100644
--- a/examples/model_compress/model_prune_torch.py
+++ b/examples/model_compress/model_prune_torch.py
@@ -53,13 +53,13 @@
             'op_types': ['Conv2d']
         }]
     },
-    'l1': {
+    'l1filter': {
         'dataset_name': 'cifar10',
         'model_name': 'vgg16',
         'pruner_class': L1FilterPruner,
         'config_list': [{
             'sparsity': 0.5,
-            'op_types': ['default'],
+            'op_types': ['Conv2d'],
             'op_names': ['feature.0', 'feature.24', 'feature.27', 'feature.30', 'feature.34', 'feature.37']
         }]
     },
@@ -69,7 +69,7 @@
         'pruner_class': ActivationMeanRankFilterPruner,
         'config_list': [{
             'sparsity': 0.5,
-            'op_types': ['default'],
+            'op_types': ['Conv2d'],
             'op_names': ['feature.0', 'feature.24', 'feature.27', 'feature.30', 'feature.34', 'feature.37']
         }]
     },
diff --git a/examples/model_compress/model_speedup.py b/examples/model_compress/model_speedup.py
index f6ada91d96..2214fc137b 100644
--- a/examples/model_compress/model_speedup.py
+++ b/examples/model_compress/model_speedup.py
@@ -25,7 +25,7 @@
         'model_name': 'vgg16',
         'device': 'cuda',
         'input_shape': [64, 3, 32, 32],
-        'masks_file': './checkpoints/mask_vgg16_cifar10_l1.pth'
+        'masks_file': './checkpoints/mask_vgg16_cifar10_l1filter.pth'
     },
     'fpgm': {
         'model_name': 'naive',
diff --git a/examples/trials/mnist-pytorch/mnist.py b/examples/trials/mnist-pytorch/mnist.py
index 4ced9c9c7e..ec9641af00 100644
--- a/examples/trials/mnist-pytorch/mnist.py
+++ b/examples/trials/mnist-pytorch/mnist.py
@@ -15,14 +15,6 @@
 import torch.optim as optim
 from torchvision import datasets, transforms
 
-# Temporary patch this example until the MNIST dataset download issue get resolved
-# https://github.com/pytorch/vision/issues/1938
-import urllib
-
-opener = urllib.request.build_opener()
-opener.addheaders = [('User-agent', 'Mozilla/5.0')]
-urllib.request.install_opener(opener)
-
 logger = logging.getLogger('mnist_AutoML')
 
 
@@ -48,6 +40,8 @@ def forward(self, x):
 def train(args, model, device, train_loader, optimizer, epoch):
     model.train()
     for batch_idx, (data, target) in enumerate(train_loader):
+        if (args['batch_num'] is not None) and batch_idx >= args['batch_num']:
+            break
         data, target = data.to(device), target.to(device)
         optimizer.zero_grad()
         output = model(data)
@@ -119,16 +113,15 @@ def main(args):
         train(args, model, device, train_loader, optimizer, epoch)
         test_acc = test(args, model, device, test_loader)
 
-        if epoch < args['epochs']:
-            # report intermediate result
-            nni.report_intermediate_result(test_acc)
-            logger.debug('test accuracy %g', test_acc)
-            logger.debug('Pipe send intermediate result done.')
-        else:
-            # report final result
-            nni.report_final_result(test_acc)
-            logger.debug('Final result is %g', test_acc)
-            logger.debug('Send final result done.')
+        # report intermediate result
+        nni.report_intermediate_result(test_acc)
+        logger.debug('test accuracy %g', test_acc)
+        logger.debug('Pipe send intermediate result done.')
+
+    # report final result
+    nni.report_final_result(test_acc)
+    logger.debug('Final result is %g', test_acc)
+    logger.debug('Send final result done.')
 
 
 def get_params():
@@ -138,6 +131,7 @@ def get_params():
                         default='/tmp/pytorch/mnist/input_data', help="data directory")
     parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                         help='input batch size for training (default: 64)')
+    parser.add_argument("--batch_num", type=int, default=None)
    parser.add_argument("--hidden_size", type=int, default=512, metavar='N',
                         help='hidden layer size (default: 512)')
     parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
@@ -165,6 +159,7 @@ def get_params():
         logger.debug(tuner_params)
         params = vars(get_params())
         params.update(tuner_params)
+        print(params)
         main(params)
     except Exception as exception:
         logger.exception(exception)
diff --git a/test/config/examples/mnist-pytorch.yml b/test/config/examples/mnist-pytorch.yml
index 6aab3fc80f..c62f0579d4 100644
--- a/test/config/examples/mnist-pytorch.yml
+++ b/test/config/examples/mnist-pytorch.yml
@@ -1,7 +1,7 @@
 authorName: nni
 experimentName: default_test
 maxExecDuration: 15m
-maxTrialNum: 4
+maxTrialNum: 2
 trialConcurrency: 2
 searchSpacePath: ./mnist_pytorch_search_space.json
 
@@ -13,7 +13,7 @@ assessor:
     optimize_mode: maximize
 trial:
   codeDir: ../../../examples/trials/mnist-pytorch
-  command: python3 mnist.py --epochs 1
+  command: python3 mnist.py --epochs 1 --batch_num 10
   gpuNum: 0
 useAnnotation: false
 
diff --git a/test/config/integration_tests.yml b/test/config/integration_tests.yml
index bd52a69596..61bfc3a796 100644
--- a/test/config/integration_tests.yml
+++ b/test/config/integration_tests.yml
@@ -77,16 +77,15 @@ testCases:
     kwargs:
       expected_result_file: expected_metrics.json
 
-# to be enabled
-#- name: metrics-dict
-#  configFile: test/config/metrics_test/config_dict_metrics.yml
-#  config:
-#    maxTrialNum: 1
-#    trialConcurrency: 1
-#  validator:
-#    class: MetricsValidator
-#    kwargs:
-#      expected_result_file: expected_metrics_dict.json
+- name: metrics-dict
+  configFile: test/config/metrics_test/config_dict_metrics.yml
+  config:
+    maxTrialNum: 1
+    trialConcurrency: 1
+  validator:
+    class: MetricsValidator
+    kwargs:
+      expected_result_file: expected_metrics_dict.json
 
 - name: nnicli
   configFile: test/config/examples/sklearn-regression.yml
diff --git a/test/config/pr_tests.yml b/test/config/pr_tests.yml
index 365b038d0b..75be8bbc01 100644
--- a/test/config/pr_tests.yml
+++ b/test/config/pr_tests.yml
@@ -31,16 +31,15 @@ testCases:
     kwargs:
       expected_result_file: expected_metrics.json
 
-# to be enabled
-#- name: metrics-dict
-#  configFile: test/config/metrics_test/config_dict_metrics.yml
-#  config:
-#    maxTrialNum: 1
-#    trialConcurrency: 1
-#  validator:
-#    class: MetricsValidator
-#    kwargs:
-#      expected_result_file: expected_metrics_dict.json
+- name: metrics-dict
+  configFile: test/config/metrics_test/config_dict_metrics.yml
+  config:
+    maxTrialNum: 1
+    trialConcurrency: 1
+  validator:
+    class: MetricsValidator
+    kwargs:
+      expected_result_file: expected_metrics_dict.json
 
 - name: nnicli
   configFile: test/config/examples/sklearn-regression.yml
diff --git a/test/nni_test/nnitest/validators.py b/test/nni_test/nnitest/validators.py
index 2fc43abe89..26faf0f423 100644
--- a/test/nni_test/nnitest/validators.py
+++ b/test/nni_test/nnitest/validators.py
@@ -35,8 +35,8 @@ def check_metrics(self, nni_source_dir, **kwargs):
         assert len(trial_final_result) == 1, 'there should be 1 final result'
         assert trial_final_result[0] == expected_metrics['final_result']
         # encode dict/number into json string to compare them in set
-        assert set([json.dumps(x) for x in trial_intermediate_result]) \
-            == set([json.dumps(x) for x in expected_metrics['intermediate_result']])
+        assert set([json.dumps(x, sort_keys=True) for x in trial_intermediate_result]) \
+            == set([json.dumps(x, sort_keys=True) for x in expected_metrics['intermediate_result']])
 
     def get_metric_results(self, metrics):
         intermediate_result = {}
diff --git a/test/scripts/model_compression.sh b/test/scripts/model_compression.sh
index c3a7754fb5..f76a7064d6 100644
--- a/test/scripts/model_compression.sh
+++ b/test/scripts/model_compression.sh
@@ -6,22 +6,14 @@
 echo ""
 echo "===========================Testing: pruning and speedup==========================="
 cd ${CWD}/../examples/model_compress
-echo "testing slim pruning and speedup..."
-python3 model_prune_torch.py --pruner_name slim --pretrain_epochs 1 --prune_epochs 1
-python3 model_speedup.py --example_name slim --model_checkpoint ./checkpoints/pruned_vgg19_cifar10_slim.pth \
-    --masks_file ./checkpoints/mask_vgg19_cifar10_slim.pth
-
-echo "testing l1 pruning and speedup..."
-python3 model_prune_torch.py --pruner_name l1 --pretrain_epochs 1 --prune_epochs 1
-python3 model_speedup.py --example_name l1filter --model_checkpoint ./checkpoints/pruned_vgg16_cifar10_l1.pth \
-    --masks_file ./checkpoints/mask_vgg16_cifar10_l1.pth
-
-echo "testing apoz pruning and speedup..."
-python3 model_prune_torch.py --pruner_name apoz --pretrain_epochs 1 --prune_epochs 1
-python3 model_speedup.py --example_name apoz --model_checkpoint ./checkpoints/pruned_vgg16_cifar10_apoz.pth \
-    --masks_file ./checkpoints/mask_vgg16_cifar10_apoz.pth
-
-for name in level fpgm mean_activation
+for name in fpgm slim l1filter apoz
+do
+    echo "testing $name pruning and speedup..."
+    python3 model_prune_torch.py --pruner_name $name --pretrain_epochs 1 --prune_epochs 1
+    python3 model_speedup.py --example_name $name
+done
+
+for name in level mean_activation
 do
     echo "testing $name pruning..."
    python3 model_prune_torch.py --pruner_name $name --pretrain_epochs 1 --prune_epochs 1
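
Note: the new --batch_num flag caps how many minibatches train() processes per epoch. That is what lets test/config/examples/mnist-pytorch.yml pass --batch_num 10 and drop maxTrialNum from 4 to 2 while staying well inside maxExecDuration. A minimal standalone sketch of the guard's behavior; args and stub_loader are illustrative stand-ins for the real argparse dict and the MNIST DataLoader, not code from this patch:

    # args mirrors vars(get_params()) after tuner parameters are merged in.
    args = {'batch_num': 10}

    # Stand-in for train_loader: 1000 (data, target) batches.
    stub_loader = [(None, None)] * 1000

    processed = 0
    for batch_idx, (data, target) in enumerate(stub_loader):
        if (args['batch_num'] is not None) and batch_idx >= args['batch_num']:
            break  # end the epoch early once batch_num minibatches are done
        processed += 1

    assert processed == 10
    # With the default of None, the condition short-circuits and the full
    # training set is used, so normal runs are unchanged.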
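
Note: the sort_keys=True change in test/nni_test/nnitest/validators.py is what makes re-enabling the metrics-dict test cases safe. json.dumps serializes dict keys in insertion order, so two equal dict metrics reported with different key orders serialize to different strings, and the set comparison in check_metrics can fail spuriously; sorting the keys makes the serialization canonical. A minimal sketch of the failure mode, with illustrative values not taken from the test suite:

    import json

    a = {'default': 0.9, 'loss': 0.1}
    b = {'loss': 0.1, 'default': 0.9}   # equal dicts, different insertion order

    assert a == b                                # dict equality ignores key order
    assert json.dumps(a) != json.dumps(b)        # plain dumps keeps insertion order
    assert json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)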