Merge pull request #300 from microsoft/master
merge master
SparkSnail authored Jul 6, 2021
2 parents f9dbdb4 + e50270c commit 437b020
Showing 71 changed files with 536 additions and 227 deletions.
6 changes: 6 additions & 0 deletions .gitignore
@@ -32,6 +32,12 @@ lib-cov
 
 # Coverage directory used by tools like istanbul
 coverage
+junit/
+coverage.xml
+test-*.xml
+.coverage.*
+htmlcov/
+.coverage
 
 # nyc test coverage
 .nyc_output
2 changes: 2 additions & 0 deletions dependencies/develop.txt
@@ -6,5 +6,7 @@ sphinx-rtd-theme
 sphinxcontrib-websupport
 nbsphinx
 pytest
+pytest-cov
+pytest-azurepipelines
 coverage
 ipython
1 change: 1 addition & 0 deletions dependencies/recommended.txt
@@ -2,6 +2,7 @@
 
 -f https://download.pytorch.org/whl/torch_stable.html
 tensorflow
+keras
 torch == 1.6.0+cpu ; sys_platform != "darwin"
 torch == 1.6.0 ; sys_platform == "darwin"
 torchvision == 0.7.0+cpu ; sys_platform != "darwin"
1 change: 0 additions & 1 deletion dependencies/required.txt
@@ -1,7 +1,6 @@
 astor
 hyperopt == 0.1.2
 json_tricks
-netifaces
 psutil
 pyyaml
 requests
Empty file added nni/algorithms/__init__.py
@@ -5,8 +5,6 @@
 from pathlib import Path, PurePath
 from typing import overload, Union, List
 
-from numpy import tri
-
 from nni.experiment import Experiment, ExperimentConfig
 from nni.algorithms.compression.pytorch.auto_compress.interface import AbstractAutoCompressionModule
 
@@ -62,7 +60,8 @@ def __init__(self, auto_compress_module: AbstractAutoCompressionModule, config=N
 
     def start(self, port: int, debug: bool) -> None:
         trial_code_directory = str(PurePath(Path(self.config.trial_code_directory).absolute())) + '/'
-        assert self.module_file_path.startswith(trial_code_directory), 'The file path of the user-provided module should under trial_code_directory.'
+        assert self.module_file_path.startswith(trial_code_directory), \
+            'The file path of the user-provided module should be under trial_code_directory.'
         relative_module_path = self.module_file_path.split(trial_code_directory)[1]
         # only support linux, need refactor?
         command = 'python3 -m nni.algorithms.compression.pytorch.auto_compress.trial_entry --module_file_name {} --module_class_name {}'
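For orientation, a runnable sketch of the trial command that start() assembles. The directory, file path, and class name below are invented for illustration; only the command template and the path split come from the code above.

    # Hypothetical values, not from the commit:
    trial_code_directory = '/home/user/project/'
    module_file_path = '/home/user/project/compress/my_module.py'

    relative_module_path = module_file_path.split(trial_code_directory)[1]  # 'compress/my_module.py'
    command = ('python3 -m nni.algorithms.compression.pytorch.auto_compress.trial_entry '
               '--module_file_name {} --module_class_name {}')
    print(command.format(relative_module_path, 'MyAutoCompressionModule'))
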
5 changes: 3 additions & 2 deletions nni/algorithms/compression/pytorch/auto_compress/utils.py
@@ -31,10 +31,11 @@ def _add_pruner_config(self, pruner_name: str, config_list: list, **algo_kwargs):
         pruner_name
             Supported pruner name: 'level', 'slim', 'l1', 'l2', 'fpgm', 'taylorfo', 'apoz', 'mean_activation'.
         config_list
-            Except 'op_types' and 'op_names', other config value can be written as `{'_type': ..., '_value': ...}`.
+            Except 'op_types' and 'op_names', other config values can be written as ``{'_type': ..., '_value': ...}``.
         **algo_kwargs
             The additional pruner parameters except 'model', 'config_list', 'optimizer', 'trainer', 'criterion'.
-            i.e., you can set `statistics_batch_num={'_type': 'choice', '_value': [1, 2, 3]}` in TaylorFOWeightFilterPruner or just `statistics_batch_num=1`.
+            e.g., you can set ``statistics_batch_num={'_type': 'choice', '_value': [1, 2, 3]}``
+            in TaylorFOWeightFilterPruner or just ``statistics_batch_num=1``.
         """
         sub_search_space = {'_name': pruner_name}
         for config in config_list:
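To make the documented format concrete, a hedged sketch of one entry such a generator might produce. Only '_name', the ``{'_type': ..., '_value': ...}`` notation, and the statistics_batch_num example come from the docstring and code above; the 'sparsity' key and the exact nesting are assumptions.

    sub_search_space = {
        '_name': 'taylorfo',                                         # one of the supported pruner names
        'config_list': [{
            'op_types': ['Conv2d'],                                  # passed through unchanged
            'sparsity': {'_type': 'uniform', '_value': [0.2, 0.8]},  # searched value (assumed key)
        }],
        'statistics_batch_num': {'_type': 'choice', '_value': [1, 2, 3]},  # extra pruner kwarg
    }
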
@@ -84,7 +84,7 @@ def compress(self):
             self._trainer(self.bound_model, optimizer=self.optimizer, criterion=self._criterion, epoch=epoch)
             # NOTE: workaround for statistics_batch_num bigger than max batch number in one epoch, need refactor
             if hasattr(self.masker, 'statistics_batch_num') and hasattr(self, 'iterations'):
-                if self.iterations < self.masker.statistics_batch_num:
+                if self.iterations < self.masker.statistics_batch_num:  # pylint: disable=access-member-before-definition
                     self.iterations = self.masker.statistics_batch_num
             self.update_mask()
         self.bound_model.train(training)
@@ -118,7 +118,8 @@ class AGPPruner(IterativePruner):
         choose from `['level', 'slim', 'l1', 'l2', 'fpgm', 'taylorfo', 'apoz', 'mean_activation']`, by default `level`
     """
 
-    def __init__(self, model, config_list, optimizer, trainer, criterion, num_iterations=10, epochs_per_iteration=1, pruning_algorithm='level'):
+    def __init__(self, model, config_list, optimizer, trainer, criterion,
+                 num_iterations=10, epochs_per_iteration=1, pruning_algorithm='level'):
         super().__init__(model, config_list, optimizer=optimizer, trainer=trainer, criterion=criterion,
                          num_iterations=num_iterations, epochs_per_iteration=epochs_per_iteration)
         assert isinstance(optimizer, torch.optim.Optimizer), "AGP pruner is an iterative pruner, please pass optimizer of the model to it"
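Based on the signature above, a minimal usage sketch. The model, data loader, and sparsity target are assumptions; the import path and argument order follow this repository's layout.

    import torch
    import torch.nn.functional as F
    from nni.algorithms.compression.pytorch.pruning import AGPPruner

    # `model` and `train_loader` are assumed to exist.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    def trainer(model, optimizer, criterion, epoch):
        model.train()
        for data, target in train_loader:
            optimizer.zero_grad()
            criterion(model(data), target).backward()
            optimizer.step()

    config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]  # assumed pruning target
    pruner = AGPPruner(model, config_list, optimizer, trainer, F.cross_entropy,
                       num_iterations=10, epochs_per_iteration=1, pruning_algorithm='l1')
    model = pruner.compress()

Note the assert in __init__: AGP is iterative, so a real torch.optim.Optimizer is mandatory.
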
@@ -10,9 +10,10 @@
 from schema import And, Optional
 from nni.compression.pytorch.compressor import Pruner
 from nni.compression.pytorch.utils.config_validation import CompressorSchema
-from .constants_pruner import PRUNER_DICT
 from nni.compression.pytorch.utils.sensitivity_analysis import SensitivityAnalysis
 
+from .constants_pruner import PRUNER_DICT
+
 
 MAX_PRUNE_RATIO_PER_ITER = 0.95
 
5 changes: 3 additions & 2 deletions nni/algorithms/compression/pytorch/quantization/quantizers.py
@@ -245,7 +245,7 @@ def _dequantize(self, op, quantized_val):
     def quantize_weight(self, wrapper, **kwargs):
         config = wrapper.config
         module = wrapper.module
-        input = kwargs['input_tensor']
+        input = kwargs['input_tensor']  # pylint: disable=redefined-builtin
         weight = copy.deepcopy(wrapper.module.old_weight.data)
         weight_bits = get_bits_length(config, 'weight')
         quant_start_step = config.get('quant_start_step', 0)
@@ -304,7 +304,8 @@ def quantize_output(self, output, wrapper, **kwargs):
                                                    module.ema_decay)
         module.tracked_max_activation = update_ema(module.tracked_max_activation, current_max,
                                                    module.ema_decay)
-        module.scale, module.zero_point = update_quantization_param(output_bits, module.tracked_min_activation, module.tracked_max_activation)
+        module.scale, module.zero_point = update_quantization_param(
+            output_bits, module.tracked_min_activation, module.tracked_max_activation)
         out = self._quantize(output_bits, module, output)
         out = self._dequantize(module, out)
         return out
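The code above tracks activation ranges with an exponential moving average and converts them to a scale and zero point. A minimal sketch of that standard asymmetric-quantization recipe, assuming the two helpers behave like the textbook versions (this is not NNI's exact implementation):

    def update_ema(biased_ema, value, decay):
        # Smooth observed statistics so a single batch cannot swing the range.
        return biased_ema * decay + (1 - decay) * value

    def update_quantization_param(bits, rmin, rmax):
        # Map [rmin, rmax] onto the integer grid [0, 2**bits - 1],
        # widening the range so that 0.0 stays exactly representable.
        rmin, rmax = min(rmin, 0.0), max(rmax, 0.0)
        qmin, qmax = 0, (1 << bits) - 1
        scale = (rmax - rmin) / (qmax - qmin)
        zero_point = round(qmin - rmin / scale)
        return scale, zero_point

    scale, zero_point = update_quantization_param(8, rmin=-0.75, rmax=3.2)
    # A value x then quantizes to clamp(round(x / scale) + zero_point, 0, 255).
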
Empty file added nni/algorithms/hpo/__init__.py
2 changes: 2 additions & 0 deletions nni/algorithms/hpo/batch_tuner.py
@@ -9,6 +9,7 @@ class BatchTuner
 import logging
 
 import nni
+from nni.common.hpo_utils import validate_search_space
 from nni.tuner import Tuner
 
 TYPE = '_type'
@@ -75,6 +76,7 @@ def update_search_space(self, search_space):
         ----------
         search_space : dict
         """
+        validate_search_space(search_space, ['choice'])
         self._values = self.is_valid(search_space)
 
     def generate_parameters(self, parameter_id, **kwargs):
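BatchTuner now rejects anything but 'choice' up front. Following NNI's search space spec, a space that passes this validation looks like the sketch below; the parameter name is illustrative.

    search_space = {
        'combine_params': {
            '_type': 'choice',
            '_value': [
                {'optimizer': 'Adam', 'learning_rate': 0.001},
                {'optimizer': 'SGD', 'learning_rate': 0.01},
            ],
        }
    }
    # Each generated trial receives exactly one element of '_value';
    # any non-'choice' '_type' now fails fast in update_search_space().
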
7 changes: 5 additions & 2 deletions nni/algorithms/hpo/dngo_tuner.py
@@ -2,11 +2,13 @@
 
 import numpy as np
 import torch
+from pybnn import DNGO
+from torch.distributions import Normal
 
 import nni.parameter_expressions as parameter_expressions
 from nni import ClassArgsValidator
+from nni.common.hpo_utils import validate_search_space
 from nni.tuner import Tuner
-from pybnn import DNGO
-from torch.distributions import Normal
+
 
 _logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -85,6 +87,7 @@ def generate_parameters(self, parameter_id, **kwargs):
return new_x

def update_search_space(self, search_space):
validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform', 'loguniform', 'qloguniform'])
self.searchspace_json = search_space
self.random_state = np.random.RandomState()

Expand Down
2 changes: 2 additions & 0 deletions nni/algorithms/hpo/gp_tuner/gp_tuner.py
@@ -16,6 +16,7 @@
 from sklearn.gaussian_process import GaussianProcessRegressor
 
 from nni import ClassArgsValidator
+from nni.common.hpo_utils import validate_search_space
 from nni.tuner import Tuner
 from nni.utils import OptimizeMode, extract_scalar_reward
 
@@ -103,6 +104,7 @@ def update_search_space(self, search_space):
         Override of the abstract method in :class:`~nni.tuner.Tuner`.
         """
+        validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform', 'loguniform', 'qloguniform'])
         self._space = TargetSpace(search_space, self._random_state)
 
     def generate_parameters(self, parameter_id, **kwargs):
2 changes: 2 additions & 0 deletions nni/algorithms/hpo/gridsearch_tuner.py
@@ -11,6 +11,7 @@ class GridSearchTuner
 import numpy as np
 
 import nni
+from nni.common.hpo_utils import validate_search_space
 from nni.tuner import Tuner
 from nni.utils import convert_dict2tuple
 
@@ -144,6 +145,7 @@ def update_search_space(self, search_space):
         search_space : dict
             The format could be referred to search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
         """
+        validate_search_space(search_space, ['choice', 'randint', 'quniform'])
         self.expanded_search_space = self._json2parameter(search_space)
 
     def generate_parameters(self, parameter_id, **kwargs):
11 changes: 8 additions & 3 deletions nni/algorithms/hpo/hyperband_advisor.py
@@ -15,6 +15,7 @@
 from schema import Schema, Optional
 
 from nni import ClassArgsValidator
+from nni.common.hpo_utils import validate_search_space
 from nni.runtime.common import multi_phase_enabled
 from nni.runtime.msg_dispatcher_base import MsgDispatcherBase
 from nni.runtime.protocol import CommandType, send
@@ -265,8 +266,10 @@ def validate_class_args(self, **kwargs):
         }).validate(kwargs)
 
 class Hyperband(MsgDispatcherBase):
-    """Hyperband inherit from MsgDispatcherBase rather than Tuner, because it integrates both tuner's functions and assessor's functions.
-    This is an implementation that could fully leverage available resources or follow the algorithm process, i.e., high parallelism or serial.
+    """
+    Hyperband inherits from MsgDispatcherBase rather than Tuner, because it integrates both the tuner's and the assessor's functions.
+    This is an implementation that can either fully leverage available resources or follow the algorithm process,
+    i.e., run with high parallelism or serially.
 
     A single execution of Hyperband takes a finite budget of (s_max + 1)B.
 
     Parameters
@@ -346,7 +349,8 @@ def _get_one_trial_job(self):
             self.curr_hb += 1
             _logger.debug('create a new bracket, self.curr_hb=%d, self.curr_s=%d', self.curr_hb, self.curr_s)
             self.curr_bracket_id = '{}-{}'.format(self.curr_hb, self.curr_s)
-            self.brackets[self.curr_bracket_id] = Bracket(self.curr_bracket_id, self.curr_s, self.s_max, self.eta, self.R, self.optimize_mode)
+            self.brackets[self.curr_bracket_id] = Bracket(
+                self.curr_bracket_id, self.curr_s, self.s_max, self.eta, self.R, self.optimize_mode)
             next_n, next_r = self.brackets[self.curr_bracket_id].get_n_r()
             _logger.debug('new bracket, next_n=%d, next_r=%d', next_n, next_r)
             assert self.searchspace_json is not None and self.random_state is not None
@@ -376,6 +380,7 @@ def _get_one_trial_job(self):
     def handle_update_search_space(self, data):
         """data: JSON object, which is search space
         """
+        validate_search_space(data)
         self.searchspace_json = data
         self.random_state = np.random.RandomState()
 
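The docstring's budget claim, that a single run costs (s_max + 1)B, can be made concrete. A worked sketch of the bracket schedule from the Hyperband paper (Li et al.), with assumed R = 81 and eta = 3; it mirrors the published algorithm, not NNI's exact get_n_r() implementation.

    import math

    R, eta = 81, 3                                 # assumed: max resource per trial, reduction factor
    s_max = math.floor(math.log(R, eta) + 1e-10)   # guard against float error; 4 here
    B = (s_max + 1) * R                            # budget of one bracket

    for s in range(s_max, -1, -1):
        n = math.ceil((B / R) * eta ** s / (s + 1))  # initial number of configurations
        r = R * eta ** (-s)                          # initial resource per configuration
        print(f's={s}: {n} configs at resource {r:g} each')
    # s=4: 81 configs at 1; s=3: 34 at 3; s=2: 15 at 9; s=1: 8 at 27; s=0: 5 at 81
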
2 changes: 2 additions & 0 deletions nni/algorithms/hpo/hyperopt_tuner.py
@@ -12,6 +12,7 @@
 import numpy as np
 from schema import Optional, Schema
 from nni import ClassArgsValidator
+from nni.common.hpo_utils import validate_search_space
 from nni.tuner import Tuner
 from nni.utils import NodeType, OptimizeMode, extract_scalar_reward
 
@@ -246,6 +247,7 @@ def update_search_space(self, search_space):
         ----------
         search_space : dict
         """
+        validate_search_space(search_space)
         self.json = search_space
 
         search_space_instance = json2space(self.json)
7 changes: 5 additions & 2 deletions nni/algorithms/hpo/metis_tuner/metis_tuner.py
@@ -15,6 +15,9 @@
 from schema import Schema, Optional
 
 from nni import ClassArgsValidator
+from nni.tuner import Tuner
+from nni.common.hpo_utils import validate_search_space
+from nni.utils import OptimizeMode, extract_scalar_reward
 from . import lib_constraint_summation
 from . import lib_data
 from .Regression_GMM import CreateModel as gmm_create_model
@@ -23,8 +26,6 @@
 from .Regression_GP import OutlierDetection as gp_outlier_detection
 from .Regression_GP import Prediction as gp_prediction
 from .Regression_GP import Selection as gp_selection
-from nni.tuner import Tuner
-from nni.utils import OptimizeMode, extract_scalar_reward
 
 logger = logging.getLogger("Metis_Tuner_AutoML")
 
@@ -152,6 +153,8 @@ def update_search_space(self, search_space):
         ----------
         search_space : dict
         """
+        validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform'])
+
         self.x_bounds = [[] for i in range(len(search_space))]
         self.x_types = [NONE_TYPE for i in range(len(search_space))]
 
10 changes: 5 additions & 5 deletions nni/algorithms/hpo/networkmorphism_tuner/graph_transformer.py
@@ -96,21 +96,21 @@ def create_new_layer(layer, n_dim):
         new_layer = StubDense(input_shape[0], input_shape[0])
 
     elif layer_class == get_dropout_class(n_dim):
-        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)
+        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)  # pylint: disable=not-callable
 
     elif layer_class == get_conv_class(n_dim):
-        new_layer = layer_class(
+        new_layer = layer_class(  # pylint: disable=not-callable
             input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1
         )
 
     elif layer_class == get_batch_norm_class(n_dim):
-        new_layer = layer_class(input_shape[-1])
+        new_layer = layer_class(input_shape[-1])  # pylint: disable=not-callable
 
     elif layer_class == get_pooling_class(n_dim):
-        new_layer = layer_class(sample((1, 3, 5), 1)[0])
+        new_layer = layer_class(sample((1, 3, 5), 1)[0])  # pylint: disable=not-callable
 
     else:
-        new_layer = layer_class()
+        new_layer = layer_class()  # pylint: disable=not-callable
 
     return new_layer
 
@@ -8,13 +8,13 @@
 import logging
 import os
 from schema import Optional, Schema
+from nni import ClassArgsValidator
 from nni.tuner import Tuner
 from nni.utils import OptimizeMode, extract_scalar_reward
 from .bayesian import BayesianOptimizer
 from .nn import CnnGenerator, MlpGenerator
 from .utils import Constant
 from .graph import graph_to_json, json_to_graph
-from nni import ClassArgsValidator
 
 logger = logging.getLogger("NetworkMorphism_AutoML")
 
2 changes: 2 additions & 0 deletions nni/algorithms/hpo/smac_tuner/smac_tuner.py
@@ -21,6 +21,7 @@
 
 import nni
 from nni import ClassArgsValidator
+from nni.common.hpo_utils import validate_search_space
 from nni.tuner import Tuner
 from nni.utils import OptimizeMode, extract_scalar_reward
 
@@ -143,6 +144,7 @@ def update_search_space(self, search_space):
             The format could be referred to search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
         """
         self.logger.info('update search space in SMAC.')
+        validate_search_space(search_space, ['choice', 'randint', 'uniform', 'quniform', 'loguniform'])
         if not self.update_ss_done:
             self.categorical_dict = generate_scenario(search_space)
             if self.categorical_dict is None:
Empty file added nni/algorithms/nas/__init__.py
15 changes: 7 additions & 8 deletions nni/algorithms/nas/pytorch/cream/trainer.py
@@ -1,11 +1,10 @@
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT license.
 
-import os
-import torch
 import logging
 
 from copy import deepcopy
 
+import torch
 from nni.nas.pytorch.trainer import Trainer
 from nni.nas.pytorch.utils import AverageMeterGroup

@@ -209,8 +208,8 @@ def _simulate_sgd_update(self, w, g, optimizer):
         return g * optimizer.param_groups[-1]['lr'] + w
 
     # split training images into several slices
-    def _get_minibatch_input(self, input):
-        slice = self.slices
+    def _get_minibatch_input(self, input):  # pylint: disable=redefined-builtin
+        slice = self.slices  # pylint: disable=redefined-builtin
         x = deepcopy(input[:slice].clone().detach())
         return x
 
@@ -259,8 +258,8 @@ def _cross_entropy_loss_with_soft_target(self, pred, soft_target):
         return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
 
     # forward validation data
-    def _forward_validation(self, input, target):
-        slice = self.slices
+    def _forward_validation(self, input, target):  # pylint: disable=redefined-builtin
+        slice = self.slices  # pylint: disable=redefined-builtin
         x = input[slice:slice * 2].clone()
 
         self._replace_mutator_cand(self.current_student_arch)
@@ -281,7 +280,7 @@ def _replace_mutator_cand(self, cand):
         self.mutator._cache = cand
 
     # update meta matching networks
-    def _run_update(self, input, target, batch_idx):
+    def _run_update(self, input, target, batch_idx):  # pylint: disable=redefined-builtin
         if self._isUpdateMeta(batch_idx):
             x = self._get_minibatch_input(input)
 
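Several hunks in this commit add the same pragma, so a word on why: `input` and `slice` are Python builtins, and binding them as parameters or locals shadows the builtins for the whole scope, which pylint flags as redefined-builtin. Renaming would ripple through the trainer's API, so the commit silences the check line by line instead. A minimal reproduction:

    def head(input, slice):  # pylint: disable=redefined-builtin
        # Inside this function the builtins input() and slice() are shadowed.
        return input[:slice]

    print(head([1, 2, 3, 4], 2))  # [1, 2]
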
2 changes: 1 addition & 1 deletion nni/algorithms/nas/pytorch/fbnet/trainer.py
@@ -128,7 +128,7 @@ def _layer_choice_sample(self):
         layer_id = 0
         for i, stage_name in enumerate(stages):
             ops_names = [op for op in self.lookup_table.lut_ops[stage_name]]
-            for j in range(stage_lnum[i]):
+            for _ in range(stage_lnum[i]):
                 searched_op = ops_names[choice_ids[layer_id]]
                 choice_names.append(searched_op)
                 layer_id += 1