diff --git a/dependencies/recommended.txt b/dependencies/recommended.txt index 07c51db178..085a594381 100644 --- a/dependencies/recommended.txt +++ b/dependencies/recommended.txt @@ -20,4 +20,5 @@ tianshou >= 0.4.1 matplotlib git+https://github.com/microsoft/nn-Meter.git#egg=nn_meter sympy -timm >= 0.5.4 \ No newline at end of file +timm >= 0.5.4 +typing_extensions >= 4.7.0 \ No newline at end of file diff --git a/dependencies/required.txt b/dependencies/required.txt index 701680fd90..fada008dbd 100644 --- a/dependencies/required.txt +++ b/dependencies/required.txt @@ -19,6 +19,6 @@ scikit-learn >= 0.24.1 scipy < 1.8 ; python_version < "3.8" scipy ; python_version >= "3.8" tqdm -typeguard >= 3.0.0 +typeguard >= 3.0.0, < 4.1.3 typing_extensions >= 4.7.0 websockets >= 10.1 diff --git a/docs/source/conf.py b/docs/source/conf.py index 9acbfa4190..b0ed412d01 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -31,7 +31,7 @@ version = '' # The full version, including alpha/beta/rc tags # FIXME: this should be written somewhere globally -release = 'v3.0rc1' +release = 'v3.0pt1' # -- General configuration --------------------------------------------------- @@ -117,6 +117,8 @@ r'https://cla\.opensource\.microsoft\.com', r'https://www\.docker\.com/', r'https://nlp.stanford.edu/projects/glove/', + r'https://code.visualstudio.com/docs/python/editing#_formatting', + r'https://dl.acm.org/doi/10.1145/3352020.3352031', # remove after 3.0 release r'https://nni\.readthedocs\.io/en/v2\.10/compression/overview\.html', diff --git a/docs/source/release.rst b/docs/source/release.rst index 82eb80b304..009b92188d 100644 --- a/docs/source/release.rst +++ b/docs/source/release.rst @@ -5,6 +5,126 @@ Change Log ========== + +Release 3.0 - 21/8/2023 +----------------------- + +Web Portal +^^^^^^^^^^ + +* New look and feel + +Neural Architecture Search +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* **Breaking change**: ``nni.retiarii`` is no longer maintained and tested. 
Please migrate to ``nni.nas``. + + * Inherit ``nni.nas.nn.pytorch.ModelSpace``, rather than use ``@model_wrapper``. + * Use ``nni.choice``, rather than ``nni.nas.nn.pytorch.ValueChoice``. + * Use ``nni.nas.experiment.NasExperiment`` and ``NasExperimentConfig``, rather than ``RetiariiExperiment``. + * Use ``nni.nas.model_context``, rather than ``nni.nas.fixed_arch``. + * Please refer to `quickstart `_ for more changes. + +* A refreshed experience to construct model space. + + * Enhanced debuggability via ``freeze()`` and ``simplify()`` APIs. + * Enhanced expressiveness with ``nni.choice``, ``nni.uniform``, ``nni.normal``, etc. + * Enhanced experience of customization with ``MutableModule``, ``ModelSpace`` and ``ParametrizedModule``. + * Search space with constraints is now supported. + +* Improved robustness and stability of strategies. + + * Supported search space types are now enriched for PolicyBaseRL, ENAS and Proxyless. + * Each step of one-shot strategies can be executed alone: model mutation, evaluator mutation and training. + * Most multi-trial strategies now support specifying a seed for reproducibility. + * Performance of strategies has been verified on a set of benchmarks. + +* Strategy/engine middleware. + + * Filtering, replicating, deduplicating or retrying models submitted by any strategy. + * Merging or transforming models before executing (e.g., CGO). + * Arbitrarily-long chains of middlewares. + +* New execution engine. + + * Improved debuggability via SequentialExecutionEngine: trials can run in a single process and breakpoints are effective. + * The old execution engine is now decomposed into execution engine and model format. + * Enhanced extensibility of execution engines. + +* NAS profiler and hardware-aware NAS. + + * New profilers profile a model space, and quickly compute a profiling result for a sampled architecture or a distribution of architectures (FlopsProfiler, NumParamsProfiler and NnMeterProfiler are officially supported). 
+ * Assemble profiler with arbitrary strategies, including both multi-trial and one-shot. + * Profilers are extensible. Strategies can be assembled with arbitrary customized profilers. + +Model Compression +^^^^^^^^^^^^^^^^^ + +* Compression framework is refactored, new framework import path is ``nni.contrib.compression``. + + * Configure keys are refactored, support more detailed compression configurations. + * Support fusion of multiple compression methods. + * Support distillation as a basic compression component. + * Support more compression targets, like ``input``, ``output`` and any registered parameters. + * Support compressing any module type by customizing module settings. + +* Model compression support in DeepSpeed mode. + +* Fix example bugs. + +* Pruning + + * Pruner interfaces have been fine-tuned for ease of use. + * Support configuring ``granularity`` in pruners. + * Support different mask ways: multiplying by zero or adding a large negative value. + * Support manually setting dependency group and global group. + * A new powerful pruning speedup is released, applicability and robustness have been greatly improved. + * The end-to-end transformer compression tutorial has been updated, achieving more extreme compression performance. + * Fix config list in the examples. + +* Quantization + + * Support using ``Evaluator`` to handle training/inferencing. + * Support more module fusion combinations. + * Support configuring ``granularity`` in quantizers. + * Bias correction is supported in the Post Training Quantization algorithm. + * LSQ+ quantization algorithm is supported. + +* Distillation + + * DynamicLayerwiseDistiller and Adaptive1dLayerwiseDistiller are supported. + +* Compression documents are now updated for the new framework; for the old version, please view the `v2.10 `_ doc. 
+* New compression examples are under `nni/examples/compression `_ + + * Create an evaluator: `nni/examples/compression/evaluator `_ + * Pruning a model: `nni/examples/compression/pruning `_ + * Quantize a model: `nni/examples/compression/quantization `_ + * Fusion compression: `nni/examples/compression/fusion `_ + + +Training Services +^^^^^^^^^^^^^^^^^ + +* **Breaking change**: NNI v3.0 cannot resume experiments created by NNI v2.x +* Local training service: + + * Reduced latency of creating trials + * Fixed "GPU metric not found" + * Fixed bugs about resuming trials + +* Remote training service: + + * ``reuse_mode`` now defaults to ``False``; setting it to ``True`` will fall back to v2.x remote training service + * Reduced latency of creating trials + * Fixed "GPU metric not found" + * Fixed bugs about resuming trials + * Supported viewing trial logs on the web portal + * Supported automatic recovery after temporary server failure (network fluctuation, out of memory, etc) + +* Get rid of IoC and remove unused training services. + + Release 3.0 Preview - 5/9/2022 ------------------------------ @@ -25,18 +145,21 @@ Neural Architecture Search * Please refer to `quickstart `_ for more changes. * A refreshed experience to construct model space. + * Enhanced debuggability via ``freeze()`` and ``simplify()`` APIs. * Enhanced expressiveness with ``nni.choice``, ``nni.uniform``, ``nni.normal`` and etc. * Enhanced experience of customization with ``MutableModule``, ``ModelSpace`` and ``ParamterizedModule``. * Search space with constraints is now supported. * Improved robustness and stability of strategies. + * Supported search space types are now enriched for PolicyBaseRL, ENAS and Proxyless. * Each step of one-shot strategies can be executed alone: model mutation, evaluator mutation and training. * Most multi-trial strategies now supports specifying seed for reproducibility. * Performance of strategies have been verified on a set of benchmarks. * Strategy/engine middleware. 
+ * Filtering, replicating, deduplicating or retrying models submitted by any strategy. * Merging or transforming models before executing (e.g., CGO). * Arbitrarily-long chains of middlewares.