
Commit 8620389
remove ema correction
linbinskn committed Dec 22, 2020
1 parent e1f3da2 commit 8620389
Showing 1 changed file with 4 additions and 5 deletions.
nni/algorithms/compression/pytorch/quantization/quantizers.py (4 additions, 5 deletions)
@@ -61,8 +61,7 @@ def update_ema(biased_ema, value, decay, step):
     float, float
     """
     biased_ema = biased_ema * decay + (1 - decay) * value
-    unbiased_ema = biased_ema / (1 - decay ** step)  # Bias correction
-    return biased_ema, unbiased_ema
+    return biased_ema
 
 
 def update_quantization_param(bits, rmin, rmax):
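
For readers skimming the diff: the removed lines implemented the standard EMA bias correction, which compensates for the running average being initialised at zero. Below is a small self-contained sketch (not NNI source; the decay and the constant input values are made up) contrasting the plain EMA that remains after this commit with the corrected variant that was removed.

# Standalone sketch, not NNI source: contrasts the plain EMA kept by this
# commit with the bias-corrected variant that the commit removes.

def ema_plain(biased_ema, value, decay):
    # What update_ema effectively computes after this commit.
    return biased_ema * decay + (1 - decay) * value

def ema_with_correction(biased_ema, value, decay, step):
    # The removed behaviour: divide by (1 - decay ** step) to offset the
    # zero initialisation of the running average.
    biased_ema = biased_ema * decay + (1 - decay) * value
    unbiased_ema = biased_ema / (1 - decay ** step)  # bias correction
    return biased_ema, unbiased_ema

decay = 0.99
plain, biased = 0.0, 0.0
for step, value in enumerate([1.0, 1.0, 1.0], start=1):
    plain = ema_plain(plain, value, decay)
    biased, unbiased = ema_with_correction(biased, value, decay, step)
    print(step, round(plain, 6), round(unbiased, 6))
# Prints roughly: 1 0.01 1.0 / 2 0.0199 1.0 / 3 0.029701 1.0. With a zero
# initial value the raw EMA starts far below the true statistic; the removed
# division is what compensated for that startup bias.
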
@@ -275,11 +274,11 @@ def quantize_output(self, output, wrapper, **kwargs):
         # we dont update output quantization parameters in evaluation stage
         if wrapper.training:
             current_min, current_max = torch.min(output), torch.max(output)
-            module.tracked_min_biased, module.tracked_min = update_ema(module.tracked_min_biased, current_min,
+            module.tracked_min_biased = update_ema(module.tracked_min_biased, current_min,
                                                    module.ema_decay, self.bound_model.steps)
-            module.tracked_max_biased, module.tracked_max = update_ema(module.tracked_max_biased, current_max,
+            module.tracked_max_biased = update_ema(module.tracked_max_biased, current_max,
                                                    module.ema_decay, self.bound_model.steps)
-            module.scale, module.zero_point = update_quantization_param(output_bits, module.tracked_min, module.tracked_max)
+            module.scale, module.zero_point = update_quantization_param(output_bits, module.tracked_min_biased, module.tracked_max_biased)
         out = self._quantize(output_bits, module, output)
         out = self._dequantize(module, out)
         return out
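
The net effect of this hunk is that the quantization parameters are now computed from the biased EMA min/max rather than from the bias-corrected values. As a hypothetical sketch of how asymmetric scale and zero_point are typically derived from such a tracked range (the helper name and sample numbers are made up; NNI's actual update_quantization_param may handle clamping and edge cases differently):

import torch

# Hypothetical helper, not NNI source: a common way to derive asymmetric
# scale / zero_point values from a tracked (rmin, rmax) range.
def compute_scale_zero_point(bits, rmin, rmax):
    qmin, qmax = 0, (1 << bits) - 1
    # Extend the range so that 0.0 is exactly representable.
    rmin = torch.min(rmin, torch.zeros_like(rmin))
    rmax = torch.max(rmax, torch.zeros_like(rmax))
    scale = (rmax - rmin) / float(qmax - qmin)
    zero_point = torch.clamp(torch.round(qmin - rmin / scale), qmin, qmax)
    return scale, zero_point

# Made-up 8-bit example mirroring the new call site, which now passes the
# biased EMA statistics directly:
tracked_min_biased, tracked_max_biased = torch.tensor(-0.8), torch.tensor(2.4)
scale, zero_point = compute_scale_zero_point(8, tracked_min_biased, tracked_max_biased)
print(scale.item(), zero_point.item())  # roughly 0.01255 and 64.0
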
