
Commit

remove
chenbohua3 committed Apr 16, 2021
1 parent 6fdadc5 commit dc7f97b
Showing 1 changed file with 0 additions and 2 deletions.
2 changes: 0 additions & 2 deletions in nni/algorithms/compression/pytorch/quantization/quantizers.py

@@ -573,8 +573,6 @@ def __init__(self, model, config_list, optimizer=None):
         self.quant_grad = QuantForward()
         modules_to_compress = self.get_modules_to_compress()
         for layer, config in modules_to_compress:
-            # the way that lsq simulates quantization will make the gradient of zero point always
-            # equal to 0. So there may be no need to set it.
             layer.module.register_parameter("zero_point", torch.nn.Parameter(torch.Tensor([0.0])))
             layer.module.register_parameter("scale", torch.nn.Parameter(torch.Tensor([1.0])))
             if "weight" in config.get("quant_types", []):
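Background on the removed comment: LSQ (Learned Step Size Quantization) simulates quantization symmetrically, with only a learned scale, so the zero_point parameter registered in this __init__ never enters the forward computation and never receives a gradient. The sketch below illustrates that behavior; it is not NNI's actual implementation, and the names round_pass, lsq_fake_quant, qmin, and qmax are illustrative assumptions.

import torch

def round_pass(x):
    # Straight-through estimator: round() in the forward pass,
    # identity in the backward pass.
    return (x.round() - x).detach() + x

def lsq_fake_quant(x, scale, qmin=-128, qmax=127):
    # Symmetric LSQ-style simulated quantization: no zero point appears.
    return round_pass(torch.clamp(x / scale, qmin, qmax)) * scale

x = torch.randn(16)
scale = torch.nn.Parameter(torch.tensor([1.0]))
zero_point = torch.nn.Parameter(torch.tensor([0.0]))  # registered but unused, as in the diff above

lsq_fake_quant(x, scale).sum().backward()
print(scale.grad)       # populated: the step size is learned
print(zero_point.grad)  # None: never entered the graph, i.e. effectively always zero

Running this, scale.grad is populated while zero_point.grad stays None, which is consistent with the removed comment that the zero point's gradient is always zero.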
