Commit
Update optimum/gptq/quantizer.py
Co-authored-by: fxmarty <9808326+fxmarty@users.noreply.github.com>
SunMarc and fxmarty authored Dec 12, 2023
1 parent 6249918 commit b2dccef
Showing 1 changed file with 1 addition and 1 deletion.
optimum/gptq/quantizer.py: 1 addition & 1 deletion
@@ -220,7 +220,7 @@ def convert_model(self, model: nn.Module):
             layers_to_keep = sum(self.modules_in_block_to_quantize, [])
             for name in list(layers_to_be_replaced.keys()):
                 if not any(name.endswith(layer) for layer in layers_to_keep):
-                    logger.info(f"{name} has not been quantized. We don't convert it")
+                    logger.info(f"Quantization disabled for {name} (only modules_in_block_to_quantize={self.modules_in_block_to_quantize} are quantized)")
                     del layers_to_be_replaced[name]
             self._replace_by_quant_layers(model, layers_to_be_replaced)
             return model
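To illustrate the behavior around the new message, here is a minimal runnable sketch of the filtering loop on its own, outside of GPTQQuantizer; the module names and the modules_in_block_to_quantize value below are hypothetical examples, not taken from the commit:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Hypothetical inputs: a nested list of module-name suffixes to quantize,
# and a mapping of fully qualified module names slated for replacement.
modules_in_block_to_quantize = [["self_attn.q_proj", "self_attn.k_proj"]]
layers_to_be_replaced = {
    "model.layers.0.self_attn.q_proj": object(),
    "model.layers.0.self_attn.k_proj": object(),
    "model.layers.0.mlp.gate_proj": object(),
}

# Flatten the nested list, as the diff does with sum(..., []).
layers_to_keep = sum(modules_in_block_to_quantize, [])

for name in list(layers_to_be_replaced.keys()):
    if not any(name.endswith(layer) for layer in layers_to_keep):
        logger.info(
            f"Quantization disabled for {name} (only modules_in_block_to_quantize="
            f"{modules_in_block_to_quantize} are quantized)"
        )
        del layers_to_be_replaced[name]

# Only the q_proj and k_proj entries remain to be swapped for quantized layers.
print(sorted(layers_to_be_replaced))

Running this logs the new "Quantization disabled for ..." message for the mlp.gate_proj entry and leaves only the two attention projections in layers_to_be_replaced.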
