diff --git a/optimum/gptq/quantizer.py b/optimum/gptq/quantizer.py
index 7df936fd0d..1c73c7e1da 100644
--- a/optimum/gptq/quantizer.py
+++ b/optimum/gptq/quantizer.py
@@ -220,7 +220,7 @@ def convert_model(self, model: nn.Module):
         layers_to_keep = sum(self.modules_to_quantize_inside_block, [])
         for name in list(layers_to_be_replaced.keys()):
             if not any(name.endswith(layer) for layer in layers_to_keep):
-                logger.info(f"{name} has not been quantized. We don't convert it")
+                logger.info(f"Quantization disabled for {name} (only modules_in_block_to_quantize={self.modules_to_quantize_inside_block} are quantized)")
                 del layers_to_be_replaced[name]
         self._replace_by_quant_layers(model, layers_to_be_replaced)
         return model