Commit b2ecfe5: style

SunMarc committed Dec 12, 2023
1 parent 1720bab
Showing 1 changed file with 4 additions and 5 deletions.
9 changes: 4 additions & 5 deletions optimum/gptq/quantizer.py
@@ -221,7 +221,9 @@ def convert_model(self, model: nn.Module):
             layers_to_keep = sum(self.modules_in_block_to_quantize, [])
             for name in list(layers_to_be_replaced.keys()):
                 if not any(name.endswith(layer) for layer in layers_to_keep):
-                    logger.info(f"Quantization disabled for {name} (only modules_in_block_to_quantize={self.modules_in_block_to_quantize} are quantized)")
+                    logger.info(
+                        f"Quantization disabled for {name} (only modules_in_block_to_quantize={self.modules_in_block_to_quantize} are quantized)"
+                    )
                     del layers_to_be_replaced[name]
         self._replace_by_quant_layers(model, layers_to_be_replaced)
         return model
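
For context on the hunk above: modules_in_block_to_quantize is a list of lists of layer-name suffixes, and any layer in the block whose name does not end with one of those suffixes is logged as skipped and removed from the replacement map. Below is a minimal usage sketch; the model id and module names are illustrative assumptions rather than anything from this commit, and the GPTQQuantizer arguments shown are the commonly documented ones and may differ between optimum versions.

# Hypothetical usage sketch, not part of this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer
from optimum.gptq import GPTQQuantizer

model_id = "facebook/opt-125m"  # illustrative model choice
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

quantizer = GPTQQuantizer(
    bits=4,
    dataset="c4",
    # Only layers whose names end with these suffixes are quantized inside each
    # decoder block; convert_model() logs and drops every other candidate layer.
    modules_in_block_to_quantize=[
        ["self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj"],
        ["self_attn.out_proj"],
    ],
)
quantized_model = quantizer.quantize_model(model, tokenizer)
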
@@ -456,10 +458,7 @@ def store_input_hook(_, input, *args):
             if not has_device_map or get_device(block) == torch.device("cpu"):
                 block = block.to(0)
             layers = get_layers(block)
-            if (
-                isinstance(self.modules_in_block_to_quantize, list)
-                and len(self.modules_in_block_to_quantize) > 0
-            ):
+            if isinstance(self.modules_in_block_to_quantize, list) and len(self.modules_in_block_to_quantize) > 0:
                 if self.true_sequential:
                     layers_name_list = self.modules_in_block_to_quantize
                 else:
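
The condition collapsed onto one line above only gates which layer grouping is used: with true_sequential=True each inner list of modules_in_block_to_quantize becomes its own sequential quantization step, otherwise the groups are handled in a single pass. A small illustration of the two shapes follows; the flattened form for the non-sequential case is an assumption inferred from the sum(..., []) pattern in convert_model, since the else branch body is not shown in this diff.

# Illustration of the grouping shapes (the else-branch flattening is assumed).
modules_in_block_to_quantize = [
    ["self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj"],
    ["self_attn.out_proj"],
]

# true_sequential=True: quantize group by group, in order.
layers_name_list = modules_in_block_to_quantize

# true_sequential=False: one combined group (assumed, mirroring
# sum(self.modules_in_block_to_quantize, []) used in convert_model).
layers_name_list = [sum(modules_in_block_to_quantize, [])]
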
