From 2c6a6f091ce9a5073fd7c901e01f2c5d488fbe65 Mon Sep 17 00:00:00 2001
From: Marc Sun <57196510+SunMarc@users.noreply.github.com>
Date: Tue, 12 Dec 2023 14:18:54 -0500
Subject: [PATCH] Update optimum/gptq/quantizer.py

Co-authored-by: fxmarty <9808326+fxmarty@users.noreply.github.com>
---
 optimum/gptq/quantizer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/optimum/gptq/quantizer.py b/optimum/gptq/quantizer.py
index 948cdbaa12..9cdc2ac67a 100644
--- a/optimum/gptq/quantizer.py
+++ b/optimum/gptq/quantizer.py
@@ -84,7 +84,7 @@ def __init__(
         Args:
             bits (`int`):
                 The number of bits to quantize to, supported numbers are (2, 3, 4, 8).
-            dataset (`Union[List[str],str,Any]`, defaults to None):
+            dataset (`Union[List[str], str, Any]`, defaults to `None`):
                 The dataset used for quantization. You can provide your own dataset in a list of string or in a list of tokenized data
                 or just use the original datasets used in GPTQ paper ['wikitext2','c4','c4-new','ptb','ptb-new'].
             group_size (int, defaults to 128):
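
Note (not part of the patch itself): a minimal sketch of how the `dataset` argument documented in the touched docstring is typically passed to `optimum.gptq.GPTQQuantizer`. The model checkpoint name is a placeholder chosen for illustration, and the snippet assumes `transformers` plus an optimum build with GPTQ support are installed.

# Minimal sketch, assuming transformers and optimum (with GPTQ support) are available.
from transformers import AutoModelForCausalLM, AutoTokenizer
from optimum.gptq import GPTQQuantizer

model_id = "facebook/opt-125m"  # placeholder checkpoint; any causal LM works
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# `dataset` accepts one of the named calibration sets ('wikitext2', 'c4', 'c4-new',
# 'ptb', 'ptb-new'), a list of raw strings, or a list of tokenized examples.
quantizer = GPTQQuantizer(bits=4, dataset="c4", group_size=128)
quantized_model = quantizer.quantize_model(model, tokenizer)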