[Kernel] Support running GPTQ 8-bit models in Marlin #4533
@@ -28,17 +28,24 @@
     capability < QUANTIZATION_METHODS["gptq_marlin"].get_min_capability())

 MODELS = [
-    # act_order==False, group_size=channelwise
+    # 4-bit, act_order==False, group_size=channelwise
     ("robertgshaw2/zephyr-7b-beta-channelwise-gptq", "main"),
-    # act_order==False, group_size=128
+    # 4-bit, act_order==False, group_size=128
     ("TheBloke/Llama-2-7B-GPTQ", "main"),
+
-    # act_order==True, group_size=128
+    # 4-bit, act_order==True, group_size=128
     ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "main"),
-    # act_order==True, group_size=64
+    # 4-bit, act_order==True, group_size=64
     ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-4bit-64g-actorder_True"),
-    # act_order==True, group_size=32
+    # 4-bit, act_order==True, group_size=32
     ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-4bit-32g-actorder_True"),
+
+    # 8-bit, act_order==True, group_size=channelwise
+    ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit--1g-actorder_True"),
+    # 8-bit, act_order==True, group_size=128
+    ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-128g-actorder_True"),
+    # 8-bit, act_order==True, group_size=32
+    ("TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ", "gptq-8bit-32g-actorder_True"),
 ]
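For context, the point of this PR is that 8-bit GPTQ checkpoints like the new entries above can now run through the Marlin kernels. Below is a minimal offline-inference sketch assuming vLLM's standard LLM API; the model and revision come from the list above, while dtype, max_model_len, the prompt, and the sampling settings are illustrative choices, not values from this PR.

from vllm import LLM, SamplingParams

# Load an 8-bit GPTQ checkpoint; on supported GPUs vLLM can route it
# through the Marlin kernels added by this PR.
llm = LLM(
    model="TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ",
    revision="gptq-8bit-128g-actorder_True",
    dtype="half",          # fp16 activations (assumption)
    max_model_len=2048,    # short context for a quick smoke test (assumption)
)

# temperature=0.0 gives greedy decoding, mirroring the
# generate_greedy_logprobs calls in the test below.
params = SamplingParams(temperature=0.0, max_tokens=64)
outputs = llm.generate(["What is GPTQ quantization?"], params)
print(outputs[0].outputs[0].text)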
@@ -63,10 +70,11 @@ def test_models(
     gptq_marlin_model = vllm_runner(model_name=model_name,
                                     revision=revision,
                                     dtype=dtype,
-                                    quantization="marlin",
+                                    quantization="gptq",
                                     max_model_len=MAX_MODEL_LEN,
                                     tensor_parallel_size=1,
-                                    disable_custom_all_reduce=True)
+                                    disable_custom_all_reduce=True,
+                                    enforce_eager=True)

     gptq_marlin_outputs = gptq_marlin_model.generate_greedy_logprobs(
         example_prompts, max_tokens, num_logprobs)

Review thread on the quantization="gptq" line:
Reviewer: @alexm-nm this test should have […]. Also - is […]?
Author: Oh, must be a leftover from debug, good catch, will fix it in 30 min.
Author: Fixed, tests pass.

Review thread on the disable_custom_all_reduce=True line:
Reviewer: To make this cleaner, can we remove […]?
Author: Will try.
Author: Works.
@@ -79,7 +87,8 @@ def test_models(
                              quantization="gptq",
                              max_model_len=MAX_MODEL_LEN,
                              tensor_parallel_size=1,
-                             disable_custom_all_reduce=True)
+                             disable_custom_all_reduce=True,
+                             enforce_eager=True)
     gptq_outputs = gptq_model.generate_greedy_logprobs(example_prompts,
                                                        max_tokens,
                                                        num_logprobs)
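For readers unfamiliar with this test pattern: both runners produce greedy tokens plus top-k logprobs, and the test passes when the two backends stay close. The sketch below is a simplified, self-contained stand-in for that closeness check, not vLLM's actual test helper; the SequenceOutput shape and the function body are assumptions for illustration.

from typing import Dict, List, Tuple

# One generated sequence: (token_ids, text, per-step top-k logprob dicts).
SequenceOutput = Tuple[List[int], str, List[Dict[int, float]]]

def logprobs_close(outputs_a: List[SequenceOutput],
                   outputs_b: List[SequenceOutput]) -> None:
    """Assert two backends agree: where their greedy tokens differ, each
    backend's token must still appear in the other's top-k candidates."""
    for (ids_a, _, lps_a), (ids_b, _, lps_b) in zip(outputs_a, outputs_b):
        # zip truncates to the shorter generation; acceptable for a sketch.
        for step, (tok_a, tok_b) in enumerate(zip(ids_a, ids_b)):
            if tok_a == tok_b:
                continue  # exact agreement at this step
            assert tok_a in lps_b[step] and tok_b in lps_a[step], (
                f"Backends diverged at step {step}: {tok_a} vs {tok_b}")

The cross-membership rule tolerates small numerical divergence between kernels while still catching real correctness bugs.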
Review thread on the enforce_eager=True additions:
Reviewer: Why is this needed? @alexm-nm
Author: I got occasional warnings about del self.model being None. It is not necessary for correctness if it causes issues in CI.
Reviewer: I don't think we should touch this file.
Author: Removed.
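For context on the warning mentioned above: it typically comes from test-fixture teardown that releases the engine between the two backend runs. Below is a hedged sketch of that cleanup pattern; the class and method names are hypothetical and not taken from vLLM's actual conftest.

import gc
import torch

class RunnerCleanupSketch:
    """Illustrative wrapper only; vLLM's real test fixture differs."""

    def __init__(self, llm):
        self.model = llm

    def close(self):
        # Guard the teardown: on a double-close, self.model can already
        # be None, which is what produced the warnings discussed above.
        if getattr(self, "model", None) is not None:
            self.model = None
        gc.collect()                   # reclaim Python-side objects
        if torch.cuda.is_available():
            torch.cuda.empty_cache()   # release cached GPU memory blocks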