
Commit
support exclusion of params when using low bit optim
asahni04 committed Nov 7, 2024
1 parent 27b1d16 commit 5836a17
Showing 2 changed files with 5 additions and 19 deletions.
12 changes: 4 additions & 8 deletions test/prototype/test_low_bit_optim.py
@@ -19,11 +19,8 @@
quantize_4bit_with_qmap,
_fp32_to_bf16_sr,
)
from torchao.utils import (
TORCH_VERSION_AT_LEAST_2_3,
TORCH_VERSION_AT_LEAST_2_4,
TORCH_VERSION_AT_LEAST_2_6,
)

from torchao.utils import TORCH_VERSION_AT_LEAST_2_3, TORCH_VERSION_AT_LEAST_2_4, TORCH_VERSION_AT_LEAST_2_6

try:
import bitsandbytes as bnb
@@ -345,9 +342,8 @@ def test_optim_bf16_stochastic_round_correctness(self):
optim2.step()
optim2.zero_grad()

torch.testing.assert_close(
loss1, loss2, msg=lambda msg: f"Iteration {idx}. {msg}"
)
torch.testing.assert_close(loss1, loss2, msg=lambda msg: f"Iteration {idx}. {msg}")


@pytest.mark.skipif(not TORCH_VERSION_AT_LEAST_2_3, reason="requires PyTorch >= 2.3")
@parametrize("optim_name", ["Adam8bit", "AdamW8bit", "Adam4bit", "AdamW4bit", "AdamFp8", "AdamWFp8"])
12 changes: 1 addition & 11 deletions torchao/prototype/low_bit_optim/adam.py
@@ -13,17 +13,7 @@

class _AdamBase(Optimizer):
def __init__(
self,
params,
lr,
betas,
eps,
weight_decay,
amsgrad,
*,
block_size,
bf16_stochastic_round,
is_adamw,
self, params, lr, betas, eps, weight_decay, amsgrad, *, block_size, bf16_stochastic_round, is_adamw, exclude_low_bit_optim_params=None
) -> None:
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
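The change above adds a new exclude_low_bit_optim_params=None keyword to _AdamBase.__init__. Below is a minimal usage sketch; the diff only confirms that the keyword exists, so the assumed semantics (an iterable of parameters to keep out of the quantized optimizer state) and the assumption that public subclasses such as AdamW8bit accept and forward the argument are illustrative rather than confirmed by this commit.

```python
# Hypothetical usage sketch for the new exclude_low_bit_optim_params keyword.
# Assumption: it takes an iterable of parameters that should be excluded from
# low-bit (quantized) optimizer state; only the keyword's existence on
# _AdamBase.__init__ is shown in this commit, and whether AdamW8bit forwards
# it is also an assumption.
import torch
from torchao.prototype.low_bit_optim import AdamW8bit

model = torch.nn.Sequential(
    torch.nn.Embedding(1000, 64),
    torch.nn.Linear(64, 10),
)

# Assumed semantics: parameters listed here keep regular full-precision
# optimizer state, while the remaining parameters use 8-bit state.
embedding_params = list(model[0].parameters())

optim = AdamW8bit(
    model.parameters(),
    lr=1e-3,
    exclude_low_bit_optim_params=embedding_params,
)
```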
