formatting changes from black 22.3.0
Summary:
Applies the black-fbsource codemod with the new build of pyfmt.

paintitblack

Reviewed By: lisroach

Differential Revision: D36324783

fbshipit-source-id: 280c09e88257e5e569ab729691165d8dedd767bc
amyreese authored and facebook-github-bot committed May 12, 2022
1 parent 6b62949 commit a7a87a9
Showing 5 changed files with 27 additions and 27 deletions.
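
Every hunk below is the same mechanical change: black's 2022 stable style (which 22.3.0 applies) removes the spaces around the power operator when both operands are simple names or literals, so `2 ** n` becomes `2**n`. A minimal sketch of the rule, assuming black==22.3.0 is installed; the input strings are illustrative, not copied from the repository:

    import black

    # Illustrative input resembling the lines touched in this diff.
    before = "E = int(10 ** log_E)\nv_hat_t = m2_ref / (1 - beta2 ** iter_)\n"

    # Under the 2022 stable style, simple operands of ** are hugged:
    # 10**log_E, beta2**iter_.
    print(black.format_str(before, mode=black.Mode()))

    # When an operand is not simple (e.g. a call), the spaces should be kept.
    print(black.format_str("x = f(n) ** 2\n", mode=black.Mode()))
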
4 changes: 2 additions & 2 deletions fbgemm_gpu/bench/quantize_ops_benchmark.py
@@ -40,8 +40,8 @@ def cli() -> None:
 @settings(max_examples=10, deadline=None)
 # pyre-ignore
 @given(
-    num_columns=st.sampled_from([2 ** n for n in range(4, 10)]),
-    num_rows=st.sampled_from([2 ** n for n in range(4, 10)]),
+    num_columns=st.sampled_from([2**n for n in range(4, 10)]),
+    num_rows=st.sampled_from([2**n for n in range(4, 10)]),
 )
 def bench(
     flush_gpu_cache_size_mb: int,
@@ -1982,7 +1982,7 @@ def hashtable( # noqa C901
 )
 hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()

-assert hash_table.numel() * 4 < 2 ** 32
+assert hash_table.numel() * 4 < 2**32
 # initialize
 hash_table[:, :] = -1
 torch.ops.fbgemm.pruned_hashmap_insert(
4 changes: 2 additions & 2 deletions fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py
@@ -1253,7 +1253,7 @@ def _apply_cache_state(
 )
 assert cache_sets > 0
 if cache_algorithm == CacheAlgorithm.LFU:
-    assert cache_sets < 2 ** 24 - 1
+    assert cache_sets < 2**24 - 1
 cache_size = cache_sets * ASSOC * element_size * self.max_D_cache
 logging.info(
     f"Using on-device cache with admission algorithm "
@@ -2171,7 +2171,7 @@ def _apply_cache_state(
 )
 assert cache_sets > 0
 if cache_algorithm == CacheAlgorithm.LFU:
-    assert cache_sets < 2 ** 24 - 1
+    assert cache_sets < 2**24 - 1
 cache_size = cache_sets * ASSOC * self.max_D_cache
 logging.info(
     f"Using on-device cache with admission algorithm "
4 changes: 2 additions & 2 deletions fbgemm_gpu/test/split_embedding_inference_converter_test.py
@@ -157,7 +157,7 @@ def test_quantize_workflow(
     pruning_ratio: Optional[float],
     use_cpu: bool,
 ) -> None:
-    E = int(10 ** log_E)
+    E = int(10**log_E)
     Es = [E] * T
     D_alignment = 8 if not quantize_type == SparseType.INT2 else 16
     D = div_round_up(D, D_alignment)
@@ -337,7 +337,7 @@ def test_pruning_workflow_large_scale(
     use_cpu: bool,
     use_array_for_index_remapping: bool,
 ) -> None:
-    E = int(10 ** log_E)
+    E = int(10**log_E)
     D_alignment = 8
     D = div_round_up(D, D_alignment)
     sparse_arch = SparseArch(emb_dim=D, num_tables=T, num_rows=E, use_cpu=use_cpu)
40 changes: 20 additions & 20 deletions fbgemm_gpu/test/split_table_batched_embeddings_test.py
@@ -280,7 +280,7 @@ def execute_forward_(
     # This proves that we have exhaustively checked all PoolingModes
     raise RuntimeError("Unknown PoolingMode!")

-E = int(10 ** log_E)
+E = int(10**log_E)
 if use_cpu:
     D = (D + 15) // 16 * 4
 else:
@@ -869,7 +869,7 @@ def test_forward_fused_pooled_emb_quant(
     round_up(np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)), 4)
     for _ in range(T)
 ]
-E = int(10 ** log_E)
+E = int(10**log_E)
 Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]

 op = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
@@ -1005,7 +1005,7 @@ def test_nbit_forward_fused_pooled_emb_quant(
     for _ in range(T)
 ]
 Ds = [D] * T
-E = int(10 ** log_E)
+E = int(10**log_E)
 Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]

 weights_ty_list = [weights_ty] * T
@@ -1192,7 +1192,7 @@ def test_backward_dense(
     # This proves that we have exhaustively checked all PoolingModes
     raise RuntimeError("Unknown PoolingMode!")

-E = int(10 ** log_E)
+E = int(10**log_E)
 if use_cpu:
     D = (D + 15) // 16 * 4
 else:
@@ -1421,7 +1421,7 @@ def test_backward_sgd( # noqa C901
     # This proves that we have exhaustively checked all PoolingModes
     raise RuntimeError("Unknown PoolingMode!")

-E = int(10 ** log_E)
+E = int(10**log_E)
 if use_cpu:
     D = (D + 15) // 16 * 4
 else:
@@ -1644,11 +1644,11 @@ def execute_backward_adagrad_( # noqa C901
 # stochastic rounding only implemented for rowwise
 assume(not stochastic_rounding or row_wise)
 # need unique indices for non-exact tests
-assume(exact or int(10 ** log_E) > int(2.1 * B * L))
+assume(exact or int(10**log_E) > int(2.1 * B * L))
 # only row-wise supports caching
 assume(row_wise or not use_cache)

-E = int(10 ** log_E)
+E = int(10**log_E)
 if use_cpu:
     D = (D + 15) // 16 * 4
 else:
@@ -2309,7 +2309,7 @@ def test_cache_pipeline(
     cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
 ) -> None:
     iters = 3
-    E = int(10 ** log_E)
+    E = int(10**log_E)
     D = D * 4
     if not mixed:
         Ds = [D] * T
@@ -2439,7 +2439,7 @@ def execute_backward_optimizers_( # noqa C901
     # This proves that we have exhaustively checked all PoolingModes
     raise RuntimeError("Unknown PoolingMode!")

-E = int(10 ** log_E)
+E = int(10**log_E)
 if use_cpu:
     D = (D + 15) // 16 * 4
 else:
@@ -2680,9 +2680,9 @@ def execute_backward_optimizers_( # noqa C901
 m1_ref = dense_cpu_grad * (1.0 - beta1)
 torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
 iter_ = cc.iter.item()
-v_hat_t = m2_ref / (1 - beta2 ** iter_)
+v_hat_t = m2_ref / (1 - beta2**iter_)
 v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
-m_hat_t = m1_ref / (1 - beta1 ** iter_)
+m_hat_t = m1_ref / (1 - beta1**iter_)
 weights_new = split_weights[t]
 weights_ref = (
     torch.addcdiv(
@@ -2714,9 +2714,9 @@ def execute_backward_optimizers_( # noqa C901
 m1_ref = dense_cpu_grad * (1.0 - beta1)
 torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
 iter_ = cc.iter.item()
-v_hat_t = m2_ref / (1 - beta2 ** iter_)
+v_hat_t = m2_ref / (1 - beta2**iter_)
 v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
-m_hat_t = m1_ref / (1 - beta1 ** iter_)
+m_hat_t = m1_ref / (1 - beta1**iter_)
 rtw = (m_hat_t / (torch.sqrt(v_hat_t) + eps)) + weight_decay * bs[
     t
 ].weight.cpu()
@@ -3049,7 +3049,7 @@ def execute_nbit_forward_(
 else:
     mode = "sum"
     do_pooling = False
-E = int(10 ** log_E)
+E = int(10**log_E)

 if not mixed_weights_ty:
     weights_ty_list = [weights_ty] * T
@@ -3490,7 +3490,7 @@ def test_nbit_forward_uvm_cache(
 mixed = random.choice([True, False])

 iters = 3
-E = int(10 ** log_E)
+E = int(10**log_E)

 D_alignment = (
     1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
@@ -3735,7 +3735,7 @@ def test_cache_update_function(self, L: int, H: int, S: int) -> None:
 # Create an abstract split table
 D = 8
 T = 2
-E = 10 ** 3
+E = 10**3
 Ds = [D] * T
 Es = [E] * T
 emb_op = (
@@ -3768,7 +3768,7 @@ def test_cache_miss_counter(self, N: int) -> None:
 # Create an abstract split table
 D = 8
 T = 2
-E = 10 ** 3
+E = 10**3
 Ds = [D] * T
 Es = [E] * T
 emb_op = (
@@ -3847,7 +3847,7 @@ def test_nbit_cache_update_function(self, L: int, H: int, S: int) -> None:
 # Create an abstract split table
 D = 8
 T = 2
-E = 10 ** 3
+E = 10**3
 Ds = [D] * T
 Es = [E] * T
 cc = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
@@ -3882,7 +3882,7 @@ def test_nbit_cache_miss_counter(self, N: int) -> None:
 # Create an abstract split table
 D = 8
 T = 2
-E = 10 ** 3
+E = 10**3
 Ds = [D] * T
 Es = [E] * T
 cc = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
@@ -4278,7 +4278,7 @@ def test_embedding_inplace_update(
     )
     for _ in range(T)
 ]
-E = int(10 ** log_E)
+E = int(10**log_E)
 Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
 row_alignment = 1 if use_cpu else 16
 current_device = "cpu" if use_cpu else torch.cuda.current_device()
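As a follow-up, a minimal sketch (assumptions: black==22.3.0 installed, run from the repository root, fbgemm_gpu/ as the target directory) of how one might confirm that the tree conforms to this style after the codemod:

    from pathlib import Path

    import black

    mode = black.Mode()  # stable style as of black 22.3.0

    for path in sorted(Path("fbgemm_gpu").rglob("*.py")):
        src = path.read_text()
        try:
            # format_file_contents raises black.NothingChanged when the file
            # already matches the target style.
            black.format_file_contents(src, fast=False, mode=mode)
            print(f"would reformat: {path}")
        except black.NothingChanged:
            pass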
