This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

Add support for the aten::mul operator #2905

Merged 3 commits on Sep 24, 2020
@@ -15,6 +15,7 @@
     'AdaptiveAvgPool2d': lambda module, mask: no_replace(module, mask),
     'ReLU': lambda module, mask: no_replace(module, mask),
     'ReLU6': lambda module, mask: no_replace(module, mask),
+    'Sigmoid': lambda module, mask: no_replace(module, mask),
     'Linear': lambda module, mask: replace_linear(module, mask),
     'Dropout': lambda module, mask: no_replace(module, mask),
     'Dropout2d': lambda module, mask: no_replace(module, mask),
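For context on the hunk above: Sigmoid is a shape-preserving, parameter-free activation, so the speedup pass can map it to a pass-through hook rather than building a replacement module. The sketch below only illustrates that idea; no_replace_sketch and replace_module_sketch are illustrative stand-ins, not NNI's actual no_replace implementation or registration table.

import torch.nn as nn

def no_replace_sketch(module: nn.Module, mask) -> nn.Module:
    # Shape-preserving activations such as Sigmoid carry no prunable
    # parameters, so the module is returned unchanged and the caller
    # simply propagates the mask to the next layer.
    return module

# Hypothetical registration mirroring the dictionary in the diff above.
replace_module_sketch = {
    'Sigmoid': lambda module, mask: no_replace_sketch(module, mask),
}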
6 changes: 6 additions & 0 deletions src/sdk/pynni/nni/compression/torch/speedup/infer_shape.py
@@ -221,12 +221,14 @@ def __repr__(self):
 infer_from_inshape = {
     'ReLU': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'ReLU6': lambda module_masks, mask: relu_inshape(module_masks, mask),
+    'Sigmoid': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'aten::relu': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'aten::tanh': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'aten::tanh_': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'aten::hardtanh': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'aten::hardtanh_': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'aten::relu_': lambda module_masks, mask: relu_inshape(module_masks, mask),
+    'aten::sigmoid': lambda module_masks, mask: relu_inshape(module_masks, mask),
     'Conv2d': lambda module_masks, mask: conv2d_inshape(module_masks, mask),
     'MaxPool2d': lambda module_masks, mask: maxpool2d_inshape(module_masks, mask),
     'aten::max_pool2d': lambda module_masks, mask: maxpool2d_inshape(module_masks, mask),
@@ -243,6 +245,10 @@ def __repr__(self):
     'BatchNorm2d': lambda module_masks, mask: batchnorm2d_inshape(module_masks, mask),
     'aten::add_': lambda module_masks, mask: add_inshape(module_masks, mask),
     'aten::add': lambda module_mask, mask: add_inshape(module_mask, mask),
+    # mul behaves like add: both require
+    # their input tensors to have the same shape
+    'aten::mul': lambda module_mask, mask: add_inshape(module_mask, mask),
+    'aten::mul_': lambda module_mask, mask: add_inshape(module_mask, mask),
Contributor: How does the XXX_inshape function work when an operator has multiple inputs?

Contributor Author: XXX_inshape cannot receive multiple inputs: the trace breaks a chained expression down into pairwise operations.
For example, in a += b + c + d (a, b, c, d are tensors), the input passed to add_inshape for the final in-place add is the single result of b + c + d, as the trace below shows.

FYI

>>> class tmp(torch.nn.Module):
...     def forward(self, x):
...             ones = torch.ones_like(x)
...             one_2 = torch.ones_like(x)
...             one_3 = torch.ones_like(x)
...             x += one_2 + one_3 + ones
...             return x
...
>>> graph = torch.jit.trace(tmp(), torch.ones(4))
>>> torch._C._jit_pass_inline(graph.graph)
>>> graph.graph
graph(%self : __torch__.torch.nn.modules.module.Module,
      %x : Float(4)):
  %9 : int = prim::Constant[value=6]() # <stdin>:4:0
  %10 : int = prim::Constant[value=0]() # <stdin>:4:0
  %11 : Device = prim::Constant[value="cpu"]() # <stdin>:4:0
  %12 : bool = prim::Constant[value=0]() # <stdin>:4:0
  %13 : None = prim::Constant()
  %ones : Float(4) = aten::ones_like(%x, %9, %10, %11, %12, %13) # <stdin>:4:0
  %15 : int = prim::Constant[value=6]() # <stdin>:5:0
  %16 : int = prim::Constant[value=0]() # <stdin>:5:0
  %17 : Device = prim::Constant[value="cpu"]() # <stdin>:5:0
  %18 : bool = prim::Constant[value=0]() # <stdin>:5:0
  %19 : None = prim::Constant()
  %one_2 : Float(4) = aten::ones_like(%x, %15, %16, %17, %18, %19) # <stdin>:5:0
  %21 : int = prim::Constant[value=6]() # <stdin>:6:0
  %22 : int = prim::Constant[value=0]() # <stdin>:6:0
  %23 : Device = prim::Constant[value="cpu"]() # <stdin>:6:0
  %24 : bool = prim::Constant[value=0]() # <stdin>:6:0
  %25 : None = prim::Constant()
  %one_3 : Float(4) = aten::ones_like(%x, %21, %22, %23, %24, %25) # <stdin>:6:0
  %27 : int = prim::Constant[value=1]() # <stdin>:7:0
  %28 : Float(4) = aten::add(%one_2, %one_3, %27) # <stdin>:7:0
  %29 : int = prim::Constant[value=1]() # <stdin>:7:0
  %30 : Float(4) = aten::add(%28, %ones, %29) # <stdin>:7:0
  %31 : int = prim::Constant[value=1]() # <stdin>:7:0
  %32 : Float(4) = aten::add_(%x, %30, %31) # <stdin>:7:0
  return (%32)

     'aten::cat': lambda module_mask, mask, cat_info, last_visited: cat_inshape(module_mask, mask, cat_info, last_visited),
     'aten::mean': lambda module_masks, mask, shape: mean_inshape(module_masks, mask, shape),
     'Dropout': lambda module_masks, mask: dropout_inshape(module_masks, mask),
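To make the aten::mul addition concrete: elementwise mul, like elementwise add, only works when its tensor operands share a shape, so whatever channel mask is inferred for the inputs also describes the output and can be propagated unchanged, which is why both operators reuse add_inshape. A minimal illustration of that reasoning follows; propagate_elementwise and channel_mask are made-up names for this sketch, not NNI's API.

import torch

def propagate_elementwise(in_mask: torch.Tensor) -> torch.Tensor:
    # add/mul preserve the operand shape, so the input mask is reused as
    # the output mask without any reshaping or re-indexing.
    return in_mask

x = torch.randn(1, 4, 8, 8)
y = torch.randn(1, 4, 8, 8)
channel_mask = torch.tensor([1., 0., 1., 1.])   # channel 1 pruned upstream
out = x * y                                     # same shape as both inputs
out_mask = propagate_elementwise(channel_mask)
assert out.shape == x.shape
assert torch.equal(out_mask, channel_mask)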
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/compression/torch/utils/mask_conflict.py
@@ -284,7 +284,7 @@ def fix_mask(self):
 ori_channels = w_shape[0]
 for i in channel_remain:
     mask['weight'][i] = torch.ones(w_shape[1:])
-    if hasattr(mask, 'bias'):
+    if 'bias' in mask and mask['bias'] is not None:
         mask['bias'][i] = 1
 _logger.info(','.join(dset))
 _logger.info('Pruned Filters after fixing conflict:')
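The one-line mask_conflict change matters because mask is a plain dict: hasattr(mask, 'bias') tests for an attribute, which a dict never exposes for its keys, so the old branch was never taken and the bias mask was never repaired. A quick sketch of the difference, with made-up tensor values purely for illustration:

import torch

mask = {'weight': torch.ones(4, 3, 3, 3), 'bias': torch.zeros(4)}

print(hasattr(mask, 'bias'))                         # False: dict keys are not attributes
print('bias' in mask and mask['bias'] is not None)   # True: the corrected check

# With the corrected check the bias entry is actually restored for the
# channels that the conflict fix keeps, e.g. channel 1 here.
mask['bias'][1] = 1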