Skip to content

Commit

Permalink
[Torch] Support hard_swish op (apache#7174)
Browse files Browse the repository at this point in the history
* imp_hardswish

* format

* fix

* hard_swish_inplace test case
  • Loading branch information
Xuxue1 authored and trevor-m committed Jan 21, 2021
1 parent e372d2f commit 8327757
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 3 deletions.
11 changes: 11 additions & 0 deletions python/tvm/relay/frontend/pytorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -790,6 +790,15 @@ def log_sigmoid(self, inputs, input_types):
data = inputs[0]
return _op.log(_op.tensor.sigmoid(data))

def hard_swish(self, inputs, input_types):
    """Convert aten::hardswish / aten::hardswish_ to Relay ops.

    Hardswish is defined as x * relu6(x + 3) / 6, where relu6 is
    clip(x, 0, 6). Built here from Relay clip, add, multiply, divide.
    """
    x = inputs[0]
    dtype = input_types[0]
    # Constants must match the input dtype for Relay type checking.
    three = _expr.const(3.0, dtype=dtype)
    six = _expr.const(6.0, dtype=dtype)
    # relu6(x + 3) realized directly as a clip to [0, 6].
    gate = _op.tensor.clip(x + three, 0.0, 6.0)
    return x * gate / six

def adaptive_avg_pool_2d(self, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
Expand Down Expand Up @@ -2266,6 +2275,8 @@ def create_convert_map(self):
"aten::bincount": self.bincount,
"aten::scatter_add": self.scatter_add,
"aten::__not__": self.logical_not,
"aten::hardswish_": self.hard_swish,
"aten::hardswish": self.hard_swish,
}

def update_convert_map(self, custom_map):
Expand Down
14 changes: 11 additions & 3 deletions tests/python/frontend/pytorch/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,14 +181,14 @@ def verify_model(model_name, input_data=[], custom_convert_map={}, rtol=1e-5, at
baseline_input = [inp.cuda() for inp in baseline_input]

with torch.no_grad():
baseline_outputs = baseline_model(*baseline_input)
baseline_outputs = baseline_model(*[input.clone() for input in baseline_input])

if isinstance(baseline_outputs, tuple):
baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)
else:
baseline_outputs = (baseline_outputs.cpu().numpy(),)

trace = torch.jit.trace(baseline_model, baseline_input)
trace = torch.jit.trace(baseline_model, [input.clone() for input in baseline_input])
if isinstance(baseline_model, torch.nn.Module):
trace = trace.float().eval()

Expand All @@ -200,7 +200,7 @@ def verify_model(model_name, input_data=[], custom_convert_map={}, rtol=1e-5, at
input_names = ["input{}".format(idx) for idx, inp in enumerate(baseline_input)]
input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
compiled_input = dict(zip(input_names, [inp.cpu().numpy() for inp in baseline_input]))
compiled_input = dict(zip(input_names, [inp.clone().cpu().numpy() for inp in baseline_input]))

with tvm.transform.PassContext(opt_level=3):
for target, ctx in tvm.testing.enabled_targets():
Expand Down Expand Up @@ -3437,6 +3437,13 @@ def test_fn(x, weights=None):
verify_trace_model(test_fn, [inp, weights], targets)


def test_hard_swish():
    """Test conversion of aten::hardswish and aten::hardswish_ (in-place).

    Covers 1-D, 2-D, and 3-D inputs; the in-place variant is exercised
    separately because it traces to a different ATen op.
    """
    examples = [torch.rand(8).float(), torch.rand(8, 10).float(), torch.rand(1, 1, 10).float()]
    # `input_data` rather than `input`: avoid shadowing the builtin `input`.
    for input_data in examples:
        verify_model(torch.nn.Hardswish().eval(), input_data=input_data)
        verify_model(torch.nn.Hardswish(inplace=True).eval(), input_data=input_data)


if __name__ == "__main__":
# some structural tests
test_forward_traced_function()
Expand Down Expand Up @@ -3603,3 +3610,4 @@ def test_fn(x, weights=None):

# Test convert torch script(jit) with specific inputs' types
test_convert_torch_script_with_input_types()
test_hard_swish()

0 comments on commit 8327757

Please sign in to comment.