From adccbf14c77ba73dd064653c0461739e3b2191ac Mon Sep 17 00:00:00 2001
From: John
Date: Tue, 17 Nov 2020 15:06:09 -0800
Subject: [PATCH] Koenvandesande remove duplicate filenames (#448)

* Remove duplicate filenames which do not work on Windows by merging files

* Fix

* relu tests

Co-authored-by: Koen van de Sande
---
 torch2trt/converters/Identity.py | 11 ---------
 torch2trt/converters/ReLU.py     | 11 ---------
 torch2trt/converters/ReLU6.py    | 23 ------------------
 torch2trt/converters/__init__.py |  3 ---
 torch2trt/converters/identity.py | 12 +++++++++-
 torch2trt/converters/relu.py     | 30 ++++++++++++++++++++++---
 torch2trt/converters/relu6.py    | 38 +++++++++++++++++++++++++++++---
 7 files changed, 73 insertions(+), 55 deletions(-)
 delete mode 100644 torch2trt/converters/Identity.py
 delete mode 100644 torch2trt/converters/ReLU.py
 delete mode 100644 torch2trt/converters/ReLU6.py

diff --git a/torch2trt/converters/Identity.py b/torch2trt/converters/Identity.py
deleted file mode 100644
index 1934666d..00000000
--- a/torch2trt/converters/Identity.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from torch2trt.torch2trt import *
-
-
-@tensorrt_converter('torch.nn.Dropout.forward')
-@tensorrt_converter('torch.nn.Dropout2d.forward')
-@tensorrt_converter('torch.nn.Dropout3d.forward')
-def convert_Identity(ctx):
-    input = ctx.method_args[1]
-    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
-    output = ctx.method_return
-    output._trt = input_trt
\ No newline at end of file
diff --git a/torch2trt/converters/ReLU.py b/torch2trt/converters/ReLU.py
deleted file mode 100644
index 481f4b8a..00000000
--- a/torch2trt/converters/ReLU.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from torch2trt.torch2trt import *
-
-
-@tensorrt_converter('torch.nn.ReLU.forward')
-def convert_ReLU(ctx):
-    input = ctx.method_args[1]
-    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
-    output = ctx.method_return
-    layer = ctx.network.add_activation(
-        input=input_trt, type=trt.ActivationType.RELU)
-    output._trt = layer.get_output(0)
\ No newline at end of file
diff --git a/torch2trt/converters/ReLU6.py b/torch2trt/converters/ReLU6.py
deleted file mode 100644
index c452693c..00000000
--- a/torch2trt/converters/ReLU6.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from torch2trt.torch2trt import *
-from torch2trt.module_test import add_module_test
-
-
-@tensorrt_converter('torch.nn.ReLU6.forward')
-def convert_ReLU6(ctx):
-    input = ctx.method_args[1]
-    output = ctx.method_return
-
-    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input, 6])
-    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
-
-    layer = ctx.network.add_activation(
-        input=input_a_trt, type=trt.ActivationType.RELU)
-    layer = ctx.network.add_elementwise(
-        layer.get_output(0), input_b_trt, trt.ElementWiseOperation.MIN)
-
-    output._trt = layer.get_output(0)
-
-
-@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
-def test_relu6_basic():
-    return torch.nn.ReLU6()
diff --git a/torch2trt/converters/__init__.py b/torch2trt/converters/__init__.py
index 663710e0..08164476 100644
--- a/torch2trt/converters/__init__.py
+++ b/torch2trt/converters/__init__.py
@@ -12,11 +12,8 @@
 from .Conv2d import *
 from .ConvTranspose import *
 from .ConvTranspose2d import *
-from .Identity import *
 from .Linear import *
 from .LogSoftmax import *
-from .ReLU import *
-from .ReLU6 import *
 from .activation import *
 from .adaptive_avg_pool2d import *
 from .adaptive_max_pool2d import *
diff --git a/torch2trt/converters/identity.py b/torch2trt/converters/identity.py
index bac1bd99..f7ef1f97 100644
--- a/torch2trt/converters/identity.py
+++ b/torch2trt/converters/identity.py
@@ -5,8 +5,18 @@
 @tensorrt_converter('torch.nn.functional.dropout')
 @tensorrt_converter('torch.nn.functional.dropout2d')
 @tensorrt_converter('torch.nn.functional.dropout3d')
-def convert_identity(ctx):
+def convert_functional_identity(ctx):
     input = ctx.method_args[0]
     input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
     output = ctx.method_return
     output._trt = input_trt
+
+
+@tensorrt_converter('torch.nn.Dropout.forward')
+@tensorrt_converter('torch.nn.Dropout2d.forward')
+@tensorrt_converter('torch.nn.Dropout3d.forward')
+def convert_identity(ctx):
+    input = ctx.method_args[1]
+    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
+    output = ctx.method_return
+    output._trt = input_trt
diff --git a/torch2trt/converters/relu.py b/torch2trt/converters/relu.py
index 37f71167..c58405c1 100644
--- a/torch2trt/converters/relu.py
+++ b/torch2trt/converters/relu.py
@@ -1,11 +1,35 @@
 from torch2trt.torch2trt import *
-from .ReLU import *
+from torch2trt.module_test import add_module_test
 
 
 @tensorrt_converter('torch.relu')
 @tensorrt_converter('torch.relu_')
 @tensorrt_converter('torch.nn.functional.relu')
 @tensorrt_converter('torch.nn.functional.relu_')
-def convert_relu(ctx):
+def convert_functional_relu(ctx):
     ctx.method_args = (torch.nn.ReLU(),) + ctx.method_args
-    convert_ReLU(ctx)
\ No newline at end of file
+    convert_relu(ctx)
+
+
+@tensorrt_converter('torch.nn.ReLU.forward')
+def convert_relu(ctx):
+    input = ctx.method_args[1]
+    input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
+    output = ctx.method_return
+    layer = ctx.network.add_activation(
+        input=input_trt, type=trt.ActivationType.RELU)
+    output._trt = layer.get_output(0)
+
+@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
+def test_relu_basic():
+    return torch.nn.ReLU()
+
+
+class FunctionalRelu(torch.nn.Module):
+    def forward(self, x):
+        return torch.nn.functional.relu(x)
+
+
+@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
+def test_functional_relu_basic():
+    return FunctionalRelu()
\ No newline at end of file
diff --git a/torch2trt/converters/relu6.py b/torch2trt/converters/relu6.py
index fc4e6ec0..0d809aa4 100644
--- a/torch2trt/converters/relu6.py
+++ b/torch2trt/converters/relu6.py
@@ -1,8 +1,40 @@
 from torch2trt.torch2trt import *
-from .ReLU6 import *
+from torch2trt.module_test import add_module_test
 
 
 @tensorrt_converter('torch.nn.functional.relu6')
-def convert_relu6(ctx):
+def convert_functional_relu6(ctx):
     ctx.method_args = (torch.nn.ReLU6(),) + ctx.method_args
-    convert_ReLU6(ctx)
\ No newline at end of file
+    convert_relu6(ctx)
+
+
+@tensorrt_converter('torch.nn.ReLU6.forward')
+def convert_relu6(ctx):
+    input = ctx.method_args[1]
+    output = ctx.method_return
+
+    input_a_trt, input_b_trt = add_missing_trt_tensors(ctx.network, [input, 6])
+    input_a_trt, input_b_trt = broadcast_trt_tensors(ctx.network, [input_a_trt, input_b_trt], len(output.shape) - 1)
+
+    layer = ctx.network.add_activation(
+        input=input_a_trt, type=trt.ActivationType.RELU)
+    layer = ctx.network.add_elementwise(
+        layer.get_output(0), input_b_trt, trt.ElementWiseOperation.MIN)
+
+    output._trt = layer.get_output(0)
+
+
+@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
+def test_relu6_basic():
+    return torch.nn.ReLU6()
+
+
+class FunctionalRelu6(torch.nn.Module):
+    def forward(self, x):
+        return torch.nn.functional.relu6(x)
+
+
+@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 4, 5)])
+def test_functional_relu6_basic():
+    return FunctionalRelu6()
+
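
Note: after this merge, the module and functional converters for each op live in one lowercase file, so either spelling (torch.nn.ReLU6 or torch.nn.functional.relu6) reaches the same converter. A minimal usage sketch follows, assuming a CUDA device with TensorRT and torch2trt installed; the model and tensor names are illustrative and not part of the patch:

    import torch
    from torch2trt import torch2trt

    # Any module covered by a registered converter works here; ReLU6
    # exercises the RELU activation + element-wise MIN path added above.
    model = torch.nn.Sequential(torch.nn.ReLU6()).cuda().eval()
    x = torch.randn(1, 3, 4, 5).cuda()

    # torch2trt traces the forward pass and builds a TensorRT engine from
    # the converters registered with @tensorrt_converter.
    model_trt = torch2trt(model, [x])
    print(torch.max(torch.abs(model(x) - model_trt(x))))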