[torch-mlir][NFC] remove trailing whitespace (llvm#2936)
aartbik authored Feb 20, 2024
1 parent 13113df commit 534b266
Showing 10 changed files with 38 additions and 38 deletions.
2 changes: 1 addition & 1 deletion projects/pt1/python/torch_mlir/compiler_utils.py
@@ -64,7 +64,7 @@ def run_pipeline_with_repro_report(module,
{sys.stderr.getvalue()}
python exception: {e}
For Torch-MLIR developers, the error can be reproduced with:
$ torch-mlir-opt -pass-pipeline='{pipeline}' {filename}
Add '{debug_options}' to get the IR dump for debugging purpose.
4 changes: 2 additions & 2 deletions projects/pt1/python/torch_mlir/dynamo.py
@@ -19,12 +19,12 @@

def _get_decomposition_table():
"""Get a decomposition table suitable for Torch-MLIR.
Sometimes TorchDynamo traces slightly different ops than what TorchScript
captures. Historically we have been driven by the ops captured by
TorchScript, so we try to decompose the ops captured by TorchDynamo into
other ops that we already support.
There isn't a highly principled solution here. Torch-MLIR currently supports
a somewhat random set of ops, added in a demand-driven way over time,
including direct backend support and decompositions internal to Torch-MLIR.
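(Aside, not part of this commit: a minimal sketch of what a decomposition table is, assuming a recent PyTorch where torch._decomp and make_fx are available; the ops listed are illustrative choices, not the set Torch-MLIR actually registers.)

    import torch
    from torch._decomp import get_decompositions
    from torch.fx.experimental.proxy_tensor import make_fx

    # A decomposition table maps ATen ops to Python re-implementations in terms
    # of other, simpler ops; the two entries below are examples only.
    decomp_table = get_decompositions([
        torch.ops.aten.addmm,
        torch.ops.aten.native_layer_norm,
    ])

    def f(x, w, b):
        return torch.addmm(b, x, w)

    # Tracing with the table replaces aten.addmm by its decomposition
    # (a matmul plus an add) in the resulting FX graph.
    traced = make_fx(f, decomposition_table=decomp_table)(
        torch.randn(2, 3), torch.randn(3, 4), torch.randn(4))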
@@ -81,7 +81,7 @@ def aten〇diagonal〡shape(self: List[int], offset: int = 0, dim1: int = 0, dim
pass
else:
diagonal.append(self_dim)

diag_size = max(min(self[dim1], self[dim2] - offset), 0)
if offset<0:
diag_size = max(min(self[dim1] + offset, self[dim2]), 0)
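(Aside, not part of the diff: the diag_size formula above matches eager PyTorch; a quick illustrative check.)

    import torch

    # 3x5 matrix: offset=1  -> max(min(3, 5 - 1), 0) = 3 diagonal elements
    #             offset=-1 -> max(min(3 - 1, 5), 0) = 2 diagonal elements
    assert torch.empty(3, 5).diagonal(offset=1).shape == torch.Size([3])
    assert torch.empty(3, 5).diagonal(offset=-1).shape == torch.Size([2])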
@@ -295,10 +295,10 @@ def prims〇collapse〡shape(a: List[int], start: int, end: int) -> List[int]:
assert end >= 0, "end out of bounds"
assert start <= end, "start must be less than or equal to end"

- # Examples:
+ # Examples:
#
# torch._prims.collapse(torch.empty(2,3,4), 1,2).shape
- # is
+ # is
# torch.Size([2, 12])
#
# torch._prims.collapse(torch.empty(2,3,4), 1,3).shape
@@ -592,7 +592,7 @@ def aten〇pixel_shuffle〡shape(self: List[int], upscale_factor: int) -> List[i
assert len(self) >= 3, "input must be at least rank-3 in pixel_shuffle"
upscale_factor_squared = upscale_factor * upscale_factor
assert self[-3] % (upscale_factor_squared) == 0, "number of input channels must be divisible by upscale_factor^2 in pixel_shuffle"

out = self[0:-3]
out.append(self[-3] // upscale_factor_squared)
out.append(self[-2] * upscale_factor)
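(Aside, not part of the commit: the shape computed above mirrors eager pixel_shuffle, which reshapes (..., C*r*r, H, W) to (..., C, H*r, W*r); a small illustrative check.)

    import torch

    # 8 input channels with upscale_factor=2 give 8 // 4 = 2 output channels,
    # and each spatial dimension is multiplied by 2.
    out = torch.pixel_shuffle(torch.empty(1, 8, 4, 4), 2)
    assert out.shape == torch.Size([1, 2, 8, 8])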
@@ -756,7 +756,7 @@ def _max_pool3d(
assert (
len(stride) == 0 or len(stride) == 1 or len(stride) == 3
), "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints"

if len(stride) == 0:
(dD, dH, dW) = (kD, kD, kD)
elif len(stride) == 1:
@@ -808,14 +808,14 @@ def _max_pool3d(
return [nInputPlane, outputDepth, outputHeight, outputWidth]
else:
return [nbatch, nInputPlane, outputDepth, outputHeight, outputWidth]

def aten〇max_pool2d〡shape(self: List[int], kernel_size: List[int], stride: List[int] = (), padding: List[int] = (0, 0,), dilation: List[int] = (1, 1,), ceil_mode: bool = False) -> List[int]:
return upstream_shape_functions.max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode)

@check_shape_function([
Invocation(TensorOfShape(3, 6, 10, 10, 10), [2]), # Basic using defaults
Invocation(TensorOfShape(3, 6, 10, 10, 10), [4], [2], [2], [2]), # Using single values for each parameter
- Invocation(TensorOfShape(3, 6, 64, 64, 64), [4, 6, 8], [2, 4, 2], [1, 2, 4], [1, 2, 4]), # Using dimensions should be
+ Invocation(TensorOfShape(3, 6, 64, 64, 64), [4, 6, 8], [2, 4, 2], [1, 2, 4], [1, 2, 4]), # Using dimensions should be
ErrorInvocation(TensorOfShape(3, 6, 2, 2, 2), [4]), # Input is too small
ErrorInvocation(TensorOfShape(3, 6, 10, 10, 10), [4], [2], [4], [2]), # The following relationship between kernel and padding needs to apply: Kernel size >= 2 * padding size
])
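(Aside, not part of the diff: a quick eager-mode check of the kind of shapes those invocations exercise, under default stride/padding assumptions.)

    import torch
    import torch.nn.functional as F

    # With kernel_size=2 and the defaults (stride = kernel_size, padding = 0,
    # dilation = 1, ceil_mode = False) each spatial dim becomes
    # (10 - 2) // 2 + 1 = 5.
    out = F.max_pool3d(torch.empty(3, 6, 10, 10, 10), kernel_size=2)
    assert out.shape == torch.Size([3, 6, 5, 5, 5])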
@@ -1374,15 +1374,15 @@ def aten〇conv_tbc〡shape(self: List[int], weight: List[int], bias: List[int],

assert channels == channels_w
# the out_channels in weights and biases should also match, but this assert doesn't work because typing problems
- # assert out_channels == out_channels_b
+ # assert out_channels == out_channels_b

self_bct = [batch, channels, time]
weight_bct = [out_channels, channels, kernel_width]
bias_bct = bias

- # use existing shape inf
+ # use existing shape inf
output_size_bct = upstream_shape_functions.conv_forwards(self, weight, bias, stride=[1], padding=[pad], dilation=[], transposed=False, output_padding=[], groups=1)

batch_out, channels_out, time_out = output_size_bct

# bct -> tbc
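(Aside, not part of the diff: torch.conv_tbc takes a (time, batch, channels) input and a (kernel_width, channels, out_channels) weight; with pad=0 the output keeps the tbc layout and time_out = time - kernel_width + 1. The sizes below mirror the ConvTbcModule test elsewhere in this commit.)

    import torch

    x = torch.randn(9, 4, 5)       # (time=9, batch=4, channels=5)
    weight = torch.randn(3, 5, 6)  # (kernel_width=3, channels=5, out_channels=6)
    bias = torch.randn(6)

    # time_out = 9 - 3 + 1 = 7; layout stays (time, batch, out_channels).
    assert torch.conv_tbc(x, weight, bias).shape == torch.Size([7, 4, 6])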
@@ -1544,7 +1544,7 @@ def aten〇replication_pad2d〡shape(self: List[int], padding: List[int]) -> Lis
return pad_shape_fn(self, padding)

def aten〇replication_pad2d〡dtype(self_rank_dtype: Tuple[int, int], padding: List[int]) -> int:
- self_rank, self_dtype = self_rank_dtype
+ self_rank, self_dtype = self_rank_dtype
return self_dtype

def aten〇pad〡shape(self: List[int], pad: List[int], mode: str = "constant", value: Optional[float] = None) -> List[int]:
@@ -3618,7 +3618,7 @@ def aten〇where〇ScalarSelf〡dtype(condition_rank_dtype: Tuple[int, int], sel
@check_dtype_function(
_check_tensors_with_the_same_dtype(num_of_tensors=1))
def aten〇nan_to_num〡dtype(self_rank_dtype: Tuple[int, int], nan: Optional[float] = None, posinf: Optional[float] = None, neginf: Optional[float] = None) -> int:
- self_rank, self_dtype = self_rank_dtype
+ self_rank, self_dtype = self_rank_dtype
return self_dtype

@check_dtype_function(
@@ -4258,7 +4258,7 @@ def aten〇cat〡dtype(tensors_rank_dtype: List[Tuple[int, int]], dim: int = 0)
return promote_dtypes(ranks, dtypes)

@check_dtype_function(
[Invocation("i,j->ij", [TensorOfShape(1, dtype=torch.float32),
[Invocation("i,j->ij", [TensorOfShape(1, dtype=torch.float32),
TensorOfShape(1, dtype=torch.int32)]),])
def aten〇einsum〡dtype(equation: str, tensors_rank_dtype: List[Tuple[int, int]], path: Optional[List[int]] = None) -> int:
ranks: List[Optional[int]] = []
2 changes: 1 addition & 1 deletion projects/pt1/python/torch_mlir_e2e_test/framework.py
@@ -324,7 +324,7 @@ def compile_and_run_test(test: Test, config: TestConfig, verbose=False) -> Any:


def run_tests(tests: List[Test], config: TestConfig, sequential=False, verbose=False) -> List[TestResult]:
"""Invoke the given `Test`'s with the provided `TestConfig`."""
"""Invoke the given `Test`'s with the provided `TestConfig`."""
num_processes = min(int(mp.cpu_count() * 0.8) + 1, len(tests))
try:
env_concurrency = int(os.getenv("TORCH_MLIR_TEST_CONCURRENCY", "0"))
@@ -49,7 +49,7 @@ def compile(self, imported_module: Module):
imported_module,
f"builtin.module(func.func({ONNX_TO_TORCH_FUNC_PIPELINE}))",
"Lowering Onnx backend contract to Linalg-on-Tensors backend contract")

run_pipeline_with_repro_report(
imported_module,
f"builtin.module(torch-lower-to-backend-contract)",
2 changes: 1 addition & 1 deletion projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
@@ -853,7 +853,7 @@ def __init__(self):
])
def forward(self, x, weight, bias):
return torch.conv_tbc(x, weight, bias)

@register_test_case(module_factory=lambda: ConvTbcModule())
def ConvTbcModule_basic(module, tu: TestUtils):
module.forward(tu.rand(9, 4, 5), tu.rand(3, 5, 6), tu.rand(6))
@@ -94,7 +94,7 @@ def __init__(self):

@export
@annotate_args([
- None,
+ None,
([-1, -1], torch.float32, True),
])
def forward(self, a):
@@ -3833,7 +3833,7 @@ def __init__(self):
([2, 5], torch.float32, True),
])
def forward(self, x):
- return torch.ops.aten.isposinf(x)
+ return torch.ops.aten.isposinf(x)

@register_test_case(module_factory=lambda: ElementwiseAtenIsposinfOpModule())
def ElementwiseAtenIsposinfOpModule_basic(module, tu:TestUtils):
16 changes: 8 additions & 8 deletions projects/pt1/python/torch_mlir_e2e_test/test_suite/pooling.py
@@ -945,7 +945,7 @@ def AvgPool1dStaticModule_basic(module, tu: TestUtils):
# ==============================================================================

class AdaptiveAvgPool1dStaticLargerOutput(torch.nn.Module):

def __init__(self):
super().__init__()
self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=13)
@@ -965,7 +965,7 @@ def AdaptiveAvgPool1dStaticLargerOutput_basic(
module.forward(tu.rand(5, 512, 7))

class AdaptiveAvgPool1dStaticEvenMultiple(torch.nn.Module):

def __init__(self):
super().__init__()
self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7)
@@ -985,7 +985,7 @@ def AdaptiveAvgPool1dStaticEvenMultiple_basic(
module.forward(tu.rand(5, 512, 147))

class AdaptiveAvgPool1dGeneralDynamic(torch.nn.Module):

def __init__(self):
super().__init__()
self.aap1d = torch.nn.AdaptiveAvgPool1d(output_size=7)
@@ -1085,7 +1085,7 @@ def AdaptiveAvgPool1dUnitOutputSizeDynamicModule_basic(
module.forward(tu.rand(1, 512, 7))

class AdaptiveMaxPool2dDynamic(torch.nn.Module):

def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=False)
@@ -1105,7 +1105,7 @@ def AdaptiveMaxPool2dDynamic_basic(
module.forward(tu.rand(1, 512, 10, 16))

class AdaptiveMaxPool2dDynamicWithIndices(torch.nn.Module):

def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=True)
@@ -1123,10 +1123,10 @@ def forward(self,x):
def AdaptiveMaxPool2dDynamicWithIndices_basic(
module, tu: TestUtils):
module.forward(tu.rand(1, 512, 10, 16))


class AdaptiveMaxPool2dStatic(torch.nn.Module):

def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=False)
Expand All @@ -1146,7 +1146,7 @@ def AdaptiveMaxPool2dStatic_basic(
module.forward(tu.rand(1, 512, 10, 9))

class AdaptiveMaxPool2dStaticWithIndices(torch.nn.Module):

def __init__(self):
super().__init__()
self.amp2d = torch.nn.AdaptiveMaxPool2d(output_size=(7,13), return_indices=True)
16 changes: 8 additions & 8 deletions projects/pt1/python/torch_mlir_e2e_test/test_suite/reduction.py
@@ -327,13 +327,13 @@ def __init__(self):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=0, keepdim=False)

@register_test_case(module_factory=lambda: ReduceAllDimEmpty())
def ReduceAllDimEmpty_basic(module, tu: TestUtils):
module.forward(torch.tensor([]))

# ==============================================================================

class ReduceAllDimFloat(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -345,13 +345,13 @@ def __init__(self):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=1, keepdim=True)

@register_test_case(module_factory=lambda: ReduceAllDimFloat())
def ReduceAllDimFloat_basic(module, tu: TestUtils):
module.forward(torch.tensor([[5.0,1e-6,-5.0],[0,5.0,0]]))

# ==============================================================================

class ReduceAllDimInt(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -363,13 +363,13 @@ def __init__(self):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=1, keepdim=True)

@register_test_case(module_factory=lambda: ReduceAllDimInt())
def ReduceAllDimInt_basic(module, tu: TestUtils):
module.forward(torch.tensor([[5,-5,0],[5,1e10,5]]).to(torch.int32))

# ==============================================================================

class ReduceAllDimBool(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -381,13 +381,13 @@ def __init__(self):
])
def forward(self, a):
return torch.ops.aten.all(a, dim=1, keepdim=False)

@register_test_case(module_factory=lambda: ReduceAllDimBool())
def ReduceAllDimBool_basic(module, tu: TestUtils):
module.forward(torch.tensor([[True, False, True], [True, True, True]]))

# ==============================================================================

class ReduceMaxAlongDim(torch.nn.Module):
def __init__(self):
super().__init__()
