AtenSortOp Folder (llvm#2864)
A chunk off llvm#2856 and llvm#2860.

---------

Co-authored-by: Xida Ren <xida.ren.dev@gmail.com>
Co-authored-by: Rob Suderman <rob.suderman@gmail.com>
3 people authored Feb 6, 2024
1 parent faf7d4a commit cc06391
Showing 4 changed files with 77 additions and 1 deletion.
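In short: when the dimension being sorted is statically known to have size 1, sorting is the identity, so `torch.aten.sort` can fold to its input plus an all-zero indices tensor. A minimal before/after sketch (names such as %descending are illustrative; the rewrite is exercised by the canonicalizer, e.g. `torch-mlir-opt --canonicalize`), mirroring the regression tests added below:

// Before canonicalization: sorting a single-element tensor.
%values, %indices = torch.aten.sort %arg0, %dim, %descending : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>

// After folding: %values is replaced by %arg0 directly, and the
// indices collapse to a constant zero literal.
%indices = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>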
1 change: 1 addition & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -12559,6 +12559,7 @@ def Torch_AtenSortOp : Torch_Op<"aten.sort", [
       printDefaultTorchOp(printer, *this, 3, 2);
     }
   }];
+  let hasFolder = 1;
 }

 def Torch_AtenSplitTensorOp : Torch_Op<"aten.split.Tensor", [
46 changes: 46 additions & 0 deletions lib/Dialect/Torch/IR/TorchOps.cpp
@@ -1710,6 +1710,52 @@ void AtenSortIntOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
  });
}

//===----------------------------------------------------------------------===//
// AtenSortOp
//===----------------------------------------------------------------------===//

LogicalResult AtenSortOp::fold(FoldAdaptor adaptor,
                               SmallVectorImpl<OpFoldResult> &results) {
  auto operand = getSelf();
  auto operandType = dyn_cast<BaseTensorType>(operand.getType());
  if (!operandType || !operandType.hasSizes())
    return failure();

  // Only ValueTensorType has toBuiltinTensor.
  auto indicesTensorType = dyn_cast<ValueTensorType>(getResult(1).getType());
  if (!indicesTensorType)
    return failure();

  if (!indicesTensorType.hasDtype())
    return failure();
  auto indicesType =
      indicesTensorType.toBuiltinTensor().clone(indicesTensorType.getDtype());
  if (!indicesType || !indicesType.hasStaticShape())
    return failure();

  IntegerAttr dimAttribute = dyn_cast_if_present<IntegerAttr>(adaptor.getDim());
  if (!dimAttribute)
    return failure();
  int64_t dimInt = dimAttribute.getValue().getSExtValue();
  if (dimInt < 0)
    dimInt += operandType.getSizes().size();
  bool unaryDim = operandType.getSizes()[dimInt] == 1;

  OpBuilder builder(getContext());
  if (unaryDim || llvm::all_of(operandType.getSizes(),
                               [](int64_t dim) { return dim == 1; })) {
    results.push_back(operand);
    results.push_back(DenseElementsAttr::get(
        indicesType, builder.getZeroAttr(indicesType.getElementType())));
    return success();
  }

  return failure();
}

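The guards above mean the fold fires only when `dim` is a constant, the input has known sizes, and the indices result has a static shape and dtype; it applies even when only the sorted dimension is unit-sized. A sketch of that case (argument names illustrative), matching the $unary_dim regression test below:

// Sorting a [3,1,4] tensor along dim 1 permutes nothing, so the op folds away.
%dim = torch.constant.int 1
%values, %indices = torch.aten.sort %arg0, %dim, %descending : !torch.vtensor<[3,1,4],si64>, !torch.int, !torch.bool -> !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
// Folds to %arg0 plus torch.vtensor.literal(dense<0> : tensor<1xsi64>).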
//===----------------------------------------------------------------------===//
// NonValueTensorLiteralOp
//===----------------------------------------------------------------------===//
@@ -728,7 +728,7 @@ def emit_with_mutating_variants(key, **kwargs):
emit("aten::ne.int_list : (int[], int[]) -> (bool)")
emit("aten::any.bool : (bool[]) -> (bool)", has_folder=True)
emit("aten::sort.int : (int[], bool) -> ()", has_canonicalizer=True)
emit("aten::sort : (Tensor, int, bool) -> (Tensor, Tensor)")
emit("aten::sort : (Tensor, int, bool) -> (Tensor, Tensor)", has_folder=True)
emit("aten::split.Tensor : (Tensor, int, int) -> (Tensor[])")
emit("aten::split_with_sizes : (Tensor, int[], int) -> (Tensor[])")
emit("aten::unbind.int : (Tensor, int) -> (Tensor[])")
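For reference, the `has_folder=True` flag in this generator is what emits the `let hasFolder = 1;` line in GeneratedTorchOps.td shown above; the hand-written `AtenSortOp::fold` in TorchOps.cpp supplies the matching definition.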
29 changes: 29 additions & 0 deletions test/Dialect/Torch/canonicalize.mlir
@@ -2012,6 +2012,35 @@ func.func @torch.aten.sort.int$reverse_true() -> !torch.list<int> {
  return %0 : !torch.list<int>
}

// CHECK-LABEL: @torch.aten.sort$unary_element
// CHECK: %[[INDICES:.*]] = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
// CHECK-NOT: torch.aten.sort %arg
// CHECK: return %arg0, %[[INDICES]] : !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
func.func @torch.aten.sort$unary_element(%arg0 : !torch.vtensor<[1],si64>, %arg1 : !torch.int, %arg2 : !torch.bool) -> (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) {
  %0, %1 = torch.aten.sort %arg0, %arg1, %arg2 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
  return %0, %1 : !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
}


// CHECK-LABEL: @torch.aten.sort$unary_dim
// CHECK: %[[INDICES:.*]] = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
// CHECK-NOT: torch.aten.sort %arg
// CHECK: return %arg0, %[[INDICES]] : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
func.func @torch.aten.sort$unary_dim(%arg0 : !torch.vtensor<[3,1,4],si64>, %arg1 : !torch.bool) -> (!torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>) {
  %dim = torch.constant.int 1
  %0, %1 = torch.aten.sort %arg0, %dim, %arg1 : !torch.vtensor<[3,1,4],si64>, !torch.int, !torch.bool -> !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
  return %0, %1 : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[1],si64>
}

// CHECK-LABEL: @torch.aten.sort$nofold
// CHECK: torch.aten.sort %arg
func.func @torch.aten.sort$nofold(%arg0 : !torch.vtensor<[3,1,4],si64>, %arg1 : !torch.bool) -> (!torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>) {
  %dim = torch.constant.int 0
  %0, %1 = torch.aten.sort %arg0, %dim, %arg1 : !torch.vtensor<[3,1,4],si64>, !torch.int, !torch.bool -> !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>
  return %0, %1 : !torch.vtensor<[3,1,4],si64>, !torch.vtensor<[3],si64>
}


// CHECK-LABEL: @torch.aten.cat$fold_single_operand
// CHECK-SAME: %[[ARG0:.+]]: !torch.tensor
// CHECK: return %[[ARG0]] : !torch.tensor
