
Replace member calls with free function calls
Switches member calls of `isa`/`dyn_cast`/`cast` to the corresponding free
function calls, since the member variants are deprecated; see
https://mlir.llvm.org/deprecation/.
marbre committed Apr 19, 2024
1 parent fed3d5a commit db9772c
Showing 3 changed files with 10 additions and 10 deletions.
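
For readers following the linked deprecation notice, here is a minimal, self-contained sketch of the pattern this commit applies throughout the files below. It is illustration only, not code from this repository: `rankOf` and `unwrapTensorElementType` are hypothetical helpers, and the sketch assumes an MLIR build recent enough to provide the free-function casts.

```cpp
// Illustration only: hypothetical helpers showing the member-call style this
// commit removes and the free-function style it switches to.
#include "mlir/IR/BuiltinTypes.h" // RankedTensorType, TensorType
#include "mlir/IR/Value.h"

using namespace mlir;

// Returns the rank of a value whose type is known to be a RankedTensorType.
static int64_t rankOf(Value value) {
  // Deprecated member-call style (what the commit removes):
  //   value.getType().cast<RankedTensorType>().getRank()
  // Free-function style (what the commit introduces):
  return cast<RankedTensorType>(value.getType()).getRank();
}

// Unwraps the element type if the given type is a tensor; isa<> and
// dyn_cast<> migrate the same way as cast<>.
static Type unwrapTensorElementType(Type type) {
  if (!isa<TensorType>(type)) // was: type.isa<TensorType>()
    return type;
  auto tensorType = dyn_cast<TensorType>(type); // was: type.dyn_cast<TensorType>()
  return tensorType.getElementType();
}
```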
@@ -191,7 +191,7 @@ struct ConvertStablehloRegionOpsToEmitCPass

     SmallVector<Attribute, 2> arguments = indexSequence(operands.size(), ctx);

-    size_t dim = op.getResult(0).getType().cast<RankedTensorType>().getRank();
+    size_t dim = cast<RankedTensorType>(op.getResult(0).getType()).getRank();
     arguments.push_back(builder.getI64TensorAttr(op.getWindowDimensions()));
     arguments.push_back(builder.getI64TensorAttr(
         op.getWindowStrides().value_or(SmallVector<int64_t>(dim, 1))));
2 changes: 1 addition & 1 deletion lib/Conversion/TensorToEmitC/TensorToEmitC.cpp
@@ -40,7 +40,7 @@ class ExtractOpConversion : public OpConversionPattern<tensor::ExtractOp> {
     StringAttr callee = rewriter.getStringAttr("emitc::tensor::extract");

     Type elementType = indexCastOp.getType();
-    if (auto tensorType = elementType.dyn_cast<TensorType>()) {
+    if (auto tensorType = dyn_cast<TensorType>(elementType)) {
       elementType = tensorType.getElementType();
     }

16 changes: 8 additions & 8 deletions lib/Conversion/TosaToEmitC/TosaToEmitC.cpp
@@ -261,14 +261,14 @@ class ClampOpConversion : public OpConversionPattern<tosa::ClampOp> {
     // the min/max attribute type match the operand's element type and it's bit
     // width.
     auto elementType =
-        adaptor.getInput().getType().cast<RankedTensorType>().getElementType();
-    if (elementType.isa<IntegerType>()) {
+        cast<RankedTensorType>(adaptor.getInput().getType()).getElementType();
+    if (isa<IntegerType>(elementType)) {
       // Change the {min,max}_int type to the element type of the operand.
       auto minInt = clampOp.getMinInt();
       auto maxInt = clampOp.getMaxInt();
       arguments.push_back(IntegerAttr::get(elementType, minInt));
       arguments.push_back(IntegerAttr::get(elementType, maxInt));
-    } else if (elementType.isa<FloatType>()) {
+    } else if (isa<FloatType>(elementType)) {
       // Change the {min,max}_fp type to the element type of the operand.
       auto minFp = clampOp.getMinFpAttr().getValueAsDouble();
       auto maxFp = clampOp.getMaxFpAttr().getValueAsDouble();
@@ -412,8 +412,8 @@ createBroadcastOpIfNeeded(SrcOp &srcOp, Adaptor adaptor,
   StringAttr broadcastCallee = rewriter.getStringAttr(broadcastFuncName);

   Value output = srcOp.getResult();
-  auto opOutputShape = output.getType().cast<RankedTensorType>().getShape();
-  auto opOutputRank = output.getType().cast<RankedTensorType>().getRank();
+  auto opOutputShape = cast<RankedTensorType>(output.getType()).getShape();
+  auto opOutputRank = cast<RankedTensorType>(output.getType()).getRank();
   SmallVector<Value> broadcastedOperands;

   for (auto operand : adaptor.getOperands()) {
@@ -652,7 +652,7 @@ class ReduceOpConversion : public OpConversionPattern<SrcOp> {
     // not keep reduced dimensions.
     Value output = reduceOp.getResult();
     RankedTensorType reducedOutputType =
-        output.getType().cast<RankedTensorType>();
+        cast<RankedTensorType>(output.getType());

     SmallVector<int64_t> newReducedOutputShape;

@@ -678,7 +678,7 @@ class ReduceOpConversion : public OpConversionPattern<SrcOp> {

     // Create tosa.reshape op.
     SmallVector<int64_t> newShapeAttr_;
-    for (auto dim : output.getType().cast<RankedTensorType>().getShape()) {
+    for (auto dim : cast<RankedTensorType>(output.getType()).getShape()) {
       newShapeAttr_.push_back(dim);
     };

@@ -782,7 +782,7 @@ class TileOpConversion : public OpConversionPattern<tosa::TileOp> {
                   ConversionPatternRewriter &rewriter) const override {
     StringAttr callee = rewriter.getStringAttr("emitc::tosa::tile");
     auto inputShape =
-        adaptor.getInput1().getType().cast<RankedTensorType>().getShape();
+        cast<RankedTensorType>(adaptor.getInput1().getType()).getShape();
     for (int64_t i = 0, e = inputShape.size(); i < e; i++) {
       if (inputShape[i] > std::numeric_limits<int>::max()) {
         return tileOp.emitError("tosa.tile with dimensions larger than the "
