Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[i1] Remove command line option to enable packed storage #19528

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 12 additions & 1 deletion compiler/src/iree/compiler/Codegen/Common/EncodingUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,13 @@ MaterializeEncodingTypeConverter::MaterializeEncodingTypeConverter(
addConversion([](FloatType floatType) { return floatType; });
addConversion([](MemRefType memrefType) { return memrefType; });
addConversion([=](RankedTensorType type) -> RankedTensorType {
MaterializeEncodingInfo encodingInfo = getEncodingInfo(type);
lialan marked this conversation as resolved.
Show resolved Hide resolved
if (IREE::Encoding::hasPackedStorageAttr(type)) {
return type;
}
// For a given tensor type with an encoding, return the materialized
// type to use for it. If no encoding is set, then return the tensor type
// itself.
MaterializeEncodingInfo encodingInfo = getEncodingInfo(type);
if (IREE::Codegen::isIdentityLayout(encodingInfo)) {
return dropEncoding(type);
}
Expand Down Expand Up @@ -92,6 +95,14 @@ MaterializeEncodingTypeConverter::getEncodingInfo(RankedTensorType type) const {
}

RankedTensorType dropEncoding(RankedTensorType type) {
assert(!IREE::Encoding::hasPackedStorageAttr(type) &&
"did not expect a `packed_storage` attribute.");
return RankedTensorType::get(type.getShape(), type.getElementType());
}

RankedTensorType dropPackedStorageEncodingIfAny(RankedTensorType type) {
lialan marked this conversation as resolved.
Show resolved Hide resolved
if (!IREE::Encoding::hasPackedStorageAttr(type))
return type;
return RankedTensorType::get(type.getShape(), type.getElementType());
}

Expand Down
4 changes: 4 additions & 0 deletions compiler/src/iree/compiler/Codegen/Common/EncodingUtils.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include "iree/compiler/Codegen/Dialect/Codegen/IR/IREECodegenInterfaces.h"
#include "iree/compiler/Codegen/Dialect/Codegen/IR/IREECodegenTypes.h"
#include "iree/compiler/Dialect/Encoding/IR/EncodingOps.h"
#include "iree/compiler/Dialect/Encoding/IR/EncodingTypes.h"
#include "iree/compiler/Dialect/HAL/IR/HALTypes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"
Expand Down Expand Up @@ -77,6 +78,9 @@ class OpMaterializeEncodingPattern : public OpConversionPattern<OpTy> {
/// Returns the RankedTensorType without encodings.
RankedTensorType dropEncoding(RankedTensorType type);

/// Returns the RankedTensorType without packed storage encoding (if any).
RankedTensorType dropPackedStorageEncodingIfAny(RankedTensorType type);

/// Returns the deserialized MaterializeEncodingInfo if the `layouts` field is
/// present in encodings and it only has a single layout. Otherwise, returns
/// std::nullopt.
Expand Down
26 changes: 16 additions & 10 deletions compiler/src/iree/compiler/Codegen/Common/TypePropagationPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
//===---------------------------------------------------------------------===//

#include "iree/compiler/Codegen/Common/Passes.h"
#include "iree/compiler/Dialect/Encoding/IR/EncodingTypes.h"
#include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtDialect.h"
#include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "iree/compiler/Dialect/Util/IR/UtilTypes.h"
Expand Down Expand Up @@ -65,9 +66,8 @@ static Value convertElementType(OpBuilder &b, Location loc, Type targetType,
/// std::nullopt.
static std::optional<Type> getLegalizedType(Type t) {
if (auto shapedType = llvm::dyn_cast<RankedTensorType>(t)) {
Type elementType = shapedType.getElementType();
std::optional<Type> legalizedElementType =
legalizeStorageElementType(elementType);
legalizeTensorStorageElementType(shapedType);
if (!legalizedElementType)
return std::nullopt;
return RankedTensorType::get(shapedType.getShape(),
Expand Down Expand Up @@ -121,7 +121,7 @@ struct ConstantOpTypeConversion
constantOp, "expected attribute type to be shaped type");
}
std::optional<Type> legalizedElementType =
legalizeStorageElementType(attrType.getElementType());
legalizeTensorStorageElementType(attrType);
if (!legalizedElementType) {
return rewriter.notifyMatchFailure(constantOp,
"cannot legalize elementType");
Expand Down Expand Up @@ -227,8 +227,10 @@ struct GenericOpTypePropagation
signatureConverter.addInputs(index, argType);
continue;
}
auto inputOperandType =
llvm::cast<RankedTensorType>(genericOp->getOperandTypes()[index]);
std::optional<Type> legalizedArgType =
legalizeStorageElementType(argType);
legalizeTensorStorageElementType(inputOperandType);
if (!legalizedArgType) {
return genericOp.emitOpError("failed to get legalized type for arg ")
<< index;
Expand Down Expand Up @@ -258,8 +260,8 @@ struct GenericOpTypePropagation
modifyYield = true;
OpOperand *yieldOperand =
modifiedOp.getMatchingYieldValue(modifiedOpOperand);
std::optional<Type> legalizedType =
legalizeStorageElementType(yieldOperand->get().getType());
std::optional<Type> legalizedType = legalizeTensorStorageElementType(
modifiedOpOperand->get().getType());
if (!legalizedType) {
return genericOp.emitOpError(
"failed to get legalized type for yield value");
Expand Down Expand Up @@ -289,7 +291,7 @@ struct LinalgFillTypePropagation
ConversionPatternRewriter &rewriter) const final {
Value value = adaptor.getInputs().front();
std::optional<Type> legalizedElementType =
legalizeStorageElementType(value.getType());
legalizeTensorStorageElementType(adaptor.getOutputs()[0].getType());
if (!legalizedElementType) {
return fillOp.emitOpError("failed to get legalized type for value");
}
Expand Down Expand Up @@ -355,8 +357,8 @@ struct IREELinalgExtScatterTypePropagation
// type.
TypeConverter::SignatureConversion signatureConverter(
modifiedOpRegion.getNumArguments());
Type argType = modifiedOpRegion.getArguments()[0].getType();
std::optional<Type> legalizedArgType = legalizeStorageElementType(argType);
std::optional<Type> legalizedArgType =
legalizeTensorStorageElementType(inputType);
if (!legalizedArgType) {
return scatterOp.emitOpError("failed to get legalized type for argument");
}
Expand Down Expand Up @@ -418,8 +420,12 @@ struct IREELinalgExtSortTypePropagation
TypeConverter::SignatureConversion signatureConverter(
modifiedOpRegion.getNumArguments());
for (auto [index, arg] : llvm::enumerate(modifiedOpRegion.getArguments())) {
// Refer to input types of the original operation to determine the
// corresponding legal arg type.
auto convertType = index % 2 == 0 ? sortOp->getOperandTypes()[index / 2]
: sortOp->getResultTypes()[index / 2];
std::optional<Type> legalizedArgType =
legalizeStorageElementType(arg.getType());
legalizeTensorStorageElementType(convertType);
if (!legalizedArgType) {
return sortOp.emitOpError("failed to get legalized type for argument");
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -281,8 +281,12 @@ EncodingAttr getEncodingAttr(RankedTensorType type) {
return dyn_cast_or_null<EncodingAttr>(type.getEncoding());
}

bool hasPackedStorageAttr(RankedTensorType type) {
return dyn_cast_or_null<PackedStorageAttr>(type.getEncoding()) != nullptr;
bool hasPackedStorageAttr(Type type) {
if (auto tensorType = dyn_cast<RankedTensorType>(type)) {
return dyn_cast_or_null<PackedStorageAttr>(tensorType.getEncoding()) !=
nullptr;
}
return false;
}

FailureOr<linalg::ContractionDimensions>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ namespace mlir::iree_compiler::IREE::Encoding {
EncodingAttr getEncodingAttr(RankedTensorType type);

/// Returns true if the type contains a packed_storage attribute.
lialan marked this conversation as resolved.
Show resolved Hide resolved
bool hasPackedStorageAttr(RankedTensorType type);
bool hasPackedStorageAttr(Type type);

/// Returns the ContractionDimensions for the encoding user_indexing_maps.
FailureOr<linalg::ContractionDimensions>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ iree_compiler_cc_library(
],
deps = [
":Utils",
"//compiler/src/iree/compiler/Codegen/Common",
"//compiler/src/iree/compiler/Dialect/HAL/Analysis",
"//compiler/src/iree/compiler/Dialect/HAL/Conversion",
"//compiler/src/iree/compiler/Dialect/HAL/IR",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ iree_cc_library(
MLIRSCFDialect
MLIRTransformUtils
MLIRTransforms
iree::compiler::Codegen::Common
iree::compiler::Dialect::HAL::Analysis
iree::compiler::Dialect::HAL::Conversion
iree::compiler::Dialect::HAL::IR
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

#include "iree/compiler/Dialect/HAL/Conversion/StreamToHAL/Patterns.h"

#include "iree/compiler/Codegen/Common/EncodingUtils.h"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We do not want this dependency. I think you want to reuse dropEncoding method. Let's move it to Encoding/EncodingTypes.h and EncodingAttr.cpp.

(Ideally we should move the implementation to EncodingTypes.cpp. I haven't started the work because we have few people touching the dialect at this moment. I'll send a cleanup after you finish the i1 work.)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

removed all those deps.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

as a general rule there are to be no codegen deps in non-codegen directories - dropPackedStorageEncodingIfAny should be moved to EncodingTypes.h

#include "iree/compiler/Dialect/HAL/Analysis/Captures.h"
#include "iree/compiler/Dialect/HAL/Conversion/StreamToHAL/Utils.h"
#include "iree/compiler/Dialect/HAL/IR/HALDialect.h"
Expand Down Expand Up @@ -478,7 +479,8 @@ struct TensorExportBufferViewOpPattern
}

auto loc = exportOp.getLoc();
auto tensorType = llvm::cast<RankedTensorType>(adaptor.getSourceEncoding());
auto tensorType = dropPackedStorageEncodingIfAny(
llvm::cast<RankedTensorType>(adaptor.getSourceEncoding()));
auto dynamicDims = adaptor.getSourceEncodingDims();

// NOTE: we should have verified supported encodings/types at entry into the
Expand Down
2 changes: 1 addition & 1 deletion compiler/src/iree/compiler/Dialect/Stream/IR/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ iree_compiler_cc_library(
":StreamInterfacesGen",
":StreamOpsGen",
":StreamTypesGen",
"//compiler/src/iree/compiler/Dialect/Util/IR",
"//compiler/src/iree/compiler/Dialect/Encoding/IR",
"//compiler/src/iree/compiler/Utils",
"@llvm-project//llvm:Support",
"@llvm-project//mlir:ArithDialect",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ iree_cc_library(
MLIRTensorDialect
MLIRTransformUtils
MLIRViewLikeInterface
iree::compiler::Dialect::Util::IR
iree::compiler::Dialect::Encoding::IR
iree::compiler::Utils
PUBLIC
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

#include "iree/compiler/Dialect/Stream/IR/StreamOps.h"

#include "iree/compiler/Dialect/Encoding/IR/EncodingTypes.h"
#include "iree/compiler/Dialect/Util/IR/ClosureOpUtils.h"
#include "iree/compiler/Dialect/Util/IR/UtilOps.h"
#include "iree/compiler/Dialect/Util/IR/UtilTypes.h"
Expand All @@ -27,6 +28,10 @@
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/RegionUtils.h"

namespace mlir::iree_compiler {
using IREE::Encoding::getEncodingAttr;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

directly reference this when required - it hurts readability to have aliases like this (as a user will just see getEncodingAttr and not know if it's on their type, in the dialect they are in, etc) - I'm not sure we want this function anywhere, though, as commented below (the flow/stream dialects should support all encodings, not just those in the encoding dialect)

}

lialan marked this conversation as resolved.
Show resolved Hide resolved
namespace mlir::iree_compiler::IREE::Stream {

//===----------------------------------------------------------------------===//
Expand Down Expand Up @@ -1903,7 +1908,7 @@ LogicalResult TensorCloneOp::verify() {
// information.
auto sourceEncoding = llvm::cast<RankedTensorType>(op.getSourceEncoding());
auto resultEncoding = llvm::cast<RankedTensorType>(op.getResultEncoding());
if (sourceEncoding.getEncoding() != resultEncoding.getEncoding()) {
if (getEncodingAttr(sourceEncoding) != getEncodingAttr(resultEncoding)) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this required by flow.tensor.bitcast lowering? I.e., the op is lowered to flow.tensor.clone and you need to bypass the check?

I'm not convinced that the change is correct. Because the getEncodingAttr checks if the tensor type has IREE::Encoding::EncodingAttr attribute. There could be other encodings and it is a bug if we introduce new encodings. E.g., tensor<3x4xi32, #whatever_other_encoding_with_padding_semantic> can not be cloned to tensor<3x4xi32>. I think we need to have a stronger restriction. Perhaps just relax the check for packed_storage encoding?

EncodingAttr getEncodingAttr(RankedTensorType type) {
return dyn_cast_or_null<EncodingAttr>(type.getEncoding());
}

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a little tricky. And yes, this is required by flow.tensor.bitcast: we would sometimes cast from a tensor without any attribute to another tensor with the packed attribute. In such a case, we shouldn't check whether both the source and the result carry the packed attribute.

I have slightly updated it to exclude packed attribute comparison. Suggestions are welcome.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree with hanhan - I'm not sure why this changed - if it is indeed tricky it's too tricky to be as opaque as this. The op behavior and the comment both state the existing code was the check that was desired, and not that we should allow non-encoding-dialect encodings to be ignored.

return op.emitOpError() << "clones changing tensor encoding from "
<< sourceEncoding.getEncoding() << " to "
<< resultEncoding.getEncoding() << "; not allowed";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "iree/compiler/Dialect/Encoding/IR/EncodingTypes.h"
#include "iree/compiler/Dialect/Flow/IR/FlowDialect.h"
#include "iree/compiler/Dialect/Flow/IR/FlowTypes.h"
#include "iree/compiler/Dialect/Stream/Analysis/Affinity.h"
Expand All @@ -22,6 +23,7 @@
#include "iree/compiler/Dialect/Util/IR/UtilTypes.h"
#include "iree/compiler/Dialect/Util/Transforms/Passes.h"
#include "iree/compiler/Dialect/Util/Transforms/Patterns.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
Expand Down Expand Up @@ -247,6 +249,12 @@ struct ConvertToStreamPass final
if (llvm::isa<IREE::Flow::ChannelType>(type)) {
return IREE::Stream::ChannelType::get(context);
}
if (auto rankedType = llvm::dyn_cast_or_null<RankedTensorType>(type)) {
lialan marked this conversation as resolved.
Show resolved Hide resolved
if (IREE::Encoding::hasPackedStorageAttr(rankedType)) {
return RankedTensorType::get(rankedType.getShape(),
rankedType.getElementType());
}
}
return !llvm::isa<TensorType>(type) ? type : Type{};
});

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ static LogicalResult checkEncoding(Operation *op, RankedTensorType encodingType,
// Aligns the element type of a tensor<> to a byte-aligned power of 2 bit width.
static RankedTensorType alignTensorType(RankedTensorType originalType) {
Type elementType = originalType.getElementType();
Type alignedType = legalizeStorageElementType(elementType);
Type alignedType = legalizeTensorStorageElementType(originalType);
if (alignedType == elementType)
return originalType;
return RankedTensorType::get(originalType.getShape(), alignedType,
Expand Down Expand Up @@ -168,7 +168,9 @@ static Value canonicalizeFillPattern(Value pattern, OpBuilder &builder) {
// %i8_val = (%i8_val << 2) | %i2_val
// %i8_val = (%i8_val << 2) | %i2_val
// %i8_val = (%i8_val << 2) | %i2_val
if (needToPackSubByteElementBitWidth(elementBitWidth)) {
bool patternIsPacked =
IREE::Encoding::hasPackedStorageAttr(pattern.getType());
if (!patternIsPacked && needToPackSubByteElementBitWidth(elementBitWidth)) {
Type i8Type = builder.getI8Type();
Value bitwidth = builder.createOrFold<arith::ConstantOp>(
loc, i8Type, builder.getIntegerAttr(i8Type, elementBitWidth));
Expand Down Expand Up @@ -655,7 +657,8 @@ struct EncodeHostTensorsPass
static IREE::Flow::DispatchTensorType
alignDispatchTensorType(IREE::Flow::DispatchTensorType originalType) {
Type elementType = originalType.getBoundElementType();
Type alignedType = legalizeStorageElementType(elementType);
Type alignedType =
legalizeTensorStorageElementType(originalType.asRankedTensorType());
if (alignedType == elementType)
return originalType;
return IREE::Flow::DispatchTensorType::get(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ iree_lit_test_suite(
"encode_host_tensors.mlir",
"encode_host_tensors_encoding.mlir",
"encode_host_tensors_packing.mlir",
"encode_host_tensors_packing_i1_experimental_clopt.mlir",
"fold_globals.mlir",
"fold_uniform_operands.mlir",
"fuse_dispatch_bindings.mlir",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ iree_lit_test_suite(
"encode_host_tensors.mlir"
"encode_host_tensors_encoding.mlir"
"encode_host_tensors_packing.mlir"
"encode_host_tensors_packing_i1_experimental_clopt.mlir"
"fold_globals.mlir"
"fold_uniform_operands.mlir"
"fuse_dispatch_bindings.mlir"
Expand Down

This file was deleted.

Loading
Loading