- Bumps llvm-project to llvm/llvm-project@6c64c8a
- Bumps stablehlo to openxla/stablehlo@6e403b1
- Updates type conversion materialization functions to return Value after an
API change in llvm-project (see the sketch below).
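
For context, a minimal before/after sketch of the signature change, using the
bool-to-i1 materializer from the diff below (the surrounding TypeConverter
setup and the TorchConversion ops are assumed from that file): target
materialization callbacks previously returned std::optional<Value>, with
std::nullopt meaning "defer to another materializer"; they now return Value
directly, with a null Value() playing that role.

    // Before: callbacks returned std::optional<Value>; std::nullopt deferred
    // to other registered materializers.
    typeConverter.addTargetMaterialization(
        [](OpBuilder &builder, IntegerType type, ValueRange inputs,
           Location loc) -> std::optional<Value> {
          if (!(type.getWidth() == 1 && type.isSignless()))
            return std::nullopt;
          return builder.create<ToI1Op>(loc, inputs[0]).getResult();
        });

    // After: callbacks return Value; a null Value() defers instead.
    typeConverter.addTargetMaterialization(
        [](OpBuilder &builder, IntegerType type, ValueRange inputs,
           Location loc) -> Value {
          if (!(type.getWidth() == 1 && type.isSignless()))
            return Value();
          return builder.create<ToI1Op>(loc, inputs[0]).getResult();
        });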

---------

Signed-off-by: Max Dawkins <max.dawkins@gmail.com>
Max191 authored Oct 30, 2024
1 parent 6b58c89 commit 8b0bf2e
Showing 3 changed files with 45 additions and 45 deletions.
2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 2641 files
2 changes: 1 addition & 1 deletion externals/stablehlo
Submodule stablehlo updated 44 files
+16 −0 BUILD.bazel
+4 −0 CMakeLists.txt
+2 −2 WORKSPACE.bazel
+1 −1 build_tools/llvm_version.txt
+1 −0 docs/generated/stablehlo_linalg_passes.md
+7 −0 docs/generated/stablehlo_passes.md
+1 −0 docs/generated/stablehlo_tosa_passes.md
+6 −2 docs/spec.md
+199 −0 rfcs/20241001-microscaling-formats.md
+19 −0 stablehlo/conversions/linalg/tests/miscellaneous.mlir
+9 −10 stablehlo/conversions/linalg/transforms/TypeConversion.cpp
+2 −19 stablehlo/dialect/Base.cpp
+3 −2 stablehlo/dialect/Base.td
+44 −4 stablehlo/dialect/StablehloOps.cpp
+5 −2 stablehlo/dialect/Version.cpp
+1 −1 stablehlo/dialect/Version.h
+49 −1 stablehlo/dialect/VhloBytecode.cpp
+1 −0 stablehlo/dialect/VhloDialect.td
+24 −0 stablehlo/dialect/VhloTypes.cpp
+12 −0 stablehlo/dialect/VhloTypes.td
+15 −43 stablehlo/reference/Tensor.cpp
+6 −4 stablehlo/reference/Types.cpp
+1 −1 stablehlo/testdata/igamma_float64_20_20_float64_20_20_chlo.mlir
+1 −1 stablehlo/testdata/igammac_float64_20_20_float64_20_20_chlo.mlir
+32 −0 stablehlo/tests/interpret/constant.mlir
+40 −8 stablehlo/tests/ops_stablehlo.mlir
+53 −53 stablehlo/tests/ops_stablehlo_quantized.mlir
+4 −0 stablehlo/tests/ops_stablehlo_roundtrip.mlir
+220 −0 stablehlo/tests/transforms/stablehlo_aggressive_folder.mlir
+550 −526 stablehlo/tests/transforms/stablehlo_aggressive_simplification.mlir
+2,936 −0 stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.1_8_0.mlir
+ stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.1_8_0.mlir.bc
+32 −0 stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir
+35 −0 stablehlo/tests/vhlo/vhlo_to_version_downgrade_invalid.1_7_0.mlir
+15 −0 stablehlo/tests/vhlo/vhlo_to_version_downgrade_patch.mlir
+7 −2 stablehlo/transforms/CMakeLists.txt
+31 −2 stablehlo/transforms/PassUtils.cpp
+27 −12 stablehlo/transforms/PassUtils.h
+5 −0 stablehlo/transforms/Passes.h
+2 −0 stablehlo/transforms/Passes.td
+245 −7 stablehlo/transforms/StablehloAggressiveFolder.cpp
+98 −492 stablehlo/transforms/StablehloAggressiveSimplification.cpp
+281 −0 stablehlo/transforms/StablehloAggressiveSimplificationPatterns.td
+7 −0 stablehlo/transforms/VhloToVersion.cpp
86 changes: 43 additions & 43 deletions lib/Dialect/TorchConversion/Transforms/BackendTypeConversion.cpp
@@ -57,16 +57,16 @@ static void setupTorchBoolToI1Conversion(ConversionTarget &target,
   typeConverter.addConversion([](Torch::BoolType type) -> std::optional<Type> {
     return IntegerType::get(type.getContext(), 1);
   });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, IntegerType type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        // Other builtin integer types could be handled by other materializers.
-        if (!(type.getWidth() == 1 && type.isSignless()))
-          return std::nullopt;
-        assert(inputs.size() == 1);
-        assert(isa<Torch::BoolType>(inputs[0].getType()));
-        return builder.create<ToI1Op>(loc, inputs[0]).getResult();
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            IntegerType type, ValueRange inputs,
+                                            Location loc) -> Value {
+    // Other builtin integer types could be handled by other materializers.
+    if (!(type.getWidth() == 1 && type.isSignless()))
+      return Value();
+    assert(inputs.size() == 1);
+    assert(isa<Torch::BoolType>(inputs[0].getType()));
+    return builder.create<ToI1Op>(loc, inputs[0]).getResult();
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::BoolType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
@@ -83,19 +83,19 @@ static void setupTorchIntToI64Conversion(ConversionTarget &target,
   typeConverter.addConversion([](Torch::IntType type) -> std::optional<Type> {
     return IntegerType::get(type.getContext(), 64);
   });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, IntegerType type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        // Other builtin integer types could be handled by other materializers.
-        if (!(type.getWidth() == 64 && type.isSignless()))
-          return std::nullopt;
-        // Other input type to be converted to i64 are handled by other
-        // materializers.
-        if (!isa<Torch::IntType>(inputs[0].getType()))
-          return std::nullopt;
-        assert(inputs.size() == 1);
-        return builder.createOrFold<ToI64Op>(loc, inputs[0]);
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            IntegerType type, ValueRange inputs,
+                                            Location loc) -> Value {
+    // Other builtin integer types could be handled by other materializers.
+    if (!(type.getWidth() == 64 && type.isSignless()))
+      return Value();
+    // Other input type to be converted to i64 are handled by other
+    // materializers.
+    if (!isa<Torch::IntType>(inputs[0].getType()))
+      return Value();
+    assert(inputs.size() == 1);
+    return builder.createOrFold<ToI64Op>(loc, inputs[0]);
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::IntType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
@@ -112,13 +112,13 @@ static void setupTorchFloatToF64Conversion(ConversionTarget &target,
   typeConverter.addConversion([](Torch::FloatType type) -> std::optional<Type> {
     return Float64Type::get(type.getContext());
   });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, Float64Type type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        assert(inputs.size() == 1);
-        assert(isa<Torch::FloatType>(inputs[0].getType()));
-        return builder.create<ToF64Op>(loc, inputs[0]).getResult();
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            Float64Type type, ValueRange inputs,
+                                            Location loc) -> Value {
+    assert(inputs.size() == 1);
+    assert(isa<Torch::FloatType>(inputs[0].getType()));
+    return builder.create<ToF64Op>(loc, inputs[0]).getResult();
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::FloatType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
@@ -137,19 +137,19 @@ static void setupTorchGeneratorToI64Conversion(ConversionTarget &target,
       [](Torch::GeneratorType type) -> std::optional<Type> {
         return IntegerType::get(type.getContext(), 64);
       });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, IntegerType type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        // Other builtin integer types could be handled by other materializers.
-        if (!(type.getWidth() == 64 && type.isSignless()))
-          return std::nullopt;
-        // Other input type to be converted to i64 are handled by other
-        // materializers.
-        if (!isa<Torch::GeneratorType>(inputs[0].getType()))
-          return std::nullopt;
-        assert(inputs.size() == 1);
-        return builder.create<GeneratorToI64Op>(loc, inputs[0]).getResult();
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            IntegerType type, ValueRange inputs,
+                                            Location loc) -> Value {
+    // Other builtin integer types could be handled by other materializers.
+    if (!(type.getWidth() == 64 && type.isSignless()))
+      return Value();
+    // Other input type to be converted to i64 are handled by other
+    // materializers.
+    if (!isa<Torch::GeneratorType>(inputs[0].getType()))
+      return Value();
+    assert(inputs.size() == 1);
+    return builder.create<GeneratorToI64Op>(loc, inputs[0]).getResult();
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::GeneratorType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
