-
Notifications
You must be signed in to change notification settings - Fork 645
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
GPU ukernel lowering config for data-tiled multi_mma, and a simple ukernel. (#19504) This PR adds the KernelConfig logic to generate a lowering_config selecting a ukernel for multi_mma. In order to be able to test it, this PR also adds a very simple `multi_mma` ukernel, but it isn't actually exercised yet, other than successfully compiling to bitcode. The compiler logic only cares about the existence of the resulting bitcode file. The actual lowering to ukernel op will come in the next PR. --------- Signed-off-by: Benoit Jacob <jacob.benoit.1@gmail.com>
- Loading branch information
Showing
12 changed files
with
177 additions
and
43 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
53 changes: 53 additions & 0 deletions
53
...builtins/ukernel/iree_uk_amdgpu_multi_mma_mfma_i32_16x16x32_i8_unroll8x2x2_subgroups1x4.c
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,53 @@ | ||
// Copyright 2024 The IREE Authors | ||
// | ||
// Licensed under the Apache License v2.0 with LLVM Exceptions. | ||
// See https://llvm.org/LICENSE.txt for license information. | ||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||
|
||
#include "compiler/plugins/target/ROCM/builtins/ukernel/common.h" | ||
|
||
// Very naive kernel. TODO(bjacob):
// 1. Shared memory: can't allocate it within the microkernel (which is just a
//    helper device function, not the actual amdgpu_kernel). Need to get it
//    passed down here as a `T [[clang::address_space(3)]] *` parameter.
// 2. Better scheduling via either barrier intrinsics or inline assembly.
// 3. Subgroups1x4 being asymmetric is a historical accident... should be 2x2.
//
// Device helper (not an amdgpu_kernel entry point) performing a data-tiled
// integer multi_mma using the 16x16x32 i8 MFMA intrinsic, unrolled
// 8 (M) x 2 (N) x 2 (K) per lane, accumulating in place into C.
//
// Parameters:
//   a_buffer, a_offset: int8 A operand; offset is in elements of a_buffer.
//   b_buffer, b_offset: int8 B operand; offset is in elements of b_buffer.
//   c_buffer, c_offset: int32 accumulator, read and written back in place.
//   k_size:             trip count of the outer reduction loop.
//
// NOTE(review): the fixed strides (256-vector C stride, 512-vector A/B
// advance per k_outer) and the `tid % 64` addressing encode a specific
// data-tiled layout and an apparent 256-lane workgroup (4 subgroups of 64)
// -- confirm against the KernelConfig logic that selects this ukernel.
[[clang::always_inline]] void
iree_uk_amdgpu_multi_mma_mfma_i32_16x16x32_i8_unroll8x2x2_subgroups1x4(
    const int8_t *a_buffer, int64_t a_offset, const int8_t *b_buffer,
    int64_t b_offset, int32_t *c_buffer, int64_t c_offset, int64_t k_size) {
  // Lane index within the workgroup along x.
  int tid = __builtin_amdgcn_workitem_id_x();

  // Load existing accumulators.
  // 8x2 tile of 4xi32 MFMA accumulators per lane; zero-init is immediately
  // overwritten by the loads below.
  int32x4_t acc[8][2] = {{0}};
  int32x4_t *c_global = (int32x4_t *)(c_buffer + c_offset);
  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 2; ++j) {
      acc[i][j] = c_global[256 * (2 * i + j) + tid];
    }
  }

  // Arithmetic loop.
  // A is addressed by lane-within-subgroup (tid % 64); B by full tid.
  const int64x2_t *a_global =
      (const int64x2_t *)(a_buffer + a_offset) + (tid % 64);
  const int64x2_t *b_global = (const int64x2_t *)(b_buffer + b_offset) + tid;
  for (int k_outer = 0; k_outer < k_size; ++k_outer) {
    for (int i = 0; i < 8; ++i) {
      for (int j = 0; j < 2; ++j) {
        for (int k = 0; k < 2; ++k) {
          // One 16x16x32 i8 MFMA; the trailing 0, 0, 0 are the cbsz/abid/blgp
          // modifiers, all unused here.
          acc[i][j] = __builtin_amdgcn_mfma_i32_16x16x32_i8(
              a_global[64 * i][k], b_global[256 * j][k], acc[i][j], 0, 0, 0);
        }
      }
    }
    // Advance both operands to the next k_outer slice.
    a_global += 512;
    b_global += 512;
  }

  // Store accumulators.
  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 2; ++j) {
      c_global[256 * (2 * i + j) + tid] = acc[i][j];
    }
  }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
29 changes: 29 additions & 0 deletions
29
compiler/plugins/target/ROCM/test/config_ukernel_multi_mma_gfx942.mlir
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,29 @@ | ||
// RUN: iree-opt --split-input-file --iree-gpu-test-target=gfx942 --pass-pipeline='builtin.module(iree-llvmgpu-select-lowering-strategy)' %s | FileCheck %s

// Tests that when the executable target enables ukernels = "multi_mma", the
// lowering-strategy selection pass attaches a ukernel lowering config (with
// the matching .gfx942.bc bitcode object) to a data-tiled multi_mma op, and
// does not add operand promotion (the CHECK-NOT below).
func.func @multi_mma_mfma_i32_16x16x32_i8(%a : tensor<1x2x8x4x16x2x8xi8>,
                                          %b : tensor<1x2x4x2x4x16x2x8xi8>,
                                          %c : tensor<1x1x8x4x2x4x16x4xi32>)
    -> tensor<1x1x8x4x2x4x16x4xi32> attributes {
      hal.executable.target = #hal.executable.target<"rocm", "rocm-hsaco-fb", {ukernels = "multi_mma"}>
    } {
  %d = iree_gpu.multi_mma %a, %b, %c {indexing_maps = [
          affine_map<(d0, d1, d2) -> (d0, d2)>,
          affine_map<(d0, d1, d2) -> (d1, d2)>,
          affine_map<(d0, d1, d2) -> (d0, d1)>
         ], iterator_types = [
          #iree_gpu.iterator_type<parallel>,
          #iree_gpu.iterator_type<parallel>,
          #iree_gpu.iterator_type<reduction>
         ], kind = #iree_gpu.data_tiled_mma_layout<
          intrinsic =  MFMA_I32_16x16x32_I8,
          unroll_m = 8, unroll_n = 2, subgroups_n = 4, unroll_k = 2
         >} : tensor<1x2x8x4x16x2x8xi8>, tensor<1x2x4x2x4x16x2x8xi8> into tensor<1x1x8x4x2x4x16x4xi32>
  return %d : tensor<1x1x8x4x2x4x16x4xi32>
}

// CHECK-LABEL: @multi_mma_mfma_i32_16x16x32_i8
//       CHECK: iree_gpu.multi_mma
//  CHECK-SAME:     #hal.executable.object<{path = "iree_uk_amdgpu_multi_mma_mfma_i32_16x16x32_i8_unroll8x2x2_subgroups1x4.gfx942.bc"
//   CHECK-NOT:     promote_operands
//  CHECK-SAME:     reduction = [0, 0, 0]
//  CHECK-SAME:     #iree_gpu.ukernel_config<name = "iree_uk_amdgpu_multi_mma_mfma_i32_16x16x32_i8_unroll8x2x2_subgroups1x4"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters