Commit

[llvm] [aot] CUDA-AOT PR taichi-dev#2: Implemented AOT Module Loader for LLVM-CUDA backend

jim19930609 committed Jun 2, 2022
1 parent 41c9736 commit f77d75c
Showing 4 changed files with 94 additions and 5 deletions.
8 changes: 3 additions & 5 deletions taichi/backends/cpu/aot_module_loader_impl.cpp
@@ -15,15 +15,13 @@ class AotModuleImpl : public LlvmAotModule {
       : LlvmAotModule(params.module_path, params.program) {
   }
 
-  Arch arch() const override {
-    return Arch::x64;
-  }
-
  private:
   FunctionType convert_module_to_function(
       const std::string &name,
       LlvmOfflineCache::KernelCacheData &&loaded) override {
-    auto *tlctx = program_->get_llvm_context(program_->config->arch);
+    Arch arch = program_->config->arch;
+    TI_ASSERT(arch == Arch::x64 || arch == Arch::arm64);
+    auto *tlctx = program_->get_llvm_context(arch);
 
     const auto &tasks = loaded.offloaded_task_list;
     std::vector<OffloadedTask> offloaded_tasks;
66 changes: 66 additions & 0 deletions taichi/backends/cuda/aot_module_loader_impl.cpp
@@ -0,0 +1,66 @@
#include "taichi/backends/cuda/aot_module_loader_impl.h"
#include "taichi/llvm/llvm_aot_module_loader.h"

#include "taichi/llvm/llvm_offline_cache.h"
#include "taichi/llvm/llvm_program.h"
#include "taichi/backends/cuda/codegen_cuda.h"

namespace taichi {
namespace lang {
namespace {

class AotModuleImpl : public LlvmAotModule {
public:
explicit AotModuleImpl(const cpu::AotModuleParams &params)
: LlvmAotModule(params.module_path, params.program) {
}

private:
FunctionType convert_module_to_function(
const std::string &name,
LlvmOfflineCache::KernelCacheData &&loaded) override {
Arch arch = program_->config->arch;
TI_ASSERT(arch == Arch::cuda);
auto *tlctx = program_->get_llvm_context(arch);

const auto &tasks = loaded.offloaded_task_list;
std::vector<OffloadedTask> offloaded_tasks;
offloaded_tasks.reserve(tasks.size());
for (const auto &t : tasks) {
OffloadedTask ot{/*codegen=*/nullptr};
ot.name = t.name;
ot.block_dim = t.block_dim;
ot.grid_dim = t.grid_dim;
offloaded_tasks.push_back(std::move(ot));
}

CUDAModuleToFunctionConverter converter{tlctx, program_};
return converter.convert(name, loaded.args, std::move(loaded.owned_module),
std::move(offloaded_tasks));
}

std::unique_ptr<aot::KernelTemplate> make_new_kernel_template(
const std::string &name) override {
TI_NOT_IMPLEMENTED;
return nullptr;
}

std::unique_ptr<aot::Field> make_new_field(const std::string &name) override {
TI_NOT_IMPLEMENTED;
return nullptr;
}
};

} // namespace

namespace cuda {

std::unique_ptr<aot::Module> make_aot_module(std::any mod_params) {
auto mod = std::make_unique<AotModuleImpl>(
std::any_cast<const AotModuleParams &>(mod_params));
return mod;
}

} // namespace cuda
} // namespace lang
} // namespace taichi
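
For context, a minimal usage sketch of the new entry point (not part of this diff), assuming a valid LlvmProgramImpl and a module_path directory produced by an earlier AOT save; the wrapper function name is hypothetical:

#include <memory>
#include <string>

#include "taichi/backends/cuda/aot_module_loader_impl.h"

// Sketch only: fills in cuda::AotModuleParams, lets the argument be wrapped
// into the std::any that cuda::make_aot_module() expects, and returns the
// type-erased aot::Module.
std::unique_ptr<taichi::lang::aot::Module> load_cuda_aot_module(
    taichi::lang::LlvmProgramImpl *prog, const std::string &dir) {
  taichi::lang::cuda::AotModuleParams params;
  params.module_path = dir;  // directory holding the offline cache files
  params.program = prog;     // not owned by the loader
  return taichi::lang::cuda::make_aot_module(params);
}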
21 changes: 21 additions & 0 deletions taichi/backends/cuda/aot_module_loader_impl.h
@@ -0,0 +1,21 @@
#pragma once

#include "taichi/aot/module_loader.h"

namespace taichi {
namespace lang {

class LlvmProgramImpl;

namespace cuda {

struct TI_DLL_EXPORT AotModuleParams {
  std::string module_path;
  LlvmProgramImpl *program{nullptr};
};

TI_DLL_EXPORT std::unique_ptr<aot::Module> make_aot_module(std::any mod_params);

} // namespace cuda
} // namespace lang
} // namespace taichi
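
Since both LLVM backends now expose the same std::any-based factory shape, a caller can dispatch on Arch. A rough sketch, assuming the CPU counterpart cpu::make_aot_module and cpu::AotModuleParams mirror the CUDA ones shown above (the dispatch helper itself is hypothetical):

#include "taichi/backends/cpu/aot_module_loader_impl.h"   // assumed to declare cpu::AotModuleParams
#include "taichi/backends/cuda/aot_module_loader_impl.h"

using namespace taichi::lang;

std::unique_ptr<aot::Module> make_llvm_aot_module(Arch arch,
                                                  LlvmProgramImpl *prog,
                                                  const std::string &dir) {
  if (arch == Arch::cuda) {
    // CUDA path added by this commit.
    return cuda::make_aot_module(cuda::AotModuleParams{dir, prog});
  }
  // x64 / arm64 path; assumes the cpu:: params carry the same two fields.
  return cpu::make_aot_module(cpu::AotModuleParams{dir, prog});
}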
4 changes: 4 additions & 0 deletions taichi/llvm/llvm_aot_module_loader.h
@@ -15,6 +15,10 @@ class LlvmAotModule : public aot::Module {
     TI_ASSERT(program_ != nullptr);
   }
 
+  Arch arch() const override {
+    return program_->config->arch;
+  }
+
   uint64_t version() const override {
     return 0;
   }
