chore(lidar_centerpoint): add a check between the model and config's class size #3101

Merged
@@ -68,6 +68,8 @@ class Logger : public nvinfer1::ILogger // NOLINT
     }
   }
 
+  void log(Severity severity, const std::string & msg) noexcept { log(severity, msg.c_str()); }
+
   Severity reportable_severity_{Severity::kWARNING};
   rclcpp::Logger logger_{rclcpp::get_logger("tensorrt_common")};
 };
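
Note: the added overload lets call sites pass composed std::string messages without calling .c_str() themselves; it simply forwards to the existing C-string overload. A minimal standalone sketch of the same delegation pattern (MiniLogger and the sample path are illustrative, not part of this PR):

#include <iostream>
#include <string>

// Illustrative stand-in for tensorrt_common::Logger: the std::string
// overload forwards to the C-string overload, as in the change above.
struct MiniLogger
{
  void log(const char * msg) noexcept { std::cerr << msg << '\n'; }
  void log(const std::string & msg) noexcept { log(msg.c_str()); }
};

int main()
{
  MiniLogger logger;
  const std::string path = "model.onnx";               // sample value, not from the PR
  logger.log(std::string("Loaded engine from ") + path);  // no .c_str() needed
  return 0;
}
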
9 changes: 9 additions & 0 deletions perception/lidar_centerpoint/lib/network/network_trt.cpp
@@ -48,6 +48,8 @@ bool HeadTRT::setProfile(
   nvinfer1::IBuilder & builder, nvinfer1::INetworkDefinition & network,
   nvinfer1::IBuilderConfig & config)
 {
+  using Severity = tensorrt_common::Logger::Severity;
+
   auto profile = builder.createOptimizationProfile();
   auto in_name = network.getInput(0)->getName();
   auto in_dims = nvinfer1::Dims4(
@@ -59,6 +61,13 @@
 
   for (std::size_t ci = 0; ci < out_channel_sizes_.size(); ci++) {
     auto out_name = network.getOutput(ci)->getName();
+
+    if (
+      out_name == std::string("heatmap") &&
+      network.getOutput(ci)->getDimensions().d[1] != static_cast<int32_t>(out_channel_sizes_[ci])) {
+      logger_.log(Severity::kERROR, "Expected and actual number of classes do not match");
+      return false;
+    }
     auto out_dims = nvinfer1::Dims4(
       config_.batch_size_, out_channel_sizes_[ci], config_.down_grid_size_y_,
       config_.down_grid_size_x_);
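
For context, this guard compares the class count baked into the ONNX model's "heatmap" output against the class count from the node's config, and aborts engine building on a mismatch instead of failing later at inference time. A simplified, free-standing sketch of the same check (the function name and parameters are assumptions for illustration):

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Simplified sketch of the check above. model_channels stands in for
// network.getOutput(ci)->getDimensions().d[1]; only the "heatmap" output
// encodes the class count, so other heads pass trivially.
bool class_count_matches(
  const std::string & out_name, int32_t model_channels,
  const std::vector<std::size_t> & out_channel_sizes, std::size_t ci)
{
  if (out_name != "heatmap") {
    return true;
  }
  return model_channels == static_cast<int32_t>(out_channel_sizes[ci]);
}
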
38 changes: 24 additions & 14 deletions perception/lidar_centerpoint/lib/network/tensorrt_wrapper.cpp
@@ -35,10 +35,12 @@ TensorRTWrapper::~TensorRTWrapper()
 bool TensorRTWrapper::init(
   const std::string & onnx_path, const std::string & engine_path, const std::string & precision)
 {
+  using Severity = tensorrt_common::Logger::Severity;
+
   runtime_ =
     tensorrt_common::TrtUniquePtr<nvinfer1::IRuntime>(nvinfer1::createInferRuntime(logger_));
   if (!runtime_) {
-    std::cout << "Fail to create runtime" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create runtime");
     return false;
   }
 
@@ -56,15 +58,18 @@ bool TensorRTWrapper::init(
 
 bool TensorRTWrapper::createContext()
 {
+  using Severity = tensorrt_common::Logger::Severity;
+
   if (!engine_) {
-    std::cout << "Fail to create context: Engine isn't created" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create context: Engine was not created");
     return false;
   }
 
   context_ =
     tensorrt_common::TrtUniquePtr<nvinfer1::IExecutionContext>(engine_->createExecutionContext());
   if (!context_) {
-    std::cout << "Fail to create context" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create context");
     return false;
   }
 
@@ -75,17 +80,19 @@ bool TensorRTWrapper::parseONNX(
   const std::string & onnx_path, const std::string & engine_path, const std::string & precision,
   const size_t workspace_size)
 {
+  using Severity = tensorrt_common::Logger::Severity;
+
   auto builder =
     tensorrt_common::TrtUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(logger_));
   if (!builder) {
-    std::cout << "Fail to create builder" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create builder");
     return false;
   }
 
   auto config =
     tensorrt_common::TrtUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
   if (!config) {
-    std::cout << "Fail to create config" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create config");
     return false;
   }
 #if (NV_TENSORRT_MAJOR * 1000) + (NV_TENSORRT_MINOR * 100) + NV_TENSORRT_PATCH >= 8400
@@ -95,10 +102,10 @@
 #endif
   if (precision == "fp16") {
     if (builder->platformHasFastFp16()) {
-      std::cout << "use TensorRT FP16 Inference" << std::endl;
+      logger_.log(Severity::kINFO, "Using TensorRT FP16 Inference");
      config->setFlag(nvinfer1::BuilderFlag::kFP16);
     } else {
-      std::cout << "TensorRT FP16 Inference isn't supported in this environment" << std::endl;
+      logger_.log(Severity::kINFO, "TensorRT FP16 Inference isn't supported in this environment");
     }
   }

@@ -107,7 +114,7 @@
   auto network =
     tensorrt_common::TrtUniquePtr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(flag));
   if (!network) {
-    std::cout << "Fail to create network" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create network");
     return false;
   }

@@ -116,22 +123,23 @@
   parser->parseFromFile(onnx_path.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kERROR));
 
   if (!setProfile(*builder, *network, *config)) {
-    std::cout << "Fail to set profile" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to set profile");
     return false;
   }
 
-  std::cout << "Applying optimizations and building TRT CUDA engine (" << onnx_path << ") ..."
-            << std::endl;
+  logger_.log(
+    Severity::kINFO, std::string("Applying optimizations and building TRT CUDA engine (") +
+                       onnx_path + std::string(") ..."));
   plan_ = tensorrt_common::TrtUniquePtr<nvinfer1::IHostMemory>(
     builder->buildSerializedNetwork(*network, *config));
   if (!plan_) {
-    std::cout << "Fail to create serialized network" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create serialized network");
     return false;
   }
   engine_ = tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine>(
     runtime_->deserializeCudaEngine(plan_->data(), plan_->size()));
   if (!engine_) {
-    std::cout << "Fail to create engine" << std::endl;
+    logger_.log(Severity::kERROR, "Failed to create engine");
     return false;
   }

@@ -140,21 +148,23 @@
 
 bool TensorRTWrapper::saveEngine(const std::string & engine_path)
 {
-  std::cout << "Writing to " << engine_path << std::endl;
+  using Severity = tensorrt_common::Logger::Severity;
+  logger_.log(Severity::kINFO, std::string("Writing to ") + engine_path);
   std::ofstream file(engine_path, std::ios::out | std::ios::binary);
   file.write(reinterpret_cast<const char *>(plan_->data()), plan_->size());
   return true;
 }
 
 bool TensorRTWrapper::loadEngine(const std::string & engine_path)
 {
+  using Severity = tensorrt_common::Logger::Severity;
   std::ifstream engine_file(engine_path);
   std::stringstream engine_buffer;
   engine_buffer << engine_file.rdbuf();
   std::string engine_str = engine_buffer.str();
   engine_ = tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(
     reinterpret_cast<const void *>(engine_str.data()), engine_str.size()));
-  std::cout << "Loaded engine from " << engine_path << std::endl;
+  logger_.log(Severity::kINFO, std::string("Loaded engine from ") + engine_path);
   return true;
 }
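
A note on the version guard in parseONNX above: it collapses the TensorRT version into a single integer before comparing against 8400 (i.e. 8.4.0). A small sketch of that arithmetic, with an assumed example version:

#include <cstdio>

int main()
{
  // Mirrors (NV_TENSORRT_MAJOR * 1000) + (NV_TENSORRT_MINOR * 100) + NV_TENSORRT_PATCH.
  // TensorRT 8.4.1 encodes to 8401, which satisfies >= 8400; 8.2.3 encodes
  // to 8203 and would take the #else branch.
  const int major = 8, minor = 4, patch = 1;  // assumed example version
  const int encoded = major * 1000 + minor * 100 + patch;
  std::printf("encoded=%d uses_new_api=%s\n", encoded, encoded >= 8400 ? "yes" : "no");
  return 0;
}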
