refactor(tvm_utility): redefine network node and inference config (autowarefoundation#2467)

* redefine network node and inference config

Signed-off-by: Xinyu Wang <xinyu.wang@tier4.jp>

* accommodate network node changes

Signed-off-by: Xinyu Wang <xinyu.wang@tier4.jp>

* ci(pre-commit): autofix

* revert device type and id

Signed-off-by: Xinyu Wang <xinyu.wang@tier4.jp>

* cmake setting

Signed-off-by: Xinyu Wang <xinyu.wang@tier4.jp>

* data type and modelzoo version

Signed-off-by: Xinyu Wang <xinyu.wang@tier4.jp>

Signed-off-by: Xinyu Wang <xinyu.wang@tier4.jp>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
angry-crab and pre-commit-ci[bot] authored Dec 22, 2022
1 parent 7ecbbc7 commit db37c87
Showing 5 changed files with 63 additions and 42 deletions.
28 changes: 18 additions & 10 deletions common/tvm_utility/include/tvm_utility/pipeline.hpp
@@ -182,8 +182,21 @@ class Pipeline
PostProcessorType post_processor_{};
};

// Each node should be specified with a string name and a shape
using NetworkNode = std::pair<std::string, std::vector<int64_t>>;
// NetworkNode
typedef struct
{
// Node name
std::string node_name;

// Network data type configurations
DLDataTypeCode tvm_dtype_code;
int32_t tvm_dtype_bits;
int32_t tvm_dtype_lanes;

// Shape info
std::vector<int64_t> node_shape;
} NetworkNode;

typedef struct
{
// Network info
@@ -196,11 +209,6 @@ typedef struct
std::string network_graph_path;
std::string network_params_path;

// Network data type configurations
DLDataTypeCode tvm_dtype_code;
int32_t tvm_dtype_bits;
int32_t tvm_dtype_lanes;

// Inference hardware configuration
DLDeviceType tvm_device_type;
int32_t tvm_device_id;
@@ -278,8 +286,8 @@ class InferenceEngineTVM : public InferenceEngine

for (auto & output_config : config.network_outputs) {
output_.push_back(TVMArrayContainer(
output_config.second, config.tvm_dtype_code, config.tvm_dtype_bits, config.tvm_dtype_lanes,
config.tvm_device_type, config.tvm_device_id));
output_config.node_shape, output_config.tvm_dtype_code, output_config.tvm_dtype_bits,
output_config.tvm_dtype_lanes, config.tvm_device_type, config.tvm_device_id));
}
}

@@ -290,7 +298,7 @@ class InferenceEngineTVM : public InferenceEngine
if (input[index].getArray() == nullptr) {
throw std::runtime_error("input variable is null");
}
set_input(config_.network_inputs[index].first.c_str(), input[index].getArray());
set_input(config_.network_inputs[index].node_name.c_str(), input[index].getArray());
}

// Execute the inference
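
With the data type now carried per node, the size of every input and output tensor can be derived from its NetworkNode entry instead of a single config-wide dtype. Below is a minimal, self-contained sketch of that per-node size calculation, mirroring the struct above; it is not the library header itself, the dtype code follows dlpack conventions, and the node name and shape are placeholders rather than values from a real model.

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

// Stand-in for tvm_utility::pipeline::NetworkNode as redefined in this commit.
struct NetworkNode
{
  std::string node_name;
  int32_t tvm_dtype_code;   // dlpack data type code, e.g. 2 == kDLFloat
  int32_t tvm_dtype_bits;
  int32_t tvm_dtype_lanes;
  std::vector<int64_t> node_shape;
};

// Bytes needed for one tensor described by a node entry.
int64_t tensor_bytes(const NetworkNode & node)
{
  const int64_t elements = std::accumulate(
    node.node_shape.begin(), node.node_shape.end(), int64_t{1}, std::multiplies<int64_t>());
  return elements * (node.tvm_dtype_bits / 8) * node.tvm_dtype_lanes;
}

int main()
{
  const NetworkNode input{"input_1", 2, 32, 1, {1, 416, 416, 3}};
  std::cout << input.node_name << ": " << tensor_bytes(input) << " bytes\n";
  return 0;
}
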
32 changes: 17 additions & 15 deletions common/tvm_utility/test/yolo_v2_tiny/main.cpp
@@ -45,18 +45,18 @@ class PreProcessorYoloV2Tiny : public tvm_utility::pipeline::PreProcessor<std::s
{
public:
explicit PreProcessorYoloV2Tiny(tvm_utility::pipeline::InferenceEngineTVMConfig config)
: network_input_width(config.network_inputs[0].second[1]),
network_input_height(config.network_inputs[0].second[2]),
network_input_depth(config.network_inputs[0].second[3]),
network_datatype_bytes(config.tvm_dtype_bits / 8)
: network_input_width(config.network_inputs[0].node_shape[1]),
network_input_height(config.network_inputs[0].node_shape[2]),
network_input_depth(config.network_inputs[0].node_shape[3]),
network_input_datatype_bytes(config.network_inputs[0].tvm_dtype_bits / 8)
{
// Allocate input variable
std::vector<int64_t> shape_x{1, network_input_width, network_input_height, network_input_depth};
tvm_utility::pipeline::TVMArrayContainer x{
shape_x,
config.tvm_dtype_code,
config.tvm_dtype_bits,
config.tvm_dtype_lanes,
config.network_inputs[0].tvm_dtype_code,
config.network_inputs[0].tvm_dtype_bits,
config.network_inputs[0].tvm_dtype_lanes,
config.tvm_device_type,
config.tvm_device_id};

@@ -101,7 +101,8 @@ class PreProcessorYoloV2Tiny : public tvm_utility::pipeline::PreProcessor<std::s

TVMArrayCopyFromBytes(
output.getArray(), image_3f.data,
network_input_width * network_input_height * network_input_depth * network_datatype_bytes);
network_input_width * network_input_height * network_input_depth *
network_input_datatype_bytes);

return {output};
}
@@ -110,18 +111,18 @@ class PreProcessorYoloV2Tiny : public tvm_utility::pipeline::PreProcessor<std::s
int64_t network_input_width;
int64_t network_input_height;
int64_t network_input_depth;
int64_t network_datatype_bytes;
int64_t network_input_datatype_bytes;
tvm_utility::pipeline::TVMArrayContainer output;
};

class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std::vector<float>>
{
public:
explicit PostProcessorYoloV2Tiny(tvm_utility::pipeline::InferenceEngineTVMConfig config)
: network_output_width(config.network_outputs[0].second[1]),
network_output_height(config.network_outputs[0].second[2]),
network_output_depth(config.network_outputs[0].second[3]),
network_datatype_bytes(config.tvm_dtype_bits / 8)
: network_output_width(config.network_outputs[0].node_shape[1]),
network_output_height(config.network_outputs[0].node_shape[2]),
network_output_depth(config.network_outputs[0].node_shape[3]),
network_output_datatype_bytes(config.network_outputs[0].tvm_dtype_bits / 8)
{
// Parse human readable names for the classes
std::ifstream label_file{LABEL_FILENAME};
@@ -170,7 +171,8 @@ class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std:
network_output_width * network_output_height * network_output_depth, 0);
TVMArrayCopyToBytes(
input[0].getArray(), infer.data(),
network_output_width * network_output_height * network_output_depth * network_datatype_bytes);
network_output_width * network_output_height * network_output_depth *
network_output_datatype_bytes);

// Utility function to return data from y given index
auto get_output_data = [this, infer, n_classes, n_anchors, n_coords](
@@ -232,7 +234,7 @@ class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std:
int64_t network_output_width;
int64_t network_output_height;
int64_t network_output_depth;
int64_t network_datatype_bytes;
int64_t network_output_datatype_bytes;
std::vector<std::string> labels{};
std::vector<std::pair<float, float>> anchors{};
};
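
In the updated test, both the geometry and the element width come from network_inputs[0] and network_outputs[0], so the byte counts passed to TVMArrayCopyFromBytes and TVMArrayCopyToBytes are fully node-driven. The following is an illustrative sketch of that size arithmetic only, with a float vector standing in for the preprocessed cv::Mat; the NHWC shape and the 32-bit element width are assumed placeholders.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

int main()
{
  // Placeholders for config.network_inputs[0].node_shape (NHWC in this test)
  // and for its per-node tvm_dtype_bits field.
  const std::vector<int64_t> node_shape{1, 416, 416, 3};
  const int32_t tvm_dtype_bits = 32;

  const int64_t width = node_shape[1];
  const int64_t height = node_shape[2];
  const int64_t depth = node_shape[3];
  const int64_t datatype_bytes = tvm_dtype_bits / 8;

  // Host-side staging buffer standing in for the preprocessed image data.
  std::vector<float> image(static_cast<std::size_t>(width * height * depth));

  // The raw copy into the TVM array moves exactly this many bytes.
  const int64_t copy_bytes = width * height * depth * datatype_bytes;
  assert(copy_bytes == static_cast<int64_t>(image.size() * sizeof(float)));
  return 0;
}
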
15 changes: 13 additions & 2 deletions common/tvm_utility/tvm_utility-extras.cmake
@@ -14,7 +14,7 @@

# Get user-provided variables
set(DOWNLOAD_ARTIFACTS OFF CACHE BOOL "enable artifacts download")
set(MODELZOO_VERSION "2.1.0-20221102" CACHE STRING "targeted ModelZoo version")
set(MODELZOO_VERSION "3.0.0-20221221" CACHE STRING "targeted ModelZoo version")

#
# Download the selected neural network if it is not already present on disk.
@@ -32,6 +32,7 @@ set(MODELZOO_VERSION "2.1.0-20221102" CACHE STRING "targeted ModelZoo version")
function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
set(DATA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/data)
set(EXTERNALPROJECT_NAME ${MODEL_NAME}_${MODEL_BACKEND})
set(PREPROCESSING "")

# Prioritize user-provided models.
if(IS_DIRECTORY "${DATA_PATH}/user/${MODEL_NAME}")
@@ -42,6 +43,13 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
"${DATA_PATH}/models/${MODEL_NAME}/inference_engine_tvm_config.hpp"
COPYONLY
)
if(EXISTS "${DATA_PATH}/user/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
configure_file(
"${DATA_PATH}/user/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp"
"${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp"
COPYONLY
)
endif()
set(DOWNLOAD_DIR "")
set(SOURCE_DIR "${DATA_PATH}/user/${MODEL_NAME}")
set(INSTALL_DIRECTORY "${DATA_PATH}/user/${MODEL_NAME}")
@@ -63,7 +71,9 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
set(SOURCE_DIR "${DATA_PATH}/models/${MODEL_NAME}")
set(INSTALL_DIRECTORY "${DATA_PATH}/models/${MODEL_NAME}")
endif()

if(EXISTS "${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
set(PREPROCESSING "${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
endif()
include(ExternalProject)
externalproject_add(${EXTERNALPROJECT_NAME}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
@@ -72,6 +82,7 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
BUILD_BYPRODUCTS "${DATA_PATH}/models/${MODEL_NAME}/inference_engine_tvm_config.hpp"
BUILD_BYPRODUCTS ${PREPROCESSING}
INSTALL_COMMAND ""
)
install(
@@ -79,7 +79,7 @@ class LIDAR_APOLLO_SEGMENTATION_TVM_LOCAL ApolloLidarSegmentationPreProcessor
const int64_t input_channels;
const int64_t input_width;
const int64_t input_height;
const int64_t datatype_bytes;
const int64_t input_datatype_bytes;
const std::shared_ptr<FeatureGenerator> feature_generator;
TVMArrayContainer output;
};
Expand Down Expand Up @@ -115,7 +115,7 @@ class LIDAR_APOLLO_SEGMENTATION_TVM_LOCAL ApolloLidarSegmentationPostProcessor
const int64_t output_channels;
const int64_t output_width;
const int64_t output_height;
const int64_t datatype_bytes;
const int64_t output_datatype_bytes;
const float32_t objectness_thresh_;
const float32_t score_threshold_;
const float32_t height_thresh_;
@@ -36,10 +36,10 @@ ApolloLidarSegmentationPreProcessor::ApolloLidarSegmentationPreProcessor(
const tvm_utility::pipeline::InferenceEngineTVMConfig & config, int32_t range,
bool8_t use_intensity_feature, bool8_t use_constant_feature, float32_t min_height,
float32_t max_height)
: input_channels(config.network_inputs[0].second[1]),
input_width(config.network_inputs[0].second[2]),
input_height(config.network_inputs[0].second[3]),
datatype_bytes(config.tvm_dtype_bits / 8),
: input_channels(config.network_inputs[0].node_shape[1]),
input_width(config.network_inputs[0].node_shape[2]),
input_height(config.network_inputs[0].node_shape[3]),
input_datatype_bytes(config.network_inputs[0].tvm_dtype_bits / 8),
feature_generator(std::make_shared<FeatureGenerator>(
input_width, input_height, range, use_intensity_feature, use_constant_feature, min_height,
max_height))
@@ -48,9 +48,9 @@ ApolloLidarSegmentationPreProcessor::ApolloLidarSegmentationPreProcessor(
std::vector<int64_t> shape_x{1, input_channels, input_width, input_height};
TVMArrayContainer x{
shape_x,
config.tvm_dtype_code,
config.tvm_dtype_bits,
config.tvm_dtype_lanes,
config.network_inputs[0].tvm_dtype_code,
config.network_inputs[0].tvm_dtype_bits,
config.network_inputs[0].tvm_dtype_lanes,
config.tvm_device_type,
config.tvm_device_id};
output = x;
@@ -68,7 +68,7 @@ TVMArrayContainerVector ApolloLidarSegmentationPreProcessor::schedule(

TVMArrayCopyFromBytes(
output.getArray(), feature_map_ptr->map_data.data(),
input_channels * input_height * input_width * datatype_bytes);
input_channels * input_height * input_width * input_datatype_bytes);

return {output};
}
@@ -78,10 +78,10 @@ ApolloLidarSegmentationPostProcessor::ApolloLidarSegmentationPostProcessor(
const pcl::PointCloud<pcl::PointXYZI>::ConstPtr & pc_ptr, int32_t range,
float32_t objectness_thresh, float32_t score_threshold, float32_t height_thresh,
int32_t min_pts_num)
: output_channels(config.network_outputs[0].second[1]),
output_width(config.network_outputs[0].second[2]),
output_height(config.network_outputs[0].second[3]),
datatype_bytes(config.tvm_dtype_bits / 8),
: output_channels(config.network_outputs[0].node_shape[1]),
output_width(config.network_outputs[0].node_shape[2]),
output_height(config.network_outputs[0].node_shape[3]),
output_datatype_bytes(config.network_outputs[0].tvm_dtype_bits / 8),
objectness_thresh_(objectness_thresh),
score_threshold_(score_threshold),
height_thresh_(height_thresh),
@@ -100,7 +100,7 @@ std::shared_ptr<DetectedObjectsWithFeature> ApolloLidarSegmentationPostProcessor
std::vector<float32_t> feature(output_channels * output_width * output_height, 0);
TVMArrayCopyToBytes(
input[0].getArray(), feature.data(),
output_channels * output_width * output_height * datatype_bytes);
output_channels * output_width * output_height * output_datatype_bytes);
cluster2d_->cluster(
feature.data(), pc_ptr_, valid_idx, objectness_thresh_, true /*use all grids for clustering*/);
auto object_array = cluster2d_->getObjects(score_threshold_, height_thresh_, min_pts_num_);
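
For the Apollo segmentation pipeline, the staging tensor is now allocated with the dtype fields of network_inputs[0], while the device type and id still come from the top-level config. The sketch below approximates that allocation directly against the TVM C runtime API, roughly what TVMArrayContainer wraps; the NCHW shape and the CPU device are placeholder assumptions, not values from the actual model.

#include <dlpack/dlpack.h>
#include <tvm/runtime/c_runtime_api.h>

#include <cstdint>
#include <vector>

int main()
{
  // Placeholders for config.network_inputs[0].node_shape and its dtype fields.
  std::vector<int64_t> shape{1, 6, 640, 640};   // {1, channels, width, height}
  const int dtype_code = kDLFloat;              // tvm_dtype_code
  const int dtype_bits = 32;                    // tvm_dtype_bits
  const int dtype_lanes = 1;                    // tvm_dtype_lanes

  // Device settings remain in the top-level config.
  const int device_type = kDLCPU;               // tvm_device_type
  const int device_id = 0;                      // tvm_device_id

  DLTensor * staging = nullptr;
  const int status = TVMArrayAlloc(
    shape.data(), static_cast<int>(shape.size()), dtype_code, dtype_bits, dtype_lanes,
    device_type, device_id, &staging);
  if (status == 0) {
    TVMArrayFree(staging);
  }
  return status;
}
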
