feat(lidar_apollo_segmentation_tvm): port package (tier4#1181)
Port apollo_lidar_segmentation from Autoware.Auto, changing the name to align with the TierIV package using TensorRT.

Issue-Id: SCM-4493
Change-Id: If2bd78d473a0044d06fa42658766ffe9177a59a2
Signed-off-by: Luca Foschiani <luca.foschiani@arm.com>
Signed-off-by: Ambroise Vincent <ambroise.vincent@arm.com>
Co-authored-by: Luca Foschiani <luca.foschiani@arm.com>
Co-authored-by: Xinyu Wang <93699235+angry-crab@users.noreply.github.com>
Showing 27 changed files with 2,369 additions and 0 deletions.
perception/lidar_apollo_segmentation_tvm/CMakeLists.txt
@@ -0,0 +1,75 @@
# Copyright 2021-2022 Arm Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cmake_minimum_required(VERSION 3.14)
project(lidar_apollo_segmentation_tvm)

find_package(autoware_cmake REQUIRED)
autoware_package()

set(tvm_runtime_DIR ${tvm_vendor_DIR})
find_package(tvm_runtime CONFIG REQUIRED)
find_package(PCL 1.10 REQUIRED)

# Get target backend
set(${PROJECT_NAME}_BACKEND llvm CACHE STRING "${PROJECT_NAME} neural network backend")

# Test if files exist. The result is set in baidu_cnn_FOUND
autoware_check_neural_network(baidu_cnn "${${PROJECT_NAME}_BACKEND}")
if(baidu_cnn_FOUND)
  # Library
  ament_auto_add_library(${PROJECT_NAME} SHARED
    include/lidar_apollo_segmentation_tvm/lidar_apollo_segmentation_tvm.hpp
    include/lidar_apollo_segmentation_tvm/cluster2d.hpp
    include/lidar_apollo_segmentation_tvm/disjoint_set.hpp
    include/lidar_apollo_segmentation_tvm/feature_generator.hpp
    include/lidar_apollo_segmentation_tvm/feature_map.hpp
    include/lidar_apollo_segmentation_tvm/log_table.hpp
    include/lidar_apollo_segmentation_tvm/util.hpp
    src/lidar_apollo_segmentation_tvm.cpp
    src/cluster2d.cpp
    src/feature_generator.cpp
    src/feature_map.cpp
    src/log_table.cpp
  )

  target_compile_options(${PROJECT_NAME} PRIVATE "-Wno-sign-conversion" "-Wno-conversion")

  target_compile_definitions(${PROJECT_NAME} PRIVATE NETWORKS_BACKEND=${${PROJECT_NAME}_BACKEND})

  # Set includes as "SYSTEM" to avoid compiler errors on their headers.
  target_include_directories(${PROJECT_NAME} SYSTEM PUBLIC
    "${PCL_INCLUDE_DIRS}"
    "${tvm_vendor_INCLUDE_DIRS}"
  )

  target_link_libraries(${PROJECT_NAME}
    ${PCL_LIBRARIES}
    ${tf2_ros_LIBRARIES}
    ${tvm_runtime_LIBRARIES}
  )

  if(BUILD_TESTING)
    # gtest
    set(LIDAR_APOLLO_SEGMENTATION_TVM_GTEST lidar_apollo_segmentation_tvm_gtest)
    ament_add_gtest(${LIDAR_APOLLO_SEGMENTATION_TVM_GTEST} test/main.cpp TIMEOUT 120)
    target_include_directories(${LIDAR_APOLLO_SEGMENTATION_TVM_GTEST} PRIVATE "include")
    target_link_libraries(${LIDAR_APOLLO_SEGMENTATION_TVM_GTEST} ${PROJECT_NAME})
  endif()

  ament_export_include_directories(${PCL_INCLUDE_DIRS})
  ament_auto_package()
else()
  message(WARNING "Neural network not found, skipping package.")
endif()
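Aside (not part of the commit): the `target_compile_definitions` call above passes the backend name to the compiler as a bare token, e.g. `NETWORKS_BACKEND=llvm`. C++ code that wants that name as a string at runtime typically stringizes the macro. The sketch below only illustrates that stringization technique; the `STR`/`XSTR` helpers and the fallback value are hypothetical and are not taken from this package.

```cpp
// Illustrative sketch only: how a bare -DNETWORKS_BACKEND=llvm definition can be
// turned into a usable string. Not code from lidar_apollo_segmentation_tvm.
#include <iostream>
#include <string>

#ifndef NETWORKS_BACKEND
#define NETWORKS_BACKEND llvm  // fallback so this sketch builds on its own
#endif

#define STR(s) #s       // stringize the (already expanded) argument
#define XSTR(s) STR(s)  // expand the macro argument first, then stringize it

int main()
{
  const std::string backend = XSTR(NETWORKS_BACKEND);  // "llvm", "vulkan", ...
  std::cout << "Neural network backend: " << backend << "\n";
  return 0;
}
```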
perception/lidar_apollo_segmentation_tvm/design/lidar-segmentation-design.md
29 changes: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
# lidar_apollo_segmentation_tvm {#lidar-apollo-segmentation-tvm-design}

## Design

### Usage {#lidar-apollo-segmentation-tvm-design-usage}

#### Neural network

This package will not build without a neural network for its inference.
The network is provided by the neural_networks_provider package.
See its design page for more information on how to enable downloading pre-compiled networks (by setting the `DOWNLOAD_ARTIFACTS` cmake variable) or how to handle user-compiled networks.

#### Backend

The backend used for inference can be selected by setting the `lidar_apollo_segmentation_tvm_BACKEND` cmake variable.
The currently available options are `llvm` for a CPU backend and `vulkan` for a GPU backend.
It defaults to `llvm`.

### Convolutional Neural Networks (CNN) Segmentation

See the [original design](https://github.com/ApolloAuto/apollo/blob/3422a62ce932cb1c0c269922a0f1aa59a290b733/docs/specs/3d_obstacle_perception.md#convolutional-neural-networks-cnn-segmentation) by Apollo.
The paragraph of interest goes up to, but does not include, the "MinBox Builder" paragraph.
This package instead relies on further processing by a dedicated shape estimator.

Note: the parameters described in the original design have been modified and are out of date.

## Reference

Lidar segmentation is based on a core algorithm by [Apollo](https://github.com/ApolloAuto/apollo/blob/master/docs/specs/3d_obstacle_perception.md), with modifications from [TierIV](https://github.com/tier4/lidar_instance_segmentation_tvm) for the TVM backend.
perception/lidar_apollo_segmentation_tvm/include/lidar_apollo_segmentation_tvm/cluster2d.hpp
180 changes: 180 additions & 0 deletions
@@ -0,0 +1,180 @@
// Copyright 2017-2022 Arm Ltd., TierIV, Autoware Foundation, The Apollo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef LIDAR_APOLLO_SEGMENTATION_TVM__CLUSTER2D_HPP_
#define LIDAR_APOLLO_SEGMENTATION_TVM__CLUSTER2D_HPP_

#include <common/types.hpp>
#include <lidar_apollo_segmentation_tvm/disjoint_set.hpp>
#include <lidar_apollo_segmentation_tvm/util.hpp>
#include <lidar_apollo_segmentation_tvm/visibility_control.hpp>

#include <autoware_auto_perception_msgs/msg/point_clusters.hpp>
#include <tier4_perception_msgs/msg/detected_object_with_feature.hpp>
#include <tier4_perception_msgs/msg/detected_objects_with_feature.hpp>

#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

#include <memory>
#include <vector>

namespace autoware
{
namespace perception
{
namespace lidar_apollo_segmentation_tvm
{
using autoware::common::types::bool8_t;
using autoware::common::types::char8_t;
using autoware::common::types::float32_t;

/// \brief Internal obstacle classification categories.
enum class MetaType {
  META_UNKNOWN,
  META_SMALLMOT,
  META_BIGMOT,
  META_NONMOT,
  META_PEDESTRIAN,
  MAX_META_TYPE
};

/// \brief Internal obstacle representation.
struct Obstacle
{
  std::vector<int32_t> grids;
  pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_ptr;
  float32_t score;
  float32_t height;
  float32_t heading;
  MetaType meta_type;
  std::vector<float32_t> meta_type_probs;

  Obstacle() : score(0.0), height(-5.0), heading(0.0), meta_type(MetaType::META_UNKNOWN)
  {
    cloud_ptr.reset(new pcl::PointCloud<pcl::PointXYZI>);
    meta_type_probs.assign(static_cast<int>(MetaType::MAX_META_TYPE), 0.0);
  }
};

/// \brief Handle the output of the CNN-based prediction by obtaining information on individual
/// cells.
class LIDAR_APOLLO_SEGMENTATION_TVM_LOCAL Cluster2D
{
public:
  /// \brief Constructor
  /// \param[in] rows The number of rows in the cluster.
  /// \param[in] cols The number of columns in the cluster.
  /// \param[in] range Scaling factor.
  explicit Cluster2D(int32_t rows, int32_t cols, float32_t range);

  /// \brief Construct a directed graph and search the connected components for candidate object
  /// clusters.
  /// \param[in] inferred_data Prediction information from the neural network inference.
  /// \param[in] pc_ptr Input point cloud.
  /// \param[in] valid_indices Indices of the points to consider in the point cloud.
  /// \param[in] objectness_thresh Threshold for filtering out non-object cells.
  /// \param[in] use_all_grids_for_clustering
  void cluster(
    const float32_t * inferred_data, const pcl::PointCloud<pcl::PointXYZI>::ConstPtr & pc_ptr,
    const pcl::PointIndices & valid_indices, float32_t objectness_thresh,
    bool8_t use_all_grids_for_clustering);

  /// \brief Populate the fields of obstacles_ elements.
  /// \param[in] inferred_data Prediction information from the neural network inference.
  void filter(const float32_t * inferred_data);

  /// \brief Assign a classification type to the obstacles_ elements.
  /// \param[in] inferred_data Prediction information from the neural network inference.
  void classify(const float32_t * inferred_data);

  /// \brief Remove the candidate clusters that don't meet the parameters' requirements.
  /// \param[in] confidence_thresh The detection confidence score threshold.
  /// \param[in] height_thresh If it is non-negative, the points that are higher than the predicted
  /// object height by height_thresh are filtered out.
  /// \param[in] min_pts_num The candidate clusters with less than min_pts_num points are removed.
  /// \return The detected objects.
  std::shared_ptr<tier4_perception_msgs::msg::DetectedObjectsWithFeature> getObjects(
    float32_t confidence_thresh, float32_t height_thresh, int32_t min_pts_num);

  /// \brief Transform an obstacle from the internal representation to the external one.
  /// \param[in] in_obstacle
  /// \return Output obstacle.
  tier4_perception_msgs::msg::DetectedObjectWithFeature obstacleToObject(
    const Obstacle & in_obstacle) const;

private:
  const int32_t rows_;
  const int32_t cols_;
  const int32_t siz_;
  const float32_t range_;
  const float32_t scale_;
  const float32_t inv_res_x_;
  const float32_t inv_res_y_;
  std::vector<int32_t> point2grid_;
  std::vector<Obstacle> obstacles_;
  std::vector<int32_t> id_img_;

  pcl::PointCloud<pcl::PointXYZI>::ConstPtr pc_ptr_;
  const std::vector<int32_t> * valid_indices_in_pc_ = nullptr;

  /// \brief Node of a directed graph.
  struct Node
  {
    Node * center_node;
    Node * parent;
    char8_t node_rank;
    char8_t traversed;
    bool8_t is_center;
    bool8_t is_object;
    int32_t point_num;
    int32_t obstacle_id;

    Node()
    {
      center_node = nullptr;
      parent = nullptr;
      node_rank = 0;
      traversed = 0;
      is_center = false;
      is_object = false;
      point_num = 0;
      obstacle_id = -1;
    }
  };

  /// \brief Check whether signed row and column values are valid array indices.
  inline bool IsValidRowCol(int32_t row, int32_t col) const
  {
    return IsValidRow(row) && IsValidCol(col);
  }

  /// \brief Check whether a signed row value is a valid array index.
  inline bool IsValidRow(int32_t row) const { return row >= 0 && row < rows_; }

  /// \brief Check whether a signed column value is a valid array index.
  inline bool IsValidCol(int32_t col) const { return col >= 0 && col < cols_; }

  /// \brief Transform a row and column coordinate to a linear grid index.
  inline int32_t RowCol2Grid(int32_t row, int32_t col) const { return row * cols_ + col; }

  /// \brief Traverse the directed graph until visiting a node.
  /// \param[in] x Node to visit.
  void traverse(Node * x) const;
};
}  // namespace lidar_apollo_segmentation_tvm
}  // namespace perception
}  // namespace autoware
#endif  // LIDAR_APOLLO_SEGMENTATION_TVM__CLUSTER2D_HPP_
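To make the interface above concrete, here is a minimal, hypothetical usage sketch based only on the declarations in this header. The grid size, range, thresholds, and the source of `inferred_data` are illustrative assumptions; in the package they are supplied by the library's segmentation pipeline rather than hard-coded like this.

```cpp
// Hypothetical usage of Cluster2D; all numeric values are placeholder assumptions.
#include <lidar_apollo_segmentation_tvm/cluster2d.hpp>

#include <pcl/PointIndices.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

using autoware::perception::lidar_apollo_segmentation_tvm::Cluster2D;

void segment_sketch(
  const float * inferred_data,  // CNN inference output, one set of channels per grid cell
  const pcl::PointCloud<pcl::PointXYZI>::ConstPtr & cloud,
  const pcl::PointIndices & valid_indices)
{
  // Assumed 640x640 grid covering a 60 m range.
  Cluster2D cluster(640, 640, 60.0F);

  // Build candidate clusters from the inference output and the input cloud.
  cluster.cluster(inferred_data, cloud, valid_indices, 0.5F, true);

  // Populate per-obstacle fields, then assign a classification to each obstacle.
  cluster.filter(inferred_data);
  cluster.classify(inferred_data);

  // Keep confident clusters only and convert them to the output message type.
  auto objects = cluster.getObjects(0.1F, 0.5F, 3);
  (void)objects;  // e.g. hand over to a shape estimator or publisher
}
```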
...tion/lidar_apollo_segmentation_tvm/include/lidar_apollo_segmentation_tvm/disjoint_set.hpp
84 changes: 84 additions & 0 deletions
@@ -0,0 +1,84 @@
// Copyright 2017-2022 Arm Ltd., The Apollo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef LIDAR_APOLLO_SEGMENTATION_TVM__DISJOINT_SET_HPP_
#define LIDAR_APOLLO_SEGMENTATION_TVM__DISJOINT_SET_HPP_

namespace autoware
{
namespace perception
{
namespace lidar_apollo_segmentation_tvm
{
/// \brief Add a new element in a new set.
/// \param[inout] x The element to be added.
template <class T>
void DisjointSetMakeSet(T * x)
{
  x->parent = x;
  x->node_rank = 0;
}

/// \brief Recursively follow the chain of parent pointers from the input until reaching a root.
/// \param[inout] x The element whose root is looked for.
/// \return The root of the set containing x.
template <class T>
T * DisjointSetFindRecursive(T * x)
{
  if (x->parent != x) {
    x->parent = DisjointSetFindRecursive(x->parent);
  }
  return x->parent;
}

/// \brief Find the root of the set x belongs to.
/// \param[inout] x The set element.
/// \return The root of the set containing x.
template <class T>
T * DisjointSetFind(T * x)
{
  T * y = x->parent;
  if (y == x || y->parent == y) {
    return y;
  }
  T * root = DisjointSetFindRecursive(y->parent);
  x->parent = root;
  y->parent = root;
  return root;
}

/// \brief Replace the set containing x and the set containing y with their union.
/// \param[inout] x An element of a first set.
/// \param[inout] y An element of a second set.
template <class T>
void DisjointSetUnion(T * x, T * y)
{
  x = DisjointSetFind(x);
  y = DisjointSetFind(y);
  if (x == y) {
    return;
  }
  if (x->node_rank < y->node_rank) {
    x->parent = y;
  } else if (y->node_rank < x->node_rank) {
    y->parent = x;
  } else {
    y->parent = x;
    x->node_rank++;
  }
}
}  // namespace lidar_apollo_segmentation_tvm
}  // namespace perception
}  // namespace autoware
#endif  // LIDAR_APOLLO_SEGMENTATION_TVM__DISJOINT_SET_HPP_
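The three helpers above form a standard union-find (disjoint-set) structure, with path compression in the find operations and union by rank in `DisjointSetUnion`. They work on any type exposing `parent` and `node_rank` members, such as the `Node` struct in cluster2d.hpp. Below is a minimal self-contained sketch with a hypothetical element type, for illustration only.

```cpp
// Hypothetical element type and usage; only `parent` and `node_rank` are required.
#include <lidar_apollo_segmentation_tvm/disjoint_set.hpp>

#include <cassert>

using namespace autoware::perception::lidar_apollo_segmentation_tvm;

struct Cell
{
  Cell * parent{nullptr};
  char node_rank{0};
};

int main()
{
  Cell a, b, c;

  // Each cell starts out as its own singleton set.
  DisjointSetMakeSet(&a);
  DisjointSetMakeSet(&b);
  DisjointSetMakeSet(&c);

  // Merge the sets containing a and b; c stays separate.
  DisjointSetUnion(&a, &b);

  assert(DisjointSetFind(&a) == DisjointSetFind(&b));
  assert(DisjointSetFind(&a) != DisjointSetFind(&c));
  return 0;
}
```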