feat(pointpainting fusion): add pointpainting (#991)
* pointpainting_fusion-paint points

* pointpainting_fusion: run the model

* fix incorrect projection

* update pointpainting_fusion using centerpoint as shared library

* pointpainting_fusion: fix uncommitted node.cpp and node.hpp

* fix: update the class

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: update the class

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: small corrections

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: corrections to use centerpoint_trt.hpp

Signed-off-by: tzhong518 <sworgun@gmail.com>

* ci(pre-commit): autofix

* fix: resolve conversations

Signed-off-by: tzhong518 <sworgun@gmail.com>

* ci(pre-commit): autofix

* fix: resolve conversations

Signed-off-by: tzhong518 <sworgun@gmail.com>

* ci(pre-commit): autofix

* fix: remove magic number

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: remove magic number7

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: fix CMakeLists, doc, variable name and default value

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: CI build&precommit

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: pre-commit-optional&build(humble)

Signed-off-by: tzhong518 <sworgun@gmail.com>

* fix: move guard to publish()

Signed-off-by: tzhong518 <sworgun@gmail.com>

* ci(pre-commit): autofix

* fix: update pointpainting model

Signed-off-by: tzhong518 <sworgun@gmail.com>

* Update perception/image_projection_based_fusion/config/pointpainting.param.yaml

Co-authored-by: Yusuke Muramatsu <yukke42@users.noreply.github.com>

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Yusuke Muramatsu <yukke42@users.noreply.github.com>
3 people authored Jul 6, 2022
1 parent 7c10acb commit cae2bab
Showing 17 changed files with 1,151 additions and 33 deletions.
183 changes: 161 additions & 22 deletions perception/image_projection_based_fusion/CMakeLists.txt
@@ -1,42 +1,181 @@
cmake_minimum_required(VERSION 3.14)
project(image_projection_based_fusion)
add_compile_options(-Wno-unknown-pragmas)

find_package(autoware_cmake REQUIRED)
autoware_package()

find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)
autoware_package()

set(CUDA_VERBOSE OFF)

# set flags for CUDA availability
option(CUDA_AVAIL "CUDA available" OFF)
find_package(CUDA)
if(CUDA_FOUND)
find_library(CUBLAS_LIBRARIES cublas HINTS
${CUDA_TOOLKIT_ROOT_DIR}/lib64
${CUDA_TOOLKIT_ROOT_DIR}/lib
)
if(CUDA_VERBOSE)
message("CUDA is available!")
message("CUDA Libs: ${CUDA_LIBRARIES}")
message("CUDA Headers: ${CUDA_INCLUDE_DIRS}")
endif()
# Note: cublas_device was deprecated in CUDA version 9.2
# https://forums.developer.nvidia.com/t/where-can-i-find-libcublas-device-so-or-libcublas-device-a/67251/4
# In LibTorch, CUDA_cublas_device_LIBRARY is used.
unset(CUDA_cublas_device_LIBRARY CACHE)
set(CUDA_AVAIL ON)
else()
message("CUDA NOT FOUND")
set(CUDA_AVAIL OFF)
endif()

# set flags for TensorRT availability
option(TRT_AVAIL "TensorRT available" OFF)
# try to find the tensorRT modules
find_library(NVINFER nvinfer)
find_library(NVONNXPARSER nvonnxparser)
if(NVINFER AND NVONNXPARSER)
if(CUDA_VERBOSE)
message("TensorRT is available!")
message("NVINFER: ${NVINFER}")
message("NVONNXPARSER: ${NVONNXPARSER}")
endif()
set(TRT_AVAIL ON)
else()
message("TensorRT is NOT Available")
set(TRT_AVAIL OFF)
endif()

# set flags for CUDNN availability
option(CUDNN_AVAIL "CUDNN available" OFF)
# try to find the CUDNN module
find_library(CUDNN_LIBRARY
NAMES libcudnn.so${__cudnn_ver_suffix} libcudnn${__cudnn_ver_suffix}.dylib ${__cudnn_lib_win_name}
PATHS $ENV{LD_LIBRARY_PATH} ${__libpath_cudart} ${CUDNN_ROOT_DIR} ${PC_CUDNN_LIBRARY_DIRS} ${CMAKE_INSTALL_PREFIX}
PATH_SUFFIXES lib lib64 bin
DOC "CUDNN library."
)
if(CUDNN_LIBRARY)
if(CUDA_VERBOSE)
message(STATUS "CUDNN is available!")
message(STATUS "CUDNN_LIBRARY: ${CUDNN_LIBRARY}")
endif()
set(CUDNN_AVAIL ON)
else()
message("CUDNN is NOT Available")
set(CUDNN_AVAIL OFF)
endif()

message(STATUS "start to download")
if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
# Download trained models
set(DATA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/data)
execute_process(COMMAND mkdir -p ${DATA_PATH})

function(download FILE_NAME FILE_HASH)
message(STATUS "Checking and downloading ${FILE_NAME}")
set(FILE_PATH ${DATA_PATH}/${FILE_NAME})
set(STATUS_CODE 0)
message(STATUS "start ${FILE_NAME}")
if(EXISTS ${FILE_PATH})
message(STATUS "found ${FILE_NAME}")
file(MD5 ${FILE_PATH} EXISTING_FILE_HASH)
if("${FILE_HASH}" STREQUAL "${EXISTING_FILE_HASH}")
message(STATUS "same ${FILE_NAME}")
message(STATUS "File already exists.")
else()
message(STATUS "diff ${FILE_NAME}")
message(STATUS "File hash changes. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v1/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
endif()
else()
message(STATUS "not found ${FILE_NAME}")
message(STATUS "File doesn't exists. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v1/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
endif()
if(${STATUS_CODE} EQUAL 0)
message(STATUS "Download completed successfully!")
else()
message(FATAL_ERROR "Error occurred during download: ${ERROR_MESSAGE}")
endif()
endfunction()

# default model
download(pts_voxel_encoder_pointpainting.onnx 795a919537b71faae706b8c71312d31a)
download(pts_backbone_neck_head_pointpainting.onnx edb48a74659820904071ea8050413c23)
find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)

include_directories(
include
SYSTEM
include_directories(
include
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIRS}
)
${PCL_INCLUDE_DIRS}
)

ament_auto_add_library(${PROJECT_NAME} SHARED
src/fusion_node.cpp
src/debugger.cpp
src/utils/geometry.cpp
src/utils/utils.cpp
src/roi_cluster_fusion/node.cpp
src/roi_detected_object_fusion/node.cpp
)
target_link_libraries(${PROJECT_NAME}
${OpenCV_LIBRARIES}
${EIGEN3_LIBRARIES}
)
ament_auto_add_library(${PROJECT_NAME} SHARED
src/fusion_node.cpp
src/debugger.cpp
src/utils/geometry.cpp
src/utils/utils.cpp
src/roi_cluster_fusion/node.cpp
src/roi_detected_object_fusion/node.cpp
src/pointpainting_fusion/node.cpp
src/pointpainting_fusion/pointpainting_trt.cpp
src/pointpainting_fusion/voxel_generator.cpp
)

rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "image_projection_based_fusion::RoiClusterFusionNode"
EXECUTABLE roi_cluster_fusion_node
)
cuda_add_library(pointpainting_cuda_lib SHARED
src/pointpainting_fusion/preprocess_kernel.cu
)

target_link_libraries(${PROJECT_NAME}
${OpenCV_LIBRARIES}
${EIGEN3_LIBRARIES}
${PCL_LIBRARIES}
${NVINFER}
${CUDA_LIBRARIES}
${CUBLAS_LIBRARIES}
${CUDA_curand_LIBRARY}
${CUDNN_LIBRARY}
pointpainting_cuda_lib
)

rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "image_projection_based_fusion::RoiClusterFusionNode"
EXECUTABLE roi_cluster_fusion_node
)
else()
find_package(ament_cmake_auto REQUIRED)
ament_auto_find_build_dependencies()

ament_auto_package(
INSTALL_TO_SHARE
launch
)
endif()

rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "image_projection_based_fusion::RoiDetectedObjectFusionNode"
EXECUTABLE roi_detected_object_fusion_node
)

rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "image_projection_based_fusion::PointpaintingFusionNode"
EXECUTABLE pointpainting_fusion_node
)

ament_auto_package(INSTALL_TO_SHARE
launch
config
data
)
10 changes: 10 additions & 0 deletions perception/image_projection_based_fusion/config/pointpainting.param.yaml
@@ -0,0 +1,10 @@
/**:
ros__parameters:
class_names: ["CAR", "PEDESTRIAN", "BICYCLE"]
rename_car_to_truck_and_bus: true
point_feature_size: 6 # x, y, z and car, pedestrian, bicycle
max_voxel_size: 40000
point_cloud_range: [-76.8, -76.8, -3.0, 76.8, 76.8, 5.0]
voxel_size: [0.32, 0.32, 8.0]
downsample_factor: 1
encoder_in_feature_size: 11
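
The point cloud range and voxel size above determine the size of the pseudo-image grid that the painted points are scattered into. A minimal sketch of that arithmetic (standalone, not code from the package):

```cpp
#include <array>
#include <cmath>
#include <iostream>

int main()
{
  // Values from pointpainting.param.yaml: [x_min, y_min, z_min, x_max, y_max, z_max]
  const std::array<double, 6> point_cloud_range{-76.8, -76.8, -3.0, 76.8, 76.8, 5.0};
  const std::array<double, 3> voxel_size{0.32, 0.32, 8.0};

  // Grid size along each axis = extent of the range divided by the voxel size
  const long grid_x = std::lround((point_cloud_range[3] - point_cloud_range[0]) / voxel_size[0]);
  const long grid_y = std::lround((point_cloud_range[4] - point_cloud_range[1]) / voxel_size[1]);
  const long grid_z = std::lround((point_cloud_range[5] - point_cloud_range[2]) / voxel_size[2]);

  std::cout << grid_x << " x " << grid_y << " x " << grid_z << std::endl;  // 480 x 480 x 1
}
```
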
@@ -0,0 +1,89 @@
# pointpainting_fusion

## Purpose

The `pointpainting_fusion` package incorporates the class information produced by 2D object detection into 3D object detection.

## Inner-workings / Algorithms

The lidar points are projected onto the output of an image-only 2D object detection network, and the class scores are appended to each point. The painted point cloud can then be fed to the CenterPoint network.

![pointpainting_fusion_image](./images/pointpainting_fusion.jpg)
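
As a rough illustration of the painting step, the sketch below projects camera-frame points through a pinhole projection matrix and appends the 2D detection score of any ROI that contains the projected pixel. It is a simplified stand-in, assuming the points are already transformed into the camera frame and a 3x4 projection matrix is taken from `sensor_msgs::msg::CameraInfo`; the node itself operates directly on `sensor_msgs::msg::PointCloud2` fields.

```cpp
#include <Eigen/Core>

#include <algorithm>
#include <array>
#include <vector>

// Illustrative types only; not the package's data structures.
struct PaintedPoint
{
  Eigen::Vector3d xyz;                  // point expressed in the camera frame
  std::array<float, 3> class_scores{};  // CAR, PEDESTRIAN, BICYCLE
};

struct Roi2d
{
  double x_min, y_min, x_max, y_max;  // 2D bounding box in pixel coordinates
  int label;                          // index into class_scores (assumed valid)
  float score;                        // 2D detector confidence
};

// Append the ROI's class score to every point whose projection falls inside the ROI.
void paintPoints(
  const Eigen::Matrix<double, 3, 4> & projection,  // P matrix from the camera info
  const Roi2d & roi, std::vector<PaintedPoint> & points)
{
  for (auto & point : points) {
    if (point.xyz.z() <= 0.0) {
      continue;  // point is behind the image plane
    }
    const Eigen::Vector4d xyz1(point.xyz.x(), point.xyz.y(), point.xyz.z(), 1.0);
    const Eigen::Vector3d uvw = projection * xyz1;
    const double u = uvw.x() / uvw.z();
    const double v = uvw.y() / uvw.z();
    if (u >= roi.x_min && u <= roi.x_max && v >= roi.y_min && v <= roi.y_max) {
      point.class_scores[roi.label] = std::max(point.class_scores[roi.label], roi.score);
    }
  }
}
```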

## Inputs / Outputs

### Input

| Name | Type | Description |
| --------------------- | -------------------------------------------------------- | ---------------------------------------------------------------------------------- |
| `input` | `sensor_msgs::msg::PointCloud2` | input pointcloud |
| `input/camera_infoID` | `sensor_msgs::msg::CameraInfo` | camera information to project 3D points onto image planes, `ID` is between 0 and 7 |
| `input/roisID` | `tier4_perception_msgs::msg::DetectedObjectsWithFeature` | ROIs from each image, `ID` is between 0 and 7 |
| `input/image_rawID` | `sensor_msgs::msg::Image` | images for visualization, `ID` is between 0 and 7 |
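
The `ID` suffix in the topic names above is a plain integer appended to the base name, one topic per camera. A hedged sketch of how such suffixed subscriptions could be created with rclcpp (the helper function and callback are illustrative, not the node's actual code):

```cpp
#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/camera_info.hpp>

#include <cstddef>
#include <string>
#include <vector>

// Illustrative helper: subscribe to input/camera_info0 ... input/camera_info{N-1}.
std::vector<rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr>
makeCameraInfoSubscriptions(rclcpp::Node & node, const std::size_t rois_number)
{
  std::vector<rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr> subscriptions;
  for (std::size_t id = 0; id < rois_number; ++id) {
    const std::string topic = "input/camera_info" + std::to_string(id);
    subscriptions.push_back(node.create_subscription<sensor_msgs::msg::CameraInfo>(
      topic, rclcpp::QoS{1},
      [id](const sensor_msgs::msg::CameraInfo::ConstSharedPtr msg) {
        (void)msg;  // a real node would cache the latest camera info for camera `id`
        (void)id;
      }));
  }
  return subscriptions;
}
```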

### Output

| Name | Type | Description |
| -------------------- | ----------------------------------------------------- | ------------------------------------------------- |
| `output` | `sensor_msgs::msg::PointCloud2` | painted pointcloud |
| `~/output/objects` | `autoware_auto_perception_msgs::msg::DetectedObjects` | detected objects |
| `output/image_rawID` | `sensor_msgs::msg::Image` | images for visualization, `ID` is between 0 and 7 |

## Parameters

### Core Parameters

| Name | Type | Default Value | Description |
| ------------------------------- | ------ | ------------- | ----------------------------------------------------------- |
| `score_threshold` | float | `0.4` | detected objects with score less than threshold are ignored |
| `densification_world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud |
| `densification_num_past_frames` | int | `0` | the number of past frames to fuse with the current frame |
| `trt_precision` | string | `fp16` | TensorRT inference precision: `fp32` or `fp16` |
| `encoder_onnx_path` | string | `""` | path to VoxelFeatureEncoder ONNX file |
| `encoder_engine_path` | string | `""` | path to VoxelFeatureEncoder TensorRT Engine file |
| `head_onnx_path` | string | `""` | path to DetectionHead ONNX file |
| `head_engine_path` | string | `""` | path to DetectionHead TensorRT Engine file |
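
A minimal sketch of declaring these core parameters with rclcpp, using the names and defaults from the table; the struct and function are illustrative and not the node's actual constructor:

```cpp
#include <rclcpp/rclcpp.hpp>

#include <cstdint>
#include <string>

// Illustrative container; not a type from the package.
struct CoreParams
{
  float score_threshold;
  std::string densification_world_frame_id;
  int64_t densification_num_past_frames;
  std::string trt_precision;
  std::string encoder_onnx_path;
};

CoreParams declareCoreParams(rclcpp::Node & node)
{
  CoreParams params;
  params.score_threshold =
    static_cast<float>(node.declare_parameter<double>("score_threshold", 0.4));
  params.densification_world_frame_id =
    node.declare_parameter<std::string>("densification_world_frame_id", "map");
  params.densification_num_past_frames =
    node.declare_parameter<int64_t>("densification_num_past_frames", 0);
  params.trt_precision = node.declare_parameter<std::string>("trt_precision", "fp16");
  params.encoder_onnx_path = node.declare_parameter<std::string>("encoder_onnx_path", "");
  return params;
}
```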

## Assumptions / Known limits

- The multi-frame painting is not implemented yet.

## (Optional) Error detection and handling

<!-- Write how to detect errors and how to recover from them.
Example:
This package can handle up to 20 obstacles. If more obstacles found, this node will give up and raise diagnostic errors.
-->

## (Optional) Performance characterization

<!-- Write performance information like complexity. If it wouldn't be the bottleneck, not necessary.
Example:
### Complexity
This algorithm is O(N).
### Processing time
...
-->

## References/External links

[1] Vora, Sourabh, et al. "PointPainting: Sequential Fusion for 3D Object Detection." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020.

[2] Ding, Zhuangzhuang, et al. "1st Place Solution for Waymo Open Dataset Challenge -- 3D Detection and Domain Adaptation." arXiv preprint arXiv:2006.15505 (2020). Presented at the CVPR'20 Workshop on Scalability in Autonomous Driving, Waymo Open Dataset Challenge: <https://youtu.be/9g9GsI33ol8?t=535>

## (Optional) Future extensions / Unimplemented parts

<!-- Write future extensions of this package.
Example:
Currently, this package can't handle the chattering obstacles well. We plan to add some probabilistic filters in the perception layer to improve it.
Also, there are some parameters that should be global(e.g. vehicle size, max steering, etc.). These will be refactored and defined as global parameters so that we can share the same parameters between different nodes.
-->
@@ -1,4 +1,4 @@
// Copyright 2021 TIER IV, Inc.
// Copyright 2022 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -72,7 +72,7 @@ class FusionNode : public rclcpp::Node
const sensor_msgs::msg::CameraInfo & camera_info, Msg & output_msg) = 0;

// set args if you need
virtual void postprocess();
virtual void postprocess(Msg & output_msg);

void publish(const Msg & output_msg);

@@ -0,0 +1,65 @@
// Copyright 2022 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef IMAGE_PROJECTION_BASED_FUSION__POINTPAINTING_FUSION__NODE_HPP_
#define IMAGE_PROJECTION_BASED_FUSION__POINTPAINTING_FUSION__NODE_HPP_

#include "image_projection_based_fusion/fusion_node.hpp"
#include "image_projection_based_fusion/pointpainting_fusion/pointpainting_trt.hpp"

#include <image_projection_based_fusion/utils/geometry.hpp>
#include <image_projection_based_fusion/utils/utils.hpp>
#include <lidar_centerpoint/centerpoint_trt.hpp>

#include <map>
#include <memory>
#include <string>
#include <vector>

namespace image_projection_based_fusion
{
using Label = autoware_auto_perception_msgs::msg::ObjectClassification;

class PointpaintingFusionNode : public FusionNode<sensor_msgs::msg::PointCloud2, DetectedObjects>
{
public:
explicit PointpaintingFusionNode(const rclcpp::NodeOptions & options);

protected:
void preprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

void fuseOnSingleImage(
const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
const DetectedObjectsWithFeature & input_roi_msg,
const sensor_msgs::msg::CameraInfo & camera_info,
sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

void postprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

rclcpp::Publisher<DetectedObjects>::SharedPtr obj_pub_ptr_;

float score_threshold_{0.0};
std::vector<std::string> class_names_;
std::vector<double> pointcloud_range;
bool rename_car_to_truck_and_bus_{false};
bool has_twist_{false};

std::unique_ptr<image_projection_based_fusion::PointPaintingTRT> detector_ptr_{nullptr};

bool out_of_scope(const DetectedObjects & obj);
};

} // namespace image_projection_based_fusion

#endif // IMAGE_PROJECTION_BASED_FUSION__POINTPAINTING_FUSION__NODE_HPP_
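
The header above only declares the hooks; the call order is driven by the templated `FusionNode` base class. The toy below mirrors that structure as an assumption about the flow (preprocess, per-camera fusion, postprocess, publish), using stand-in types rather than the package's own:

```cpp
#include <cstddef>
#include <iostream>

// Toy stand-in; NOT the package's message type.
struct ToyPointCloud2
{
};

class ToyFusionNode
{
public:
  virtual ~ToyFusionNode() = default;

  // Assumed flow: widen the point layout, paint with each camera's ROIs,
  // run the 3D detector, then publish.
  void subCallback(ToyPointCloud2 msg, const std::size_t rois_number)
  {
    preprocess(msg);
    for (std::size_t image_id = 0; image_id < rois_number; ++image_id) {
      fuseOnSingleImage(msg, image_id);
    }
    postprocess(msg);
    publish(msg);
  }

protected:
  virtual void preprocess(ToyPointCloud2 &) {}
  virtual void fuseOnSingleImage(ToyPointCloud2 &, const std::size_t image_id)
  {
    std::cout << "fusing ROIs from camera " << image_id << std::endl;
  }
  virtual void postprocess(ToyPointCloud2 &) {}
  void publish(const ToyPointCloud2 &) {}
};
```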