Feature/point pillars #2029

Merged 39 commits into develop from feature/point_pillars on Mar 8, 2019
Changes shown are from 20 of the 39 commits.

Commits:
1b5a90b
Add PointPillars
koh-murakami-ai Feb 26, 2019
6b1f65c
add readme.md
koh-murakami-ai Feb 26, 2019
a9a8cf9
Refactor codes
koh-murakami-ai Feb 26, 2019
49f6fba
apply ros clang
koh-murakami-ai Feb 26, 2019
7c4c1d6
Refactor codes
koh-murakami-ai Feb 27, 2019
0a784b6
fix some test
koh-murakami-ai Feb 28, 2019
978a5d6
fix test
koh-murakami-ai Feb 28, 2019
9cd5b71
refactor codes
koh-murakami-ai Feb 28, 2019
b4fee92
refactor codes
koh-murakami-ai Feb 28, 2019
663f4f1
refactor code
koh-murakami-ai Feb 28, 2019
36d9d10
apply ros clang
koh-murakami-ai Feb 28, 2019
267467d
refactor codes
koh-murakami-ai Feb 28, 2019
37ee941
remove cuda description in package.xml
koh-murakami-ai Feb 28, 2019
036aba6
Add specific version for TensorRT in README.md
koh-murakami-ai Feb 28, 2019
602d21c
update readme
koh-murakami-ai Feb 28, 2019
c6414f7
refactor codes
koh-murakami-ai Feb 28, 2019
65fd094
add empty package declaration when not finding cuda/tensorrt
koh-murakami-ai Feb 28, 2019
b60a3e5
add more tests
koh-murakami-ai Feb 28, 2019
18a39e0
Update README.md
koh-murakami-ai Feb 28, 2019
0f9d824
refactor codes
koh-murakami-ai Feb 28, 2019
e7fde83
refactor codes
koh-murakami-ai Feb 28, 2019
6512482
Modify cmake
koh-murakami-ai Mar 4, 2019
5904fd1
update readme
koh-murakami-ai Mar 4, 2019
4c1339b
Merge remote-tracking branch 'origin/develop' into feature/point_pillars
koh-murakami-ai Mar 6, 2019
b350ec9
add script in cmake to install lib
koh-murakami-ai Mar 6, 2019
1053510
add script in package.xml for autoware_build_flags
koh-murakami-ai Mar 6, 2019
60f3a12
Modify cmake, include CUDNN
koh-murakami-ai Mar 6, 2019
a384528
Add CUDNN dependency script in README
koh-murakami-ai Mar 6, 2019
367bcb1
Delete unnecessary default value for onnx files in launch file
koh-murakami-ai Mar 6, 2019
b5cd3ff
add label when creating DetectedObject
koh-murakami-ai Mar 6, 2019
3e226ac
Add include/{}/
koh-murakami-ai Mar 7, 2019
7daa788
Delete unnecessary code from previous commit
koh-murakami-ai Mar 7, 2019
a04b5ac
Remove magic number by using MACRO
koh-murakami-ai Mar 7, 2019
c59fa71
Clarify float number
koh-murakami-ai Mar 7, 2019
7b9cd43
Add roslib dependency
koh-murakami-ai Mar 7, 2019
d67f2ec
Fix typo
koh-murakami-ai Mar 7, 2019
816ba2d
Modify comment
koh-murakami-ai Mar 8, 2019
d17e4b4
Merge remote-tracking branch 'origin/develop' into feature/point_pillars
koh-murakami-ai Mar 8, 2019
4b6b0ac
revert robosense submodule
koh-murakami-ai Mar 8, 2019
lidar_point_pillars/CMakeLists.txt (new file)
@@ -0,0 +1,121 @@
cmake_minimum_required(VERSION 2.8.3)
project(lidar_point_pillars)

# set flags for CUDA availability
option(CUDA_AVAIL "CUDA available" OFF)
find_package(CUDA)
if (CUDA_FOUND)
message("CUDA is available!")
message("CUDA Libs: ${CUDA_LIBRARIES}")
message("CUDA Headers: ${CUDA_INCLUDE_DIRS}")
set(CUDA_AVAIL ON)
else()
message("CUDA NOT FOUND")
set(CUDA_AVAIL OFF)
endif (CUDA_FOUND)

# set flags for TensorRT availability
option(TRT_AVAIL "TensorRT available" OFF)
# try to find the tensorRT modules
find_library(NVINFER NAMES nvinfer)
find_library(NVPARSERS NAMES nvparsers)
if(NVINFER AND NVPARSERS)
message("TensorRT is available!")
message("NVINFER: ${NVINFER}")
message("NVPARSERS: ${NVPARSERS}")
set(TRT_AVAIL ON)
else()
message("TensorRT is NOT Available")
set(TRT_AVAIL OFF)
endif()

if(TRT_AVAIL AND CUDA_AVAIL)

find_package(autoware_build_flags REQUIRED)
find_package(catkin REQUIRED COMPONENTS
roscpp
pcl_ros
autoware_msgs
)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

catkin_package(
CATKIN_DEPENDS
roscpp
pcl_ros
autoware_msgs
)

include_directories(
include
${catkin_INCLUDE_DIRS}
)
set(SOURCE_FILES
nodes/lidar_point_pillars_node.cpp
nodes/point_pillars_ros.cpp
)

add_executable(lidar_point_pillars
${SOURCE_FILES})

add_dependencies(lidar_point_pillars
${catkin_EXPORTED_TARGETS}
)


cuda_add_library(gpu_point_pillars_lib
nodes/preprocess_points_cuda.cu
nodes/anchor_mask_cuda.cu
nodes/scatter_cuda.cu
nodes/postprocess_cuda.cu
nodes/nms_cuda.cu
)

target_link_libraries(gpu_point_pillars_lib
${CUDA_LIBRARIES}
)

add_library(point_pillars_lib
nodes/point_pillars.cpp
nodes/preprocess_points.cpp
)

target_link_libraries(point_pillars_lib
nvinfer
nvonnxparser
gpu_point_pillars_lib
)

target_link_libraries(lidar_point_pillars
${catkin_LIBRARIES}
point_pillars_lib
)


install(TARGETS
lidar_point_pillars
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
)

install(DIRECTORY launch/
DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/launch
PATTERN ".svn" EXCLUDE)

install(DIRECTORY include/
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
PATTERN ".svn" EXCLUDE
)

if (CATKIN_ENABLE_TESTING)
find_package(rostest REQUIRED)
catkin_add_gtest(test-point_pillars test/src/test_point_pillars.cpp)
target_link_libraries(test-point_pillars ${catkin_LIBRARIES} point_pillars_lib)
endif()
else()
find_package(catkin REQUIRED)
catkin_package()
message("PointPillars won't be built, CUDA and/or TensorRT were not found.")
endif()
lidar_point_pillars/README.md (new file)
@@ -0,0 +1,55 @@
# Point Pillars for 3D Object Detection: ver. 1.0

Autoware package for Point Pillars. [Referenced paper](https://arxiv.org/abs/1812.05784).

## Requirements

CUDA Toolkit v9.0 or v10.0

TensorRT: Tested with 5.0.2 -> [How to install](https://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html#installing)



## How to launch

* Launch file:
`roslaunch lidar_point_pillars lidar_point_pillars.launch pfe_onnx_file:=/PATH/TO/FILE.onnx rpn_onnx_file:=/PATH/TO/FILE.onnx input_topic:=/points_raw`

* You can also launch it from the Runtime Manager's Computing tab.

## API
```cpp
/**
* @brief Call PointPillars for the inference.
* @param[in] in_points_array pointcloud array
* @param[in] in_num_points Number of points
* @param[out] out_detections Output bounding boxes from the network
* @details This is an interface for the algorithm.
*/
void doInference(float* in_points_array, int in_num_points, std::vector<float>& out_detections);
```
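
Below is a hypothetical usage sketch, not code from this PR: it flattens a point cloud into the float array `doInference` expects and collects the detections. The four-floats-per-point layout (x, y, z, intensity) and the `PointPillars` instance are assumptions not stated in this README.

```cpp
#include <array>
#include <vector>

// Hypothetical caller: `point_pillars` is assumed to be an initialized PointPillars
// object, and each point is assumed to carry four floats (x, y, z, intensity).
void detect(const std::vector<std::array<float, 4>>& cloud, PointPillars& point_pillars)
{
  std::vector<float> flat;
  flat.reserve(cloud.size() * 4);
  for (const auto& p : cloud)
  {
    flat.insert(flat.end(), p.begin(), p.end());
  }

  std::vector<float> detections;  // filled by the network with box parameters
  point_pillars.doInference(flat.data(), static_cast<int>(cloud.size()), detections);
}
```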

## Parameters

|Parameter|Type|Description|Default|
|---|---|---|---|
|`input_topic`|*String*|Input PointCloud topic.|`/points_raw`|
|`baselink_support`|*Bool*|Whether to use the baselink frame to adjust parameters.|`True`|
|`reproduce_result_mode`|*Bool*|Whether to enable the reproducible-result mode, at the cost of runtime.|`False`|
|`score_threshold`|*Float*|Minimum score required to include a detection, in [0, 1].|0.5|
|`nms_overlap_threshold`|*Float*|IoU overlap threshold used when applying NMS, in [0, 1].|0.5|
|`pfe_onnx_file`|*String*|Path to the PFE ONNX file.||
|`rpn_onnx_file`|*String*|Path to the RPN ONNX file.||

## Outputs

|Topic|Type|Description|
|---|---|---|
|`/detection/lidar_detector/objects`|`autoware_msgs/DetectedObjectArray`|Array of detected objects in Autoware format|

## Notes

* To display the results in RViz, the `objects_visualizer` node is required (the launch file starts this node automatically).

* Pretrained models are available [here](https://github.com/cirpue49/kitti_pretrained_pp). They were trained on the KITTI dataset and are therefore not suitable for commercial use; derivative works are bound by the CC BY-NC-SA 3.0 license (https://creativecommons.org/licenses/by-nc-sa/3.0/).

@gbiggs @esteve @kfunaoka this package uses a pre-trained network that may not be used for commercial purposes.

I think we need to find a way to clearly mark such packages; otherwise it will be difficult to find them later, and it will also cause confusion for Autoware users trying to commercialize such packages.


Yes, I agree. I'll add it to my notes for the documentation template.

I think we also need a way for a user to easily select between using a non-commercial pre-trained network that we provide and a network they provide themselves (as well as a good method and documentation for how to train a network). This would improve the usefulness of Autoware without placing a burden on us to provide trained networks. I'll add this to my increasing pile of notes on what Autoware should be. :)

anchor_mask_cuda.h (new file)
@@ -0,0 +1,78 @@
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/**
* @file anchor_mask_cuda.h
* @brief Make anchor mask for filtering output
* @author Kosuke Murakami
* @date 2019/02/26
*/

#ifndef ANCHOR_MASK_CUDA_H
#define ANCHOR_MASK_CUDA_H

class AnchorMaskCuda
{
private:
const int NUM_INDS_FOR_SCAN_;
const int NUM_ANCHOR_X_INDS_;
const int NUM_ANCHOR_Y_INDS_;
const int NUM_ANCHOR_R_INDS_;
const float MIN_X_RANGE_;
const float MIN_Y_RANGE_;
const float PILLAR_X_SIZE_;
const float PILLAR_Y_SIZE_;
const int GRID_X_SIZE_;
const int GRID_Y_SIZE_;

public:
/**
* @brief Constructor
* @param[in] NUM_INDS_FOR_SCAN Number of indexes for the scan (cumulative sum)
* @param[in] NUM_ANCHOR_X_INDS Number of x-indexes for anchors
* @param[in] NUM_ANCHOR_Y_INDS Number of y-indexes for anchors
* @param[in] NUM_ANCHOR_R_INDS Number of rotation-indexes for anchors
* @param[in] MIN_X_RANGE Minimum x value for pointcloud
* @param[in] MIN_Y_RANGE Minimum y value for pointcloud
* @param[in] PILLAR_X_SIZE Size of x-dimension for a pillar
* @param[in] PILLAR_Y_SIZE Size of y-dimension for a pillar
* @param[in] GRID_X_SIZE Number of pillars in x-coordinate
* @param[in] GRID_Y_SIZE Number of pillars in y-coordinate
* @details Capitalized variables never change after compilation
*/
AnchorMaskCuda(const int NUM_INDS_FOR_SCAN, const int NUM_ANCHOR_X_INDS, const int NUM_ANCHOR_Y_INDS,
const int NUM_ANCHOR_R_INDS, const float MIN_X_RANGE, const float MIN_Y_RANGE,
const float PILLAR_X_SIZE, const float PILLAR_Y_SIZE, const int GRID_X_SIZE, const int GRID_Y_SIZE);

/**
* @brief call cuda code for making anchor mask
* @param[in] dev_sparse_pillar_map Grid map representation of pillar occupancy
* @param[in] dev_cumsum_along_x Cumulative sum of dev_sparse_pillar_map along the x axis
* @param[in] dev_cumsum_along_y Cumulative sum of dev_cumsum_along_x along the y axis
* @param[in] dev_box_anchors_min_x Array storing the min x value for each anchor
* @param[in] dev_box_anchors_min_y Array storing the min y value for each anchor
* @param[in] dev_box_anchors_max_x Array storing the max x value for each anchor
* @param[in] dev_box_anchors_max_y Array storing the max y value for each anchor
* @param[out] dev_anchor_mask Anchor mask for filtering the output
* @details dev_* denotes device (GPU) memory. Builds a mask that activates the pillar-occupied area for each anchor
*/
void doAnchorMaskCuda(int* dev_sparse_pillar_map, int* dev_cumsum_along_x, int* dev_cumsum_along_y,
const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y,
const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y, int* dev_anchor_mask);
};

#endif // ANCHOR_MASK_CUDA_H
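
The two cumulative sums turn the sparse pillar map into a 2D integral image, so the number of occupied pillars inside any axis-aligned anchor box can be read with four lookups. A CPU sketch of that per-anchor test follows; the row-major layout and the helper name are assumptions, since the kernel source is not shown in this excerpt.

```cpp
// CPU sketch of the occupancy test behind the anchor mask (illustrative only).
// S is the pillar map after cumulative sums along x then y, i.e. an integral image
// where S[y * w + x] counts occupied pillars in the rectangle (0,0)..(x,y).
inline int regionCount(const int* S, int w, int x0, int y0, int x1, int y1)
{
  int a = S[y1 * w + x1];
  int b = (x0 > 0) ? S[y1 * w + (x0 - 1)] : 0;
  int c = (y0 > 0) ? S[(y0 - 1) * w + x1] : 0;
  int d = (x0 > 0 && y0 > 0) ? S[(y0 - 1) * w + (x0 - 1)] : 0;
  return a - b - c + d;  // pillars inside [x0, x1] x [y0, y1]
}

// An anchor is kept (mask = 1) iff regionCount(...) > 0, after converting its
// metric min/max corners to grid indices via MIN_*_RANGE and PILLAR_*_SIZE.
```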
common.h (new file)
@@ -0,0 +1,49 @@
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/**
* @file common.h
* @brief MACRO for CUDA codes
* @author Kosuke Murakami
* @date 2019/02/26
*/

#ifndef COMMON_H
#define COMMON_H

// headers in C standard library
#include <stdio.h>

// headers in CUDA
#include <cuda_runtime_api.h>

// integer ceiling division: DIVUP(m, n) == ceil(m / n) for positive integers
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// wrap CUDA API calls: check the returned error code and abort on failure
#define GPU_CHECK(ans) \
{ \
GPUAssert((ans), __FILE__, __LINE__); \
}
inline void GPUAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}

#endif // COMMON_H
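
For context, a minimal sketch of how these two macros are typically combined around a kernel launch; the kernel and buffer sizes below are placeholders, not code from this PR.

```cpp
#include <cuda_runtime_api.h>

// Placeholder kernel: scales n floats in place.
__global__ void scaleKernel(float* data, int n, float s)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
  {
    data[i] *= s;
  }
}

// DIVUP picks enough blocks to cover n elements; GPU_CHECK aborts on any CUDA error.
void scaleOnGpu(float* host, int n)
{
  float* dev = nullptr;
  GPU_CHECK(cudaMalloc(&dev, n * sizeof(float)));
  GPU_CHECK(cudaMemcpy(dev, host, n * sizeof(float), cudaMemcpyHostToDevice));

  const int threads = 256;
  scaleKernel<<<DIVUP(n, threads), threads>>>(dev, n, 2.0f);
  GPU_CHECK(cudaGetLastError());  // kernel launches do not return an error directly

  GPU_CHECK(cudaMemcpy(host, dev, n * sizeof(float), cudaMemcpyDeviceToHost));
  GPU_CHECK(cudaFree(dev));
}
```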
nms_cuda.h (new file)
@@ -0,0 +1,60 @@
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/**
* @file nms_cuda.h
* @brief Non-maximum suppression for network output
* @author Modified by Kosuke Murakami
* @date 2019/02/26
*/

#ifndef NMS_CUDA_H
#define NMS_CUDA_H

// headers in STL
#include <vector>
#include <iostream>

// headers in local files
#include "common.h"

class NMSCuda
{
private:
const int NUM_THREADS_;
const float nms_overlap_threshold_;

public:
/**
* @brief Constructor
* @param[in] NUM_THREADS Number of threads for launching the CUDA kernel
* @param[in] nms_overlap_threshold IoU threshold for NMS
* @details Capitalized variables never change after compilation; non-capitalized variables can be changed through rosparam
*/
NMSCuda(const int NUM_THREADS, const float nms_overlap_threshold);

/**
* @brief GPU non-maximum suppression for network output
* @param[in] host_filter_count Number of filtered outputs
* @param[in] dev_sorted_box_for_nms Bounding box outputs sorted by score
* @param[out] out_keep_inds Indexes of the selected bounding boxes
* @param[out] out_num_to_keep Number of bounding boxes to keep
* @details Runs NMS in CUDA, with postprocessing on the CPU to select the kept boxes
*/
void doNMSCuda(const int host_filter_count, float* dev_sorted_box_for_nms, int* out_keep_inds, int& out_num_to_keep);
};

#endif // NMS_CUDA_H
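
For reference, a CPU version of the greedy algorithm this class implements on the GPU; it is illustrative only and assumes each box is stored as four floats (x1, y1, x2, y2) already sorted by descending score, which matches the header's description but not necessarily the exact device layout.

```cpp
#include <algorithm>
#include <vector>

// IoU of two axis-aligned boxes stored as (x1, y1, x2, y2).
static float iou(const float* a, const float* b)
{
  float ix = std::max(0.0f, std::min(a[2], b[2]) - std::max(a[0], b[0]));
  float iy = std::max(0.0f, std::min(a[3], b[3]) - std::max(a[1], b[1]));
  float inter = ix * iy;
  float area_a = (a[2] - a[0]) * (a[3] - a[1]);
  float area_b = (b[2] - b[0]) * (b[3] - b[1]);
  return inter / (area_a + area_b - inter);
}

// Greedy NMS over score-sorted boxes: keep a box, suppress later boxes overlapping it.
void nmsCpuReference(const float* boxes, int count, float overlap_threshold,
                     std::vector<int>& out_keep_inds)
{
  std::vector<bool> suppressed(count, false);
  for (int i = 0; i < count; ++i)
  {
    if (suppressed[i])
      continue;
    out_keep_inds.push_back(i);
    for (int j = i + 1; j < count; ++j)
    {
      if (!suppressed[j] && iou(&boxes[i * 4], &boxes[j * 4]) > overlap_threshold)
        suppressed[j] = true;
    }
  }
}
```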