Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(image_projection_based_fusion): update pointpainting #4544

Merged
merged 7 commits into from
Aug 9, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions perception/image_projection_based_fusion/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -124,15 +124,15 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
else()
message(STATUS "diff ${FILE_NAME}")
message(STATUS "File hash changes. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v3/${FILE_NAME}
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v4/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
endif()
else()
message(STATUS "not found ${FILE_NAME}")
message(STATUS "File doesn't exists. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v3/${FILE_NAME}
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v4/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
Expand All @@ -145,8 +145,8 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
endfunction()

# default model
download(pts_voxel_encoder_pointpainting.onnx e674271096f6fbbe0f808eef87f5a5ab)
download(pts_backbone_neck_head_pointpainting.onnx d4527a4da08959c2bf9c997112a1de35)
download(pts_voxel_encoder_pointpainting.onnx 25c70f76a45a64944ccd19f604c99410)
download(pts_backbone_neck_head_pointpainting.onnx 2c7108245240fbd7bf0104dd68225868)
find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
/**:
ros__parameters:
class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
point_feature_size: 9 # x, y, z, time-lag and car, pedestrian, bicycle
paint_class_names: ["CAR", "BICYCLE", "PEDESTRIAN"]
point_feature_size: 7 # x, y, z, time-lag and car, pedestrian, bicycle
max_voxel_size: 40000
point_cloud_range: [-102.4, -102.4, -4.0, 102.4, 102.4, 6.0]
voxel_size: [0.32, 0.32, 10.0]
point_cloud_range: [-121.6, -76.8, -3.0, 121.6, 76.8, 5.0]
voxel_size: [0.32, 0.32, 8.0]
downsample_factor: 1
encoder_in_feature_size: 14
encoder_in_feature_size: 12
yaw_norm_thresholds: [0.3, 0.3, 0.3, 0.3, 0.0]
# post-process params
circle_nms_dist_threshold: 0.5
circle_nms_dist_threshold: 0.3
iou_nms_target_class_names: ["CAR"]
iou_nms_search_distance_2d: 10.0
iou_nms_threshold: 0.1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
<node pkg="image_projection_based_fusion" exec="pointpainting_fusion_node" name="pointpainting" output="screen">
<remap from="~/input/pointcloud" to="$(var input/pointcloud)"/>
<remap from="~/output/objects" to="$(var output/objects)"/>
<param name="score_threshold" value="0.45"/>
<param name="score_threshold" value="0.35"/>
<param name="densification_world_frame_id" value="map"/>
<param name="densification_num_past_frames" value="1"/>
<param name="trt_precision" value="fp16"/>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,26 @@
#include <lidar_centerpoint/preprocess/pointcloud_densification.hpp>
#include <lidar_centerpoint/ros_utils.hpp>
#include <lidar_centerpoint/utils.hpp>
#include <pcl_ros/transforms.hpp>
#include <tier4_autoware_utils/geometry/geometry.hpp>
#include <tier4_autoware_utils/math/constants.hpp>

#include <omp.h>

#include <chrono>

namespace
{

// Convert a ROS Transform message into a single-precision Eigen affine
// transform. tf2 yields a double-precision isometry; its 4x4 homogeneous
// matrix is cast to float so it can multiply the float point vectors used
// in the projection loop.
Eigen::Affine3f _transformToEigen(const geometry_msgs::msg::Transform & t)
{
  const auto matrix_double = tf2::transformToEigen(t).matrix();
  Eigen::Affine3f affine;
  affine.matrix() = matrix_double.cast<float>();
  return affine;
}
yukke42 marked this conversation as resolved.
Show resolved Hide resolved

} // namespace

namespace image_projection_based_fusion
{

Expand Down Expand Up @@ -102,8 +115,12 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt
const std::string head_engine_path = this->declare_parameter("head_engine_path", "");

class_names_ = this->declare_parameter<std::vector<std::string>>("class_names");
const auto paint_class_names =
this->declare_parameter<std::vector<std::string>>("paint_class_names");
std::vector<std::string> classes_{"CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"};
if (std::find(class_names_.begin(), class_names_.end(), "TRUCK") != class_names_.end()) {
if (
std::find(paint_class_names.begin(), paint_class_names.end(), "TRUCK") !=
paint_class_names.end()) {
yukke42 marked this conversation as resolved.
Show resolved Hide resolved
isClassTable_["CAR"] = std::bind(&isCar, std::placeholders::_1);
} else {
isClassTable_["CAR"] = std::bind(&isVehicle, std::placeholders::_1);
Expand All @@ -113,9 +130,9 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt
isClassTable_["BICYCLE"] = std::bind(&isBicycle, std::placeholders::_1);
isClassTable_["PEDESTRIAN"] = std::bind(&isPedestrian, std::placeholders::_1);
for (const auto & cls : classes_) {
auto it = find(class_names_.begin(), class_names_.end(), cls);
if (it != class_names_.end()) {
int index = it - class_names_.begin();
auto it = find(paint_class_names.begin(), paint_class_names.end(), cls);
if (it != paint_class_names.end()) {
int index = it - paint_class_names.begin();
class_index_[cls] = index + 1;
} else {
isClassTable_.erase(cls);
Expand Down Expand Up @@ -253,39 +270,38 @@ void PointPaintingFusionNode::fuseOnSingleImage(
std::vector<Eigen::Vector2d> debug_image_points;

// get transform from cluster frame id to camera optical frame id
geometry_msgs::msg::TransformStamped transform_stamped;
// geometry_msgs::msg::TransformStamped transform_stamped;
Eigen::Affine3f lidar2cam_affine;
{
const auto transform_stamped_optional = getTransformStamped(
tf_buffer_, /*target*/ camera_info.header.frame_id,
/*source*/ painted_pointcloud_msg.header.frame_id, camera_info.header.stamp);
tf_buffer_, /*target*/ input_roi_msg.header.frame_id,
/*source*/ painted_pointcloud_msg.header.frame_id, input_roi_msg.header.stamp);
if (!transform_stamped_optional) {
return;
}
transform_stamped = transform_stamped_optional.value();
lidar2cam_affine = _transformToEigen(transform_stamped_optional.value().transform);
}

// transform
sensor_msgs::msg::PointCloud2 transformed_pointcloud;
tf2::doTransform(painted_pointcloud_msg, transformed_pointcloud, transform_stamped);

std::chrono::system_clock::time_point start, end;
start = std::chrono::system_clock::now();
// sensor_msgs::msg::PointCloud2 transformed_pointcloud;
// tf2::doTransform(painted_pointcloud_msg, transformed_pointcloud, transform_stamped);

const auto x_offset =
transformed_pointcloud.fields.at(static_cast<size_t>(autoware_point_types::PointIndex::X))
painted_pointcloud_msg.fields.at(static_cast<size_t>(autoware_point_types::PointIndex::X))
.offset;
const auto y_offset =
transformed_pointcloud.fields.at(static_cast<size_t>(autoware_point_types::PointIndex::Y))
painted_pointcloud_msg.fields.at(static_cast<size_t>(autoware_point_types::PointIndex::Y))
.offset;
const auto z_offset =
transformed_pointcloud.fields.at(static_cast<size_t>(autoware_point_types::PointIndex::Z))
painted_pointcloud_msg.fields.at(static_cast<size_t>(autoware_point_types::PointIndex::Z))
.offset;
const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
const auto p_step = transformed_pointcloud.point_step;
const auto p_step = painted_pointcloud_msg.point_step;
// projection matrix
Eigen::Matrix3f camera_projection; // use only x,y,z
camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
Eigen::Vector3f point_lidar, point_camera;
/** dc : don't care

x | f x1 x2 dc ||xc|
Expand All @@ -295,18 +311,24 @@ dc | dc dc dc dc ||zc|
**/

auto objects = input_roi_msg.feature_objects;
int iterations = transformed_pointcloud.data.size() / transformed_pointcloud.point_step;
int iterations = painted_pointcloud_msg.data.size() / painted_pointcloud_msg.point_step;
// iterate points
// Requires 'OMP_NUM_THREADS=N'
omp_set_num_threads(omp_num_threads_);
#pragma omp parallel for
for (int i = 0; i < iterations; i++) {
int stride = p_step * i;
unsigned char * data = &transformed_pointcloud.data[0];
unsigned char * data = &painted_pointcloud_msg.data[0];
unsigned char * output = &painted_pointcloud_msg.data[0];
float p_x = *reinterpret_cast<const float *>(&data[stride + x_offset]);
float p_y = *reinterpret_cast<const float *>(&data[stride + y_offset]);
float p_z = *reinterpret_cast<const float *>(&data[stride + z_offset]);
point_lidar << p_x, p_y, p_z;
point_camera = lidar2cam_affine * point_lidar;
p_x = point_camera.x();
p_y = point_camera.y();
p_z = point_camera.z();

if (p_z <= 0.0 || p_x > (tan_h_.at(image_id) * p_z) || p_x < (-tan_h_.at(image_id) * p_z)) {
continue;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,8 @@ namespace
{
const std::size_t MAX_POINT_IN_VOXEL_SIZE = 32; // the same as max_point_in_voxel_size_ in config
const std::size_t WARPS_PER_BLOCK = 4;
const std::size_t ENCODER_IN_FEATURE_SIZE = 14; // same as encoder_in_feature_size_ in config.hpp
const int POINT_FEATURE_SIZE = 9;
const std::size_t ENCODER_IN_FEATURE_SIZE = 12; // same as encoder_in_feature_size_ in config.hpp
const int POINT_FEATURE_SIZE = 7;

// cspell: ignore divup
std::size_t divup(const std::size_t a, const std::size_t b)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
try {
geometry_msgs::msg::TransformStamped transform_stamped;
transform_stamped = tf_buffer.lookupTransform(
target_frame_id, source_frame_id, time, rclcpp::Duration::from_seconds(0.5));
target_frame_id, source_frame_id, time, rclcpp::Duration::from_seconds(0.01));
return transform_stamped;
} catch (tf2::TransformException & ex) {
RCLCPP_WARN_STREAM(rclcpp::get_logger("image_projection_based_fusion"), ex.what());
Expand Down