diff --git a/README.md b/README.md
index 3fe01ff..366b519 100644
--- a/README.md
+++ b/README.md
@@ -70,13 +70,13 @@ This package is powered by [NVIDIA Isaac Transport for ROS (NITROS)](https://dev
## Performance
-| Sample Graph | Input Size | AGX Orin | Orin NX | Orin Nano 8GB | x86_64 w/ RTX 4060 Ti | x86_64 w/ RTX 4090 |
-|---|---|---|---|---|---|---|
-| [TensorRT Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_tensor_rt_benchmark/scripts/isaac_ros_tensor_rt_dope_node.py)<br/>DOPE | VGA | [48.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-agx_orin.json)<br/>24 ms @ 30Hz | [17.9 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nx.json)<br/>56 ms @ 30Hz | [13.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nano.json)<br/>82 ms @ 30Hz | [98.3 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-nuc_4060ti.json)<br/>13 ms @ 30Hz | [296 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-x86_4090.json)<br/>5.1 ms @ 30Hz |
-| [Triton Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_triton_benchmark/scripts/isaac_ros_triton_dope_node.py)<br/>DOPE | VGA | [47.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-agx_orin.json)<br/>23 ms @ 30Hz | [20.4 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nx.json)<br/>540 ms @ 30Hz | [14.4 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nano.json)<br/>790 ms @ 30Hz | [94.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-nuc_4060ti.json)<br/>12 ms @ 30Hz | [254 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-x86_4090.json)<br/>4.6 ms @ 30Hz |
-| [TensorRT Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_tensor_rt_benchmark/scripts/isaac_ros_tensor_rt_ps_node.py)<br/>PeopleSemSegNet | 544p | [460 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-agx_orin.json)<br/>4.1 ms @ 30Hz | [348 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nx.json)<br/>6.1 ms @ 30Hz | [238 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nano.json)<br/>7.0 ms @ 30Hz | [685 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-nuc_4060ti.json)<br/>2.9 ms @ 30Hz | [675 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-x86_4090.json)<br/>3.0 ms @ 30Hz |
-| [Triton Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_triton_benchmark/scripts/isaac_ros_triton_ps_node.py)<br/>PeopleSemSegNet | 544p | [304 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-agx_orin.json)<br/>4.8 ms @ 30Hz | [206 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-orin_nx.json)<br/>6.5 ms @ 30Hz | – | [677 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-nuc_4060ti.json)<br/>2.2 ms @ 30Hz | [619 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-x86_4090.json)<br/>1.9 ms @ 30Hz |
-| [DNN Image Encoder Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_dnn_image_encoder_benchmark/scripts/isaac_ros_dnn_image_encoder_node.py) | VGA | [522 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-agx_orin.json)<br/>12 ms @ 30Hz | [330 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-orin_nx.json)<br/>12 ms @ 30Hz | – | [811 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-nuc_4060ti.json)<br/>6.6 ms @ 30Hz | [822 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-x86_4090.json)<br/>6.4 ms @ 30Hz |
+| Sample Graph | Input Size | AGX Orin | Orin NX | Orin Nano 8GB | x86_64 w/ RTX 4090 |
+|---|---|---|---|---|---|
+| [TensorRT Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_tensor_rt_benchmark/scripts/isaac_ros_tensor_rt_dope_node.py)<br/>DOPE | VGA | [47.9 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-agx_orin.json)<br/>24 ms @ 30Hz | [18.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nx.json)<br/>56 ms @ 30Hz | [13.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nano.json)<br/>81 ms @ 30Hz | [298 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-x86-4090.json)<br/>4.6 ms @ 30Hz |
+| [Triton Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_triton_benchmark/scripts/isaac_ros_triton_dope_node.py)<br/>DOPE | VGA | [47.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-agx_orin.json)<br/>24 ms @ 30Hz | [20.3 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nx.json)<br/>530 ms @ 30Hz | [14.5 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nano.json)<br/>780 ms @ 30Hz | [276 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-x86-4090.json)<br/>4.6 ms @ 30Hz |
+| [TensorRT Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_tensor_rt_benchmark/scripts/isaac_ros_tensor_rt_ps_node.py)<br/>PeopleSemSegNet | 544p | [460 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-agx_orin.json)<br/>4.1 ms @ 30Hz | [348 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nx.json)<br/>6.1 ms @ 30Hz | [238 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nano.json)<br/>7.0 ms @ 30Hz | [685 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-nuc_4060ti.json)<br/>2.9 ms @ 30Hz |
+| [Triton Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_triton_benchmark/scripts/isaac_ros_triton_ps_node.py)<br/>PeopleSemSegNet | 544p | [304 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-agx_orin.json)<br/>4.8 ms @ 30Hz | [206 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-orin_nx.json)<br/>6.5 ms @ 30Hz | – | [677 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-nuc_4060ti.json)<br/>2.2 ms @ 30Hz |
+| [DNN Image Encoder Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_dnn_image_encoder_benchmark/scripts/isaac_ros_dnn_image_encoder_node.py) | VGA | [420 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-agx_orin.json)<br/>12 ms @ 30Hz | [382 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-orin_nx.json)<br/>11 ms @ 30Hz | – | [574 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-x86-4090.json)<br/>5.2 ms @ 30Hz |
---
@@ -104,4 +104,4 @@ Please visit the [Isaac ROS Documentation](https://nvidia-isaac-ros.github.io/re
## Latest
-Update 2024-09-26: Update for Isaac ROS 3.1
+Update 2024-12-10: Update to be compatible with JetPack 6.1
diff --git a/isaac_ros_dnn_image_encoder/CMakeLists.txt b/isaac_ros_dnn_image_encoder/CMakeLists.txt
index 3b42035..0ef2dd5 100644
--- a/isaac_ros_dnn_image_encoder/CMakeLists.txt
+++ b/isaac_ros_dnn_image_encoder/CMakeLists.txt
@@ -38,6 +38,13 @@ if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
ament_lint_auto_find_test_dependencies()
+ # Gtest for dnn image encoder node
+ ament_add_gtest(dnn_image_encoder_node_test test/dnn_image_encoder_node_test.cpp)
+ target_link_libraries(dnn_image_encoder_node_test dnn_image_encoder_node)
+ target_include_directories(dnn_image_encoder_node_test PUBLIC include/isaac_ros_dnn_image_encoder/)
+ target_include_directories(dnn_image_encoder_node_test PUBLIC /usr/src/googletest/googlemock/include/)
+ ament_target_dependencies(dnn_image_encoder_node_test rclcpp)
+ ament_target_dependencies(dnn_image_encoder_node_test isaac_ros_nitros)
# The FindPythonInterp and FindPythonLibs modules are removed
if(POLICY CMP0148)
@@ -50,4 +57,10 @@ if(BUILD_TESTING)
add_launch_test(test/isaac_ros_dnn_image_encoder_image_norm_test.py)
endif()
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE config launch)
diff --git a/isaac_ros_dnn_image_encoder/launch/dnn_image_encoder.launch.py b/isaac_ros_dnn_image_encoder/launch/dnn_image_encoder.launch.py
index bb8690b..98747f0 100644
--- a/isaac_ros_dnn_image_encoder/launch/dnn_image_encoder.launch.py
+++ b/isaac_ros_dnn_image_encoder/launch/dnn_image_encoder.launch.py
@@ -43,9 +43,13 @@ def launch_setup(context, *args, **kwargs):
context.perform_substitution(LaunchConfiguration('enable_padding'))
)
+ input_qos = LaunchConfiguration('input_qos')
+ output_qos = LaunchConfiguration('output_qos')
+
keep_aspect_ratio = LaunchConfiguration('keep_aspect_ratio')
crop_mode = LaunchConfiguration('crop_mode')
encoding_desired = LaunchConfiguration('encoding_desired')
+ input_encoding = LaunchConfiguration('input_encoding')
final_tensor_name = LaunchConfiguration('final_tensor_name')
image_mean = LaunchConfiguration('image_mean')
@@ -92,11 +96,12 @@ def launch_setup(context, *args, **kwargs):
plugin='nvidia::isaac_ros::image_proc::ResizeNode',
parameters=[
{
+ 'input_qos': input_qos,
'output_width': resize_image_width,
'output_height': resize_image_height,
'num_blocks': num_blocks,
'keep_aspect_ratio': keep_aspect_ratio,
- 'encoding_desired': '',
+ 'encoding_desired': input_encoding,
}
],
remappings=[
@@ -104,6 +109,22 @@ def launch_setup(context, *args, **kwargs):
('camera_info', camera_info_input_topic),
],
),
+ ComposableNode(
+ name='image_format_converter_node',
+ package='isaac_ros_image_proc',
+ plugin='nvidia::isaac_ros::image_proc::ImageFormatConverterNode',
+ parameters=[
+ {
+ 'image_width': network_image_width,
+ 'image_height': network_image_height,
+ 'encoding_desired': encoding_desired,
+ }
+ ],
+ remappings=[
+ ('image_raw', 'resize/image'),
+ ('image', 'converted/image'),
+ ],
+ ),
ComposableNode(
name='crop_node',
package='isaac_ros_image_proc',
@@ -121,26 +142,11 @@ def launch_setup(context, *args, **kwargs):
}
],
remappings=[
- ('image', 'resize/image'),
- ('camera_info', 'resize/camera_info'),
- ],
- ),
- ComposableNode(
- name='image_format_converter_node',
- package='isaac_ros_image_proc',
- plugin='nvidia::isaac_ros::image_proc::ImageFormatConverterNode',
- parameters=[
- {
- 'image_width': network_image_width,
- 'image_height': network_image_height,
- 'encoding_desired': encoding_desired,
- }
- ],
- remappings=[
- ('image_raw', 'crop/image'),
('image', 'converted/image'),
+ ('camera_info', 'resize/camera_info'),
],
),
+
ComposableNode(
name='image_to_tensor',
package='isaac_ros_tensor_proc',
@@ -152,7 +158,7 @@ def launch_setup(context, *args, **kwargs):
}
],
remappings=[
- ('image', 'converted/image'),
+ ('image', 'crop/image'),
('tensor', 'image_tensor'),
],
),
@@ -192,6 +198,7 @@ def launch_setup(context, *args, **kwargs):
plugin='nvidia::isaac_ros::dnn_inference::ReshapeNode',
parameters=[
{
+ 'output_qos': output_qos,
'output_tensor_name': final_tensor_name,
'input_tensor_shape': [3, network_image_height, network_image_width],
'output_tensor_shape': [1, 3, network_image_height, network_image_width],
@@ -253,6 +260,16 @@ def generate_launch_description():
default_value='True',
description='Whether to enable padding or not',
),
+ DeclareLaunchArgument(
+ 'input_qos',
+ default_value='DEFAULT',
+ description='The QoS settings for the input image'
+ ),
+ DeclareLaunchArgument(
+ 'output_qos',
+ default_value='DEFAULT',
+ description='The QoS settings for the output tensor'
+ ),
DeclareLaunchArgument(
'num_blocks',
default_value='40',
@@ -268,6 +285,11 @@ def generate_launch_description():
default_value='CENTER',
description='The crop mode to crop the image using',
),
+ DeclareLaunchArgument(
+ 'input_encoding',
+ default_value='rgb8',
+ description='The desired image format encoding',
+ ),
DeclareLaunchArgument(
'encoding_desired',
default_value='rgb8',
diff --git a/isaac_ros_dnn_image_encoder/package.xml b/isaac_ros_dnn_image_encoder/package.xml
index 10e2660..2a4b9e1 100644
--- a/isaac_ros_dnn_image_encoder/package.xml
+++ b/isaac_ros_dnn_image_encoder/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
isaac_ros_dnn_image_encoder
-  <version>3.1.0</version>
+  <version>3.2.0</version>
Encoder for preprocessing images into tensors for deep learning inference
Isaac ROS Maintainers
Apache-2.0
@@ -48,6 +48,7 @@ SPDX-License-Identifier: Apache-2.0
ament_lint_auto
ament_lint_common
isaac_ros_test
+  <test_depend>ament_cmake_gtest</test_depend>
ament_cmake
diff --git a/isaac_ros_dnn_image_encoder/test/dnn_image_encoder_node_test.cpp b/isaac_ros_dnn_image_encoder/test/dnn_image_encoder_node_test.cpp
new file mode 100644
index 0000000..2a43d1b
--- /dev/null
+++ b/isaac_ros_dnn_image_encoder/test/dnn_image_encoder_node_test.cpp
@@ -0,0 +1,114 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+#include <gmock/gmock.h>
+#include "dnn_image_encoder_node.hpp"
+#include "rclcpp/rclcpp.hpp"
+
+// Objective: to cover code lines where exceptions are thrown
+// Approach: send Invalid Arguments for node parameters to trigger the exception
+
+
+TEST(dnn_image_encoder_node_test, test_invalid_input_image_width)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_image_width", 0);
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::DnnImageEncoderNode dnn_image_encoder_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Invalid input_image_width"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(dnn_image_encoder_node_test, test_invalid_input_image_height)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_image_width", 1);
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::DnnImageEncoderNode dnn_image_encoder_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Invalid input_image_height"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(dnn_image_encoder_node_test, test_invalid_network_image_width)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_image_width", 1);
+ options.append_parameter_override("input_image_height", 1);
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::DnnImageEncoderNode dnn_image_encoder_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Invalid network_image_width"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(dnn_image_encoder_node_test, test_invalid_network_image_height)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_image_width", 1);
+ options.append_parameter_override("input_image_height", 1);
+ options.append_parameter_override("network_image_width", 1);
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::DnnImageEncoderNode dnn_image_encoder_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Invalid network_image_height"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+
+int main(int argc, char ** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/isaac_ros_dnn_image_encoder/test/isaac_ros_dnn_image_encoder_image_crop_test.py b/isaac_ros_dnn_image_encoder/test/isaac_ros_dnn_image_encoder_image_crop_test.py
index 15691c6..9c85b5e 100644
--- a/isaac_ros_dnn_image_encoder/test/isaac_ros_dnn_image_encoder_image_crop_test.py
+++ b/isaac_ros_dnn_image_encoder/test/isaac_ros_dnn_image_encoder_image_crop_test.py
@@ -210,20 +210,20 @@ def extract_pixel(data, x, y):
# Extract 3 float values corresponding to the
r, g, b = extract_pixel(tensor.data, x, y)
- if(abs(r - RED_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE
+ if (abs(r - RED_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE
and g < COLOR_MATCH_TOLERANCE and b < COLOR_MATCH_TOLERANCE):
red_pixel_count += 1
- if(abs(g - GREEN_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE
+ if (abs(g - GREEN_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE
and r < COLOR_MATCH_TOLERANCE and b < COLOR_MATCH_TOLERANCE):
green_pixel_count += 1
- if(abs(b - BLUE_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE
+ if (abs(b - BLUE_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE
and r < COLOR_MATCH_TOLERANCE and g < COLOR_MATCH_TOLERANCE):
blue_pixel_count += 1
- if(abs(r - RED_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE and
+ if (abs(r - RED_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE and
abs(g - GREEN_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE and
abs(b - BLUE_EXPECTED_VAL) < COLOR_MATCH_TOLERANCE):
white_pixel_count += 1
- if(r < COLOR_MATCH_TOLERANCE and g < COLOR_MATCH_TOLERANCE and
+ if (r < COLOR_MATCH_TOLERANCE and g < COLOR_MATCH_TOLERANCE and
b < COLOR_MATCH_TOLERANCE):
black_pixel_count += 1
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/CMakeLists.txt b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/CMakeLists.txt
index 1a3a807..defc598 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/CMakeLists.txt
+++ b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/CMakeLists.txt
@@ -27,7 +27,7 @@ ament_auto_find_build_dependencies()
# Dependencies
find_package(CUDAToolkit)
-find_package(TENSORRT 8 MODULE REQUIRED)
+find_package(TENSORRT 10 MODULE REQUIRED)
find_package(yaml-cpp)
# TensorRT extension
@@ -39,6 +39,8 @@ ament_auto_add_library(${PROJECT_NAME} SHARED
target_link_libraries(${PROJECT_NAME}
CUDA::cudart
+ TENSORRT::nvinfer
+ TENSORRT::nvinfer_plugin
TENSORRT::nvonnxparser
yaml-cpp
)
@@ -54,4 +56,10 @@ set_target_properties(${PROJECT_NAME} PROPERTIES
# Install the binary file
install(TARGETS ${PROJECT_NAME} DESTINATION share/${PROJECT_NAME}/gxf/lib)
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE)
\ No newline at end of file
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_extension.cpp b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_extension.cpp
index 898f8e2..41a064c 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_extension.cpp
+++ b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_extension.cpp
@@ -25,7 +25,7 @@ extern "C" {
GXF_EXT_FACTORY_BEGIN()
GXF_EXT_FACTORY_SET_INFO(0xd43f23e4b9bf11eb, 0x9d182b7be630552b, "TensorRTExtension", "TensorRT",
- "Nvidia", "2.5.0", "LICENSE");
+ "Nvidia", "2.7.0", "LICENSE");
GXF_EXT_FACTORY_ADD(0x06a7f0e0b9c011eb, 0x8cd623c9c2070107, nvidia::gxf::TensorRtInference,
nvidia::gxf::Codelet,
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.cpp b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.cpp
index 299fce8..c537b76 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.cpp
+++ b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.cpp
@@ -107,6 +107,9 @@ gxf::Expected NvInferDatatypeToTensorElementType(nvinfer1::D
case nvinfer1::DataType::kINT32: {
return gxf::PrimitiveType::kInt32;
}
+ case nvinfer1::DataType::kINT64: {
+ return gxf::PrimitiveType::kInt64;
+ }
case nvinfer1::DataType::kUINT8: {
return gxf::PrimitiveType::kUnsigned8;
}
@@ -307,14 +310,15 @@ gxf_result_t TensorRtInference::start() {
// Deserialize the CUDA engine
if (verbose_.get()) { GXF_LOG_INFO("Creating inference runtime."); }
- cuda_engine_.reset(infer_runtime_->deserializeCudaEngine(plan.data(), plan.size(), NULL));
+ cuda_engine_.reset(infer_runtime_->deserializeCudaEngine(plan.data(), plan.size()));
// Debug spews
if (verbose_.get()) {
- GXF_LOG_INFO("Number of CUDA bindings: %d", cuda_engine_->getNbBindings());
- for (int i = 0; i < cuda_engine_->getNbBindings(); ++i) {
- GXF_LOG_INFO("CUDA binding No.%d: name %s Format %s", i, cuda_engine_->getBindingName(i),
- cuda_engine_->getBindingFormatDesc(i));
+ GXF_LOG_INFO("Number of CUDA bindings: %d", cuda_engine_->getNbIOTensors());
+ for (int32_t i = 0; i < cuda_engine_->getNbIOTensors(); ++i) {
+ GXF_LOG_INFO("Tensor name %s: Format %s",
+ cuda_engine_->getIOTensorName(i),
+ cuda_engine_->getTensorFormatDesc(cuda_engine_->getIOTensorName(i)));
}
}
@@ -322,60 +326,40 @@ gxf_result_t TensorRtInference::start() {
const uint64_t input_number = input_tensor_names_.get().size();
const uint64_t output_number = output_tensor_names_.get().size();
const int64_t total_bindings_number = input_number + output_number;
- if (cuda_engine_->getNbBindings() != static_cast(total_bindings_number)) {
+ if (cuda_engine_->getNbIOTensors() != static_cast(total_bindings_number)) {
GXF_LOG_ERROR(
"Numbers of CUDA bindings mismatch: configured for %lu vs model requires %d. "
"Please check TensorRTInference codelet configuration.\n",
- total_bindings_number, cuda_engine_->getNbBindings());
+ total_bindings_number, cuda_engine_->getNbIOTensors());
return GXF_ARGUMENT_INVALID;
}
// Creates cuda execution context
cuda_execution_ctx_.reset(cuda_engine_->createExecutionContext());
- // Allocates CUDA buffer pointers for binding to be populated in tick()
- cuda_buffers_.resize(input_tensor_names_.get().size() + output_tensor_names_.get().size(),
- nullptr);
-
// Keeps record of input bindings
binding_infos_.clear();
for (uint64_t j = 0; j < input_number; ++j) {
const std::string& tensor_name = input_tensor_names_.get()[j];
const std::string& binding_name = input_binding_names_.get()[j];
-
- const int32_t binding_index = cuda_engine_->getBindingIndex(binding_name.c_str());
- if (binding_index == -1) {
- GXF_LOG_ERROR("Failed to get binding index for input %s in model %s", binding_name.c_str(),
- engine_file_path_.get().c_str());
- return GXF_FAILURE;
- }
-
- if (binding_index >= static_cast(cuda_buffers_.size())) {
- GXF_LOG_ERROR("Binding index for input %s is out of range in model %s.", binding_name.c_str(),
- engine_file_path_.get().c_str());
- return GXF_FAILURE;
- }
-
// Checks element type
const auto maybe_element_type =
- NvInferDatatypeToTensorElementType(cuda_engine_->getBindingDataType(binding_index));
+ NvInferDatatypeToTensorElementType(cuda_engine_->getTensorDataType(binding_name.c_str()));
if (!maybe_element_type) {
- GXF_LOG_ERROR("Unsupported element type for binding input %s on index %d. ",
- binding_name.c_str(), binding_index);
+ GXF_LOG_ERROR("Unsupported element type for binding input %s", binding_name.c_str());
return maybe_element_type.error();
}
// Keeps binding info
- const auto& dims = cuda_engine_->getBindingDimensions(binding_index);
+ const auto& dims = cuda_engine_->getTensorShape(binding_name.c_str());
binding_infos_[tensor_name] =
- BindingInfo{binding_index, static_cast(dims.nbDims), binding_name,
- maybe_element_type.value(), Dims2Dimensions(dims)};
+ BindingInfo{static_cast(dims.nbDims), binding_name, maybe_element_type.value(),
+ Dims2Dimensions(dims)};
// Debug spew
if (verbose_.get()) {
GXF_LOG_INFO(
- "Input Tensor %s:%s index %d Dimensions %s.", tensor_name.c_str(), binding_name.c_str(),
- binding_index,
+ "Input Tensor %s:%s Dimensions %s.", tensor_name.c_str(), binding_name.c_str(),
FormatDims(binding_infos_[tensor_name].dimensions, binding_infos_[tensor_name].rank)
.c_str());
}
@@ -385,37 +369,23 @@ gxf_result_t TensorRtInference::start() {
for (uint64_t j = 0; j < output_number; ++j) {
const std::string& tensor_name = output_tensor_names_.get()[j];
const std::string& binding_name = output_binding_names_.get()[j];
-
- const int32_t binding_index = cuda_engine_->getBindingIndex(binding_name.c_str());
- if (binding_index == -1) {
- GXF_LOG_ERROR("Failed to get binding index for output %s", binding_name.c_str());
- return GXF_FAILURE;
- }
- if (binding_index >= static_cast(cuda_buffers_.size())) {
- GXF_LOG_ERROR("Binding index for output %s is out of range.", binding_name.c_str());
- return GXF_FAILURE;
- }
-
// Checks element type
const auto maybe_element_type =
- NvInferDatatypeToTensorElementType(cuda_engine_->getBindingDataType(binding_index));
+ NvInferDatatypeToTensorElementType(cuda_engine_->getTensorDataType(binding_name.c_str()));
if (!maybe_element_type) {
- GXF_LOG_ERROR("Unsupported element type for binding output %s on index %d. ",
- binding_name.c_str(), binding_index);
+ GXF_LOG_ERROR("Unsupported element type for binding output %s ", binding_name.c_str());
return maybe_element_type.error();
}
// Keeps binding info
- const auto& dims = cuda_engine_->getBindingDimensions(binding_index);
+ const auto& dims = cuda_engine_->getTensorShape(binding_name.c_str());
binding_infos_[tensor_name] =
- BindingInfo{binding_index, static_cast(dims.nbDims), binding_name,
- maybe_element_type.value(), Dims2Dimensions(dims)};
- cuda_buffers_[binding_index] = nullptr; // populate cuda_buffers dynamically, in tick()
+ BindingInfo{static_cast(dims.nbDims), binding_name, maybe_element_type.value(),
+ Dims2Dimensions(dims)};
if (verbose_.get()) {
GXF_LOG_INFO(
- "Output Tensor %s:%s (%d), Dimensions: %s.", tensor_name.c_str(), binding_name.c_str(),
- binding_index,
+ "Output Tensor %s:%s, Dimensions: %s.", tensor_name.c_str(), binding_name.c_str(),
FormatDims(binding_infos_[tensor_name].dimensions, binding_infos_[tensor_name].rank)
.c_str());
}
@@ -450,7 +420,7 @@ gxf::Expected<std::vector<char>> TensorRtInference::convertModelToEngine() {
// Builder Config provides options to the Builder
NvInferHandle builderConfig(builder->createBuilderConfig());
- builderConfig->setMaxWorkspaceSize(max_workspace_size_);
+ builderConfig->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, max_workspace_size_);
// Sets DLA core if provided and always fall back to GPU
auto dla_core = dla_core_.try_get();
@@ -512,15 +482,13 @@ gxf::Expected<std::vector<char>> TensorRtInference::convertModelToEngine() {
builderConfig->addOptimizationProfile(optimization_profile);
// Creates TensorRT Engine Plan
- NvInferHandle engine(
- builder->buildEngineWithConfig(*network, *builderConfig));
- if (!engine) {
+ NvInferHandle model_stream(
+ builder->buildSerializedNetwork(*network, *builderConfig));
+ if (!model_stream) {
GXF_LOG_ERROR("Failed to build TensorRT engine from model %s.", model_file_path_.get().c_str());
return gxf::Unexpected{GXF_FAILURE};
}
-
- NvInferHandle model_stream(engine->serialize());
- if (!model_stream || model_stream->size() == 0 || model_stream->data() == nullptr) {
+ if (model_stream->size() == 0 || model_stream->data() == nullptr) {
GXF_LOG_ERROR("Fail to serialize TensorRT Engine.");
return gxf::Unexpected{GXF_FAILURE};
}
@@ -536,7 +504,6 @@ gxf::Expected<std::vector<char>> TensorRtInference::convertModelToEngine() {
gxf_result_t TensorRtInference::stop() {
cuda_execution_ctx_ = nullptr;
cuda_engine_ = nullptr;
- cuda_buffers_.clear();
auto result = cudaEventDestroy(cuda_event_consumed_);
if (cudaSuccess != result) {
@@ -609,10 +576,12 @@ gxf_result_t TensorRtInference::tick() {
binding_rank_matched++;
continue;
}
+
if (shape.dimension(shape_rank_matched) == 1) {
shape_rank_matched++;
continue;
}
+
if (binding_info.dimensions[binding_rank_matched] == 1) {
binding_rank_matched++;
continue;
@@ -654,14 +623,15 @@ gxf_result_t TensorRtInference::tick() {
}
// Updates the latest dimension of input tensor
- if (!cuda_execution_ctx_->setBindingDimensions(binding_info.index, dims)) {
+ if (!cuda_execution_ctx_->setInputShape(binding_info.binding_name.c_str(), dims)) {
GXF_LOG_ERROR("Failed to update input binding %s dimensions.",
binding_info.binding_name.c_str());
return GXF_FAILURE;
}
// Binds input tensor buffer
- cuda_buffers_[binding_info.index] = maybe_tensor.value()->pointer();
+ cuda_execution_ctx_->setTensorAddress(binding_info.binding_name.c_str(),
+ maybe_tensor.value()->pointer());
}
// Creates result message entity
@@ -679,11 +649,13 @@ gxf_result_t TensorRtInference::tick() {
// Queries binding dimension from context and allocates tensor accordingly
const auto& binding_info = binding_infos_[tensor_name];
- const auto binding_dims = cuda_engine_->getBindingDimensions(binding_info.index);
+ const auto binding_dims = cuda_engine_->getTensorShape(binding_info.binding_name.c_str());
auto dimensions = Dims2Dimensions(binding_dims);
for (int i = 0; i < binding_dims.nbDims; i++) {
- if (dimensions[i] < 0) { dimensions[i] = max_batch_size_.get(); }
+ if (dimensions[i] < 0) {
+ dimensions[i] = max_batch_size_.get();
+ }
}
gxf::Shape shape{dimensions, binding_info.rank};
@@ -696,7 +668,8 @@ gxf_result_t TensorRtInference::tick() {
}
// Allocates gpu buffer for output tensors
- cuda_buffers_[binding_info.index] = maybe_result_tensor.value()->pointer();
+ cuda_execution_ctx_->setTensorAddress(binding_info.binding_name.c_str(),
+ maybe_result_tensor.value()->pointer());
}
auto maybe_stream_id = result_message.add("TensorRTCuStream");
@@ -708,8 +681,7 @@ gxf_result_t TensorRtInference::tick() {
GXF_ASSERT(maybe_stream_id.value()->stream_cid != kNullUid, "Internal error: stream_cid is null");
// Runs inference on specified CUDA stream
- if (!cuda_execution_ctx_->enqueueV2(cuda_buffers_.data(), cached_cuda_stream_,
- &cuda_event_consumed_)) {
+ if (!cuda_execution_ctx_->enqueueV3(cached_cuda_stream_)) {
GXF_LOG_ERROR("TensorRT task enqueue for engine %s failed.", engine_file_path_.get().c_str());
return GXF_FAILURE;
}
@@ -723,7 +695,7 @@ gxf_result_t TensorRtInference::tick() {
}
// Record the job and proceed with execution
- auto maybe_event = result_message.add("Infer complete event");
+ auto maybe_event = result_message.add("GXF_TRT_INFER_COMPLETE");
auto& event = maybe_event.value();
auto ret = event->initWithEvent(cuda_event_done, device_id_);
if (!ret) {
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.hpp b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.hpp
index d7f2f54..237c65e 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.hpp
+++ b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/gxf/extensions/tensor_rt/tensor_rt_inference.hpp
@@ -68,17 +68,13 @@ class TensorRtInference : public gxf::Codelet {
private:
// Helper deleter to call destroy while destroying the cuda objects
+
+ // unique_ptr
template <class T>
- struct DeleteFunctor {
- inline void operator()(void* ptr) { reinterpret_cast<T*>(ptr)->destroy(); }
- };
- // unique_ptr using custom Delete Functor above
- template <class T>
- using NvInferHandle = std::unique_ptr<T, DeleteFunctor<T>>;
+ using NvInferHandle = std::unique_ptr<T>;
// To cache binding info for tensors
typedef struct {
- int32_t index;
uint32_t rank;
std::string binding_name;
gxf::PrimitiveType element_type;
@@ -118,7 +114,6 @@ class TensorRtInference : public gxf::Codelet {
NvInferHandle cuda_engine_;
gxf::Handle cuda_stream_;
- std::vector cuda_buffers_;
cudaStream_t cached_cuda_stream_;
cudaEvent_t cuda_event_consumed_;
};
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/package.xml b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/package.xml
index c5f7e8c..e2afd05 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/package.xml
+++ b/isaac_ros_gxf_extensions/gxf_isaac_tensor_rt/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
gxf_isaac_tensor_rt
-  <version>3.1.0</version>
+  <version>3.2.0</version>
TensorRT GXF extension.
Isaac ROS Maintainers
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/CMakeLists.txt b/isaac_ros_gxf_extensions/gxf_isaac_triton/CMakeLists.txt
index 0b7af41..38da585 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/CMakeLists.txt
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/CMakeLists.txt
@@ -27,11 +27,11 @@ ament_auto_find_build_dependencies()
# Determine the architecture
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
- set(GXF_EXT_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/lib/gxf_jetpack60")
- set(NVDS_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/nvds/lib/gxf_jetpack60")
+ set(GXF_EXT_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/lib/gxf_jetpack61")
+ set(NVDS_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/nvds/lib/gxf_jetpack61")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
- set(GXF_EXT_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/lib/gxf_x86_64_cuda_12_2")
- set(NVDS_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/nvds/lib/gxf_x86_64_cuda_12_2")
+ set(GXF_EXT_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/lib/gxf_x86_64_cuda_12_6")
+ set(NVDS_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/nvds/lib/gxf_x86_64_cuda_12_6")
else()
message(FATAL_ERROR "Unsupported architecture: ${CMAKE_SYSTEM_PROCESSOR}")
endif()
@@ -67,4 +67,10 @@ ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET)
install(DIRECTORY ${NVDS_LIB_PATH}/
DESTINATION share/${PROJECT_NAME}/gxf/lib)
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE)
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_jetpack60/libgxf_isaac_triton.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_jetpack60/libgxf_isaac_triton.so
deleted file mode 100755
index a6a9bed..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_jetpack60/libgxf_isaac_triton.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:74d6d9a65e5067b8bee46168ef5e2051f9aa0ff5725df58ef0c77cd31f049888
-size 8608224
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_jetpack61/libgxf_isaac_triton.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_jetpack61/libgxf_isaac_triton.so
new file mode 100755
index 0000000..4ff18ad
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_jetpack61/libgxf_isaac_triton.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:159d255e7632c0d91992bdb0867e3472c0e07070052d71ddc39f88dc98449d29
+size 14795088
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_x86_64_cuda_12_2/libgxf_isaac_triton.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_x86_64_cuda_12_2/libgxf_isaac_triton.so
deleted file mode 100755
index 9ac1a17..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_x86_64_cuda_12_2/libgxf_isaac_triton.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:559f1acd9c0efdd5317414aaed9012d85b544d814f4d0617cb46310b3a7bdba4
-size 6099112
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_x86_64_cuda_12_6/libgxf_isaac_triton.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_x86_64_cuda_12_6/libgxf_isaac_triton.so
new file mode 100755
index 0000000..59c8351
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/lib/gxf_x86_64_cuda_12_6/libgxf_isaac_triton.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b925a2781ce46086c9d7ba97fb5adc24386b0214f1ec689e0741ba3adea36389
+size 6150320
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinfer.h b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinfer.h
index 70f4593..d49fee0 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinfer.h
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinfer.h
@@ -83,7 +83,9 @@ typedef enum
/** Specifies INT8 format. */
INT8 = 2,
/** Specifies INT32 format. */
- INT32 = 3
+ INT32 = 3,
+ /** Specifies INT64 format. */
+ INT64 = 4
} NvDsInferDataType;
/**
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinferserver_config.proto b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinferserver_config.proto
index 9ac2aec..d80f3f3 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinferserver_config.proto
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/include/nvdsinferserver_config.proto
@@ -163,6 +163,9 @@ message BackendParams {
* Default: MEMORY_TYPE_DEFAULT, it is Triton preferred memory type.
*/
MemoryType output_mem_type = 5;
+
+ /** disable warmup */
+ bool disable_warmup = 6;
}
/** extrac controls */
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbufsurface.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbufsurface.so
deleted file mode 100644
index 752f861..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbufsurface.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fdacbf3b86b0f387ad624c1e5f2266f097e0038567443745314a61f1bf0d44b5
-size 855208
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbufsurftransform.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbufsurftransform.so
deleted file mode 100755
index d34bdea..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbufsurftransform.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cea8d2395c328291be2e6b85b721cc6537f0d8d0ac2b4ed94311313448aed6dd
-size 22221568
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_infer_server.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_infer_server.so
deleted file mode 100755
index ddeaccc..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_infer_server.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6940f2372a5b44135e2696559d2324c54968b0a5b37a4099622d2536156251b2
-size 11690080
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_inferutils.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_inferutils.so
deleted file mode 100755
index 32a48ba..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_inferutils.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3b4c7d1938430a328d619a0c2ce32029b39e8aa08a289477ab74a988d7463cca
-size 136304
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbuf_fdmap.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbuf_fdmap.so
similarity index 100%
rename from isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvbuf_fdmap.so
rename to isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbuf_fdmap.so
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbufsurface.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbufsurface.so
new file mode 100644
index 0000000..1a6838f
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbufsurface.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f4510e0bbdde4590bedc0530c4dd209a96f40ec33d81549f1fd6d423c749177
+size 855328
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbufsurftransform.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbufsurftransform.so
new file mode 100755
index 0000000..446095a
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvbufsurftransform.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e319bda235ec17f90cb2f3674872e0d510d601574a305c8b173a5eca9985ce70
+size 23925536
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_infer_server.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_infer_server.so
new file mode 100755
index 0000000..1ee1f32
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_infer_server.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cea8b476e1d98af497b88648423ae08dde26dd8704e912732b8d3de19282c7c0
+size 12935832
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_inferlogger.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_inferlogger.so
similarity index 100%
rename from isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack60/libnvds_inferlogger.so
rename to isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_inferlogger.so
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_inferutils.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_inferutils.so
new file mode 100755
index 0000000..fb6fe50
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libnvds_inferutils.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3b3add0e8be23f8ffae180615688716a0dfa21612e2a6cceaffa273ea3bbadb
+size 137088
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libtritonserver.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libtritonserver.so
new file mode 100755
index 0000000..5805900
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_jetpack61/libtritonserver.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76584aa999c24a180f516e8b0e85b7eb1d13ccb264074cf82d9ca73f3410f52f
+size 6989920
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbuf_fdmap.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbuf_fdmap.so
deleted file mode 100755
index 691f923..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbuf_fdmap.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e03cda9e20d388ca6b0eca6cdc58f79eb99d3cc2913d734d1af1e1ec8247d3bd
-size 21744
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbufsurface.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbufsurface.so
deleted file mode 100755
index ef6bd67..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbufsurface.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a10ef88abc193f624795de7bc3616bd8fcfcdb8cbbba1d1f131e16c7322bb170
-size 38504
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbufsurftransform.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbufsurftransform.so
deleted file mode 100755
index 5f0176a..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvbufsurftransform.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a0fde2ba665dcac14a5f7f91d21c5289994d96715f71341b36ed46ea9380b669
-size 32082936
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_infer_server.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_infer_server.so
deleted file mode 100644
index 96b2a9d..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_infer_server.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1ef46e14b5ee5774689d1dc021161123023646d59cb9b8e56c26f2cb9794c015
-size 21299704
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_inferlogger.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_inferlogger.so
deleted file mode 100755
index 174ec23..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_inferlogger.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e79baad2a349288cdedb102f0abcde86ce7d27d2fc0494ae09c279d64860e3e3
-size 26080
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_inferutils.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_inferutils.so
deleted file mode 100755
index 3262e65..0000000
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_2/libnvds_inferutils.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:da819408b4a3f1b21b3c3c9607ceabbf4a31c3ba01536ded15b8cbff904f77da
-size 163752
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbuf_fdmap.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbuf_fdmap.so
new file mode 100755
index 0000000..2812325
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbuf_fdmap.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01aa4f6b4ca1abb6edd79edefc7751ccaddd11404ea251b81bd0c220f2d4272f
+size 19208
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbufsurface.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbufsurface.so
new file mode 100755
index 0000000..d2b1f48
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbufsurface.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bca7d6db485f8890951f0c80f41bf0d62ef78006d3d03cda46533d2249b1969b
+size 35376
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbufsurftransform.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbufsurftransform.so
new file mode 100755
index 0000000..c5ada97
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvbufsurftransform.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a45adde84527aab21f9c81ded483a4cca8ef89e497fb8822f69d86bcdb22a74
+size 38875240
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_infer_server.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_infer_server.so
new file mode 100755
index 0000000..16a36bc
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_infer_server.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e54d386556825ee7d96cd93eaac6834f6a40c1b639c7c84b47dfdabd48568e8e
+size 21345424
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_inferlogger.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_inferlogger.so
new file mode 100755
index 0000000..43259ad
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_inferlogger.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f336277a7ded55e602dbef558debf71becfefb8cd1d0ba683142e0dd674ec6e8
+size 26608
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_inferutils.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_inferutils.so
new file mode 100755
index 0000000..f317529
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libnvds_inferutils.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7bff174d3777d8eb4d1cd37395afb51bc06ea6074b490ce025003b4603c3861
+size 143176
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libtritonserver.so b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libtritonserver.so
new file mode 100755
index 0000000..ddec363
--- /dev/null
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/nvds/lib/gxf_x86_64_cuda_12_6/libtritonserver.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:889512b9a13a9a4cb15e92321b30a89e4960553000fe8ab85b82007abf705c48
+size 31424120
diff --git a/isaac_ros_gxf_extensions/gxf_isaac_triton/package.xml b/isaac_ros_gxf_extensions/gxf_isaac_triton/package.xml
index bcf0d75..a69c15c 100644
--- a/isaac_ros_gxf_extensions/gxf_isaac_triton/package.xml
+++ b/isaac_ros_gxf_extensions/gxf_isaac_triton/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
gxf_isaac_triton
-  <version>3.1.0</version>
+  <version>3.2.0</version>
Triton GXF extension.
Isaac ROS Maintainers
diff --git a/isaac_ros_tensor_proc/CMakeLists.txt b/isaac_ros_tensor_proc/CMakeLists.txt
index 40e37cc..77b3798 100644
--- a/isaac_ros_tensor_proc/CMakeLists.txt
+++ b/isaac_ros_tensor_proc/CMakeLists.txt
@@ -72,6 +72,7 @@ ament_auto_add_library(image_tensor_normalize_node SHARED src/image_tensor_norma
BUILD_RPATH_USE_ORIGIN TRUE
INSTALL_RPATH_USE_LINK_PATH TRUE)
+# Normalize
ament_auto_add_library(normalize_node SHARED src/normalize_node.cpp)
rclcpp_components_register_nodes(normalize_node
"nvidia::isaac_ros::dnn_inference::NormalizeNode")
@@ -86,6 +87,20 @@ if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
ament_lint_auto_find_test_dependencies()
+ # Gtest for interleaved_to_planar_node
+ ament_add_gtest(interleaved_to_planar_node_test test/interleaved_to_planar_node_test.cpp)
+ target_link_libraries(interleaved_to_planar_node_test interleaved_to_planar_node)
+ target_include_directories(interleaved_to_planar_node_test PUBLIC include/isaac_ros_tensor_proc/)
+ target_include_directories(interleaved_to_planar_node_test PUBLIC /usr/src/googletest/googlemock/include/)
+ ament_target_dependencies(interleaved_to_planar_node_test rclcpp)
+ ament_target_dependencies(interleaved_to_planar_node_test isaac_ros_nitros)
+ # Gtest for normalize_node
+ ament_add_gtest(normalize_node_test test/normalize_node_test.cpp)
+ target_link_libraries(normalize_node_test normalize_node)
+ target_include_directories(normalize_node_test PUBLIC include/isaac_ros_tensor_proc/)
+ target_include_directories(normalize_node_test PUBLIC /usr/src/googletest/googlemock/include/)
+ ament_target_dependencies(normalize_node_test rclcpp)
+ ament_target_dependencies(normalize_node_test isaac_ros_nitros)
# The FindPythonInterp and FindPythonLibs modules are removed
if(POLICY CMP0148)
@@ -102,4 +117,10 @@ if(BUILD_TESTING)
add_launch_test(test/isaac_ros_normalize_test.py)
endif()
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE config)
diff --git a/isaac_ros_tensor_proc/package.xml b/isaac_ros_tensor_proc/package.xml
index 0c6216a..f0d95fb 100644
--- a/isaac_ros_tensor_proc/package.xml
+++ b/isaac_ros_tensor_proc/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
isaac_ros_tensor_proc
-  <version>3.1.0</version>
+  <version>3.2.0</version>
Processing operations for tensors
Isaac ROS Maintainers
Apache-2.0
@@ -47,6 +47,7 @@ SPDX-License-Identifier: Apache-2.0
ament_lint_auto
ament_lint_common
isaac_ros_test
+  <test_depend>ament_cmake_gtest</test_depend>
ament_cmake
diff --git a/isaac_ros_tensor_proc/src/image_tensor_normalize_node.cpp b/isaac_ros_tensor_proc/src/image_tensor_normalize_node.cpp
index e584cf2..5ea77e7 100644
--- a/isaac_ros_tensor_proc/src/image_tensor_normalize_node.cpp
+++ b/isaac_ros_tensor_proc/src/image_tensor_normalize_node.cpp
@@ -138,13 +138,13 @@ ImageTensorNormalizeNode::ImageTensorNormalizeNode(const rclcpp::NodeOptions opt
this, "tensor",
nvidia::isaac_ros::nitros::nitros_tensor_list_nhwc_rgb_f32_t::supported_type_name,
std::bind(&ImageTensorNormalizeNode::ImageTensorNormalizeCallback, this,
- std::placeholders::_1), nvidia::isaac_ros::nitros::NitrosStatisticsConfig{}, input_qos_)},
+ std::placeholders::_1), nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig{}, input_qos_)},
nitros_tensor_pub_{std::make_shared<
nvidia::isaac_ros::nitros::ManagedNitrosPublisher<
nvidia::isaac_ros::nitros::NitrosTensorList>>(
this, "normalized_tensor",
nvidia::isaac_ros::nitros::nitros_tensor_list_nhwc_rgb_f32_t::supported_type_name,
- nvidia::isaac_ros::nitros::NitrosStatisticsConfig{}, output_qos_)},
+ nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig{}, output_qos_)},
  mean_param_{declare_parameter<std::vector<double>>("mean", {0.5, 0.5, 0.5})},
  stddev_param_{declare_parameter<std::vector<double>>("stddev", {0.5, 0.5, 0.5})},
  input_tensor_name_{declare_parameter<std::string>("input_tensor_name", "tensor")},
diff --git a/isaac_ros_tensor_proc/src/image_to_tensor_node.cpp b/isaac_ros_tensor_proc/src/image_to_tensor_node.cpp
index 50d60ee..29bb9c1 100644
--- a/isaac_ros_tensor_proc/src/image_to_tensor_node.cpp
+++ b/isaac_ros_tensor_proc/src/image_to_tensor_node.cpp
@@ -82,14 +82,14 @@ ImageToTensorNode::ImageToTensorNode(const rclcpp::NodeOptions options)
::nvidia::isaac_ros::nitros::NitrosImageView>>(
this, "image", ::nvidia::isaac_ros::nitros::nitros_image_rgb8_t::supported_type_name,
std::bind(&ImageToTensorNode::ImageToTensorCallback, this,
- std::placeholders::_1), nvidia::isaac_ros::nitros::NitrosStatisticsConfig{},
+ std::placeholders::_1), nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig{},
input_qos_)},
nitros_tensor_pub_{std::make_shared<
nvidia::isaac_ros::nitros::ManagedNitrosPublisher<
nvidia::isaac_ros::nitros::NitrosTensorList>>(
this, "tensor",
nvidia::isaac_ros::nitros::nitros_tensor_list_nchw_rgb_f32_t::supported_type_name,
- nvidia::isaac_ros::nitros::NitrosStatisticsConfig{}, output_qos_)},
+ nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig{}, output_qos_)},
scale_{declare_parameter("scale", true)},
  tensor_name_{declare_parameter<std::string>("tensor_name", "tensor")}
{
diff --git a/isaac_ros_tensor_proc/test/interleaved_to_planar_node_test.cpp b/isaac_ros_tensor_proc/test/interleaved_to_planar_node_test.cpp
new file mode 100644
index 0000000..d908fdb
--- /dev/null
+++ b/isaac_ros_tensor_proc/test/interleaved_to_planar_node_test.cpp
@@ -0,0 +1,51 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include "interleaved_to_planar_node.hpp"
+#include "rclcpp/rclcpp.hpp"
+
+// Objective: to cover code lines where exceptions are thrown
+// Approach: send Invalid Arguments for node parameters to trigger the exception
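+// Each test constructs the node inside EXPECT_THROW and rethrows from the catch
+// blocks so the exception message can be checked with testing::HasSubstr before
+// the exception type itself is asserted.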
+
+
+TEST(interleaved_to_planar_node_test, test_empty_input_tensor_shape)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_tensor_shape", std::vector());
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::InterleavedToPlanarNode interleaved_to_planar_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("The input shape is empty!"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+
+int main(int argc, char ** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/isaac_ros_tensor_proc/test/normalize_node_test.cpp b/isaac_ros_tensor_proc/test/normalize_node_test.cpp
new file mode 100644
index 0000000..81b45f2
--- /dev/null
+++ b/isaac_ros_tensor_proc/test/normalize_node_test.cpp
@@ -0,0 +1,114 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include "normalize_node.hpp"
+#include "rclcpp/rclcpp.hpp"
+
+// Objective: to cover code lines where exceptions are thrown
+// Approach: send Invalid Arguments for node parameters to trigger the exception
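+// Each test overrides only the parameters needed to reach the targeted validation
+// check; the constructor is then expected to throw std::invalid_argument with a
+// message matched via testing::HasSubstr.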
+
+
+TEST(normalize_node_test, test_invalid_input_image_width)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_image_width", 0);
+ // options.arguments(
+ // {
+ // "--ros-args",
+ // "-p", "input_image_width:=0",
+ // });
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::NormalizeNode normalize_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Invalid input_image_width"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(normalize_node_test, test_invalid_input_image_height)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_image_width", 1);
+ options.append_parameter_override("input_image_height", 0);
+ // options.arguments(
+ // {
+ // "--ros-args",
+ // "-p", "input_image_width:=1",
+ // // "-p", "input_image_height:=0",
+ // });
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::NormalizeNode normalize_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Invalid input_image_height"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(normalize_node_test, test_invalid_image_channels)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("input_image_width", 1);
+ options.append_parameter_override("input_image_height", 1);
+ options.append_parameter_override("image_mean", std::vector{0.5, 0.5});
+ // options.arguments(
+ // {
+ // "--ros-args",
+ // "-p", "input_image_width:=1",
+ // "-p", "input_image_height:=1",
+ // "-p", "image_mean:=[0.5, 0.5]",
+ // });
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::NormalizeNode normalize_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(
+ e.what(),
+ testing::HasSubstr("Did not receive 3 image mean channels or 3 image stddev channels"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+
+int main(int argc, char ** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/isaac_ros_tensor_rt/CMakeLists.txt b/isaac_ros_tensor_rt/CMakeLists.txt
index de8b846..3d7b42a 100644
--- a/isaac_ros_tensor_rt/CMakeLists.txt
+++ b/isaac_ros_tensor_rt/CMakeLists.txt
@@ -26,11 +26,11 @@ find_package(ament_cmake_auto REQUIRED)
ament_auto_find_build_dependencies()
# Dependencies
-find_package(TENSORRT 8 MODULE REQUIRED)
+find_package(TENSORRT 10 MODULE REQUIRED)
# TensorRTNode
ament_auto_add_library(tensor_rt_node SHARED src/tensor_rt_node.cpp)
-target_link_libraries(tensor_rt_node TENSORRT::nvonnxparser)
+target_link_libraries(tensor_rt_node TENSORRT::nvinfer TENSORRT::nvinfer_plugin TENSORRT::nvonnxparser)
rclcpp_components_register_nodes(tensor_rt_node "nvidia::isaac_ros::dnn_inference::TensorRTNode")
set(node_plugins "${node_plugins}nvidia::isaac_ros::dnn_inference::TensorRTNode;$<TARGET_FILE:tensor_rt_node>\n")
set_target_properties(tensor_rt_node PROPERTIES
@@ -60,4 +60,10 @@ if(BUILD_TESTING)
add_launch_test(test/isaac_ros_tensor_rt_test.py TIMEOUT "300")
endif()
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE config launch)
\ No newline at end of file
diff --git a/isaac_ros_tensor_rt/package.xml b/isaac_ros_tensor_rt/package.xml
index 97d3b8e..51d31b9 100644
--- a/isaac_ros_tensor_rt/package.xml
+++ b/isaac_ros_tensor_rt/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
  <name>isaac_ros_tensor_rt</name>
-  <version>3.1.0</version>
+  <version>3.2.0</version>
  <description>DNN Inference support for Isaac ROS</description>
Isaac ROS Maintainers
diff --git a/isaac_ros_tensor_rt/src/tensor_rt_node.cpp b/isaac_ros_tensor_rt/src/tensor_rt_node.cpp
index f6f6c54..06c9319 100644
--- a/isaac_ros_tensor_rt/src/tensor_rt_node.cpp
+++ b/isaac_ros_tensor_rt/src/tensor_rt_node.cpp
@@ -23,6 +23,7 @@
#include "NvInferPluginUtils.h"
+#include "isaac_ros_common/qos.hpp"
#include "isaac_ros_nitros_tensor_list_type/nitros_tensor_list.hpp"
#include "rclcpp/rclcpp.hpp"
@@ -208,6 +209,22 @@ TensorRTNode::TensorRTNode(const rclcpp::NodeOptions & options)
{
RCLCPP_DEBUG(get_logger(), "[TensorRTNode] In TensorRTNode's constructor");
+  // This function sets the QoS parameter for publishers and subscribers set up by this NITROS node
+ rclcpp::QoS input_qos_ = ::isaac_ros::common::AddQosParameter(
+ *this, "DEFAULT",
+ "input_qos");
+ rclcpp::QoS output_qos_ = ::isaac_ros::common::AddQosParameter(
+ *this, "DEFAULT",
+ "output_qos");
+ for (auto & config : config_map_) {
+ if (config.second.topic_name == INPUT_TOPIC_NAME) {
+ config.second.qos = input_qos_;
+ }
+ if (config.second.topic_name == OUTPUT_TOPIC_NAME) {
+ config.second.qos = output_qos_;
+ }
+ }
+
if (engine_file_path_.empty()) {
throw std::invalid_argument(
"[TensorRTNode] Empty engine_file_path_, "
@@ -290,7 +307,7 @@ size_t TensorRTNode::determineMaxTensorBlockSize()
uint64_t tensor_element_count = 1;
for (int j = 0; j < shape.nbDims; j++) {
- tensor_element_count *= std::max(shape.d[j], 1);
+ tensor_element_count *= std::max(shape.d[j], 1L);
}
uint64_t bytes_per_element;
diff --git a/isaac_ros_tensor_rt/test/tensor_rt_node_test.cpp b/isaac_ros_tensor_rt/test/tensor_rt_node_test.cpp
index b551c1c..489c776 100644
--- a/isaac_ros_tensor_rt/test/tensor_rt_node_test.cpp
+++ b/isaac_ros_tensor_rt/test/tensor_rt_node_test.cpp
@@ -24,17 +24,110 @@ TEST(tensor_rt_node_test, test_engine_file_path)
{
rclcpp::init(0, nullptr);
rclcpp::NodeOptions options;
+ options.append_parameter_override("engine_file_path", "");
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TensorRTNode trt_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty engine_file_path"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(tensor_rt_node_test, test_empty_input_tensor_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("engine_file_path", "dummy_path");
+ options.append_parameter_override("input_tensor_names", std::vector{});
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TensorRTNode trt_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty input_tensor_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(tensor_rt_node_test, test_empty_input_binding_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("engine_file_path", "dummy_path");
+ options.append_parameter_override("input_tensor_names", std::vector{"dummy"});
options.arguments(
{
"--ros-args",
- "-p", "engine_file_path:=''",
+ "-p", "engine_file_path:='dummy_path'",
+ "-p", "input_tensor_names:=['dummy_path']",
});
EXPECT_THROW(
{
try {
nvidia::isaac_ros::dnn_inference::TensorRTNode trt_node(options);
} catch (const std::invalid_argument & e) {
- EXPECT_THAT(e.what(), testing::HasSubstr("Empty engine_file_path"));
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty input_binding_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(tensor_rt_node_test, test_empty_output_tensor_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("engine_file_path", "dummy_path");
+ options.append_parameter_override("input_tensor_names", std::vector{"dummy"});
+ options.append_parameter_override("input_binding_names", std::vector{"dummy"});
+ options.append_parameter_override("output_binding_names", std::vector{"dummy"});
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TensorRTNode trt_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty output_tensor_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(tensor_rt_node_test, test_empty_output_binding_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("engine_file_path", "dummy_path");
+ options.append_parameter_override("input_tensor_names", std::vector{"dummy"});
+ options.append_parameter_override("input_binding_names", std::vector{"dummy"});
+ options.append_parameter_override("output_tensor_names", std::vector{"dummy"});
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TensorRTNode trt_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty output_binding_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
throw;
}
}, std::invalid_argument);
diff --git a/isaac_ros_triton/CMakeLists.txt b/isaac_ros_triton/CMakeLists.txt
index 3e4e26e..71a7d1f 100644
--- a/isaac_ros_triton/CMakeLists.txt
+++ b/isaac_ros_triton/CMakeLists.txt
@@ -39,6 +39,13 @@ if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
ament_lint_auto_find_test_dependencies()
+ # Gtest for triton node
+ ament_add_gtest(triton_node_test test/triton_node_test.cpp)
+ target_link_libraries(triton_node_test triton_node)
+ target_include_directories(triton_node_test PUBLIC include/isaac_ros_triton_node/)
+ target_include_directories(triton_node_test PUBLIC /usr/src/googletest/googlemock/include/)
+ ament_target_dependencies(triton_node_test rclcpp)
+ ament_target_dependencies(triton_node_test isaac_ros_nitros)
# The FindPythonInterp and FindPythonLibs modules are removed
if(POLICY CMP0148)
@@ -50,4 +57,10 @@ if(BUILD_TESTING)
add_launch_test(test/isaac_ros_triton_test_tf.py TIMEOUT "300")
endif()
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE config launch)
diff --git a/isaac_ros_triton/package.xml b/isaac_ros_triton/package.xml
index a9f28be..0d97e1e 100644
--- a/isaac_ros_triton/package.xml
+++ b/isaac_ros_triton/package.xml
@@ -21,7 +21,7 @@
  <name>isaac_ros_triton</name>
-  <version>3.1.0</version>
+  <version>3.2.0</version>
  <description>DNN Inference support for Isaac ROS</description>
Isaac ROS Maintainers
@@ -44,6 +44,7 @@
  <test_depend>ament_lint_auto</test_depend>
  <test_depend>ament_lint_common</test_depend>
  <test_depend>isaac_ros_test</test_depend>
+  <test_depend>ament_cmake_gtest</test_depend>

  <export>
    <build_type>ament_cmake</build_type>
diff --git a/isaac_ros_triton/test/triton_node_test.cpp b/isaac_ros_triton/test/triton_node_test.cpp
new file mode 100644
index 0000000..e64bab1
--- /dev/null
+++ b/isaac_ros_triton/test/triton_node_test.cpp
@@ -0,0 +1,168 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include "triton_node.hpp"
+#include "rclcpp/rclcpp.hpp"
+
+// Objective: to cover code lines where exceptions are thrown
+// Approach: send Invalid Arguments for node parameters to trigger the exception
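+// The tests below are ordered so that each one supplies the parameters already
+// validated by the previous tests and omits the next required parameter,
+// isolating a single constructor check per test.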
+
+TEST(triton_node_test, test_empty_model_name)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("model_name", "");
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TritonNode triton_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty model_name"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(triton_node_test, test_empty_model_repository_paths)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("model_name", "dummy_name");
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TritonNode triton_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty model_repository_paths"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(triton_node_test, test_empty_input_tensor_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("model_name", "dummy_name");
+ options.append_parameter_override(
+ "model_repository_paths",
+ std::vector{"dummy_path"});
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TritonNode triton_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty input_tensor_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(triton_node_test, test_empty_input_binding_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("model_name", "dummy_name");
+ options.append_parameter_override(
+ "model_repository_paths",
+ std::vector{"dummy_path"});
+ options.append_parameter_override("input_tensor_names", std::vector{"dummy"});
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TritonNode triton_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty input_binding_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(triton_node_test, test_empty_output_tensor_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("model_name", "dummy_name");
+ options.append_parameter_override(
+ "model_repository_paths",
+ std::vector{"dummy_path"});
+ options.append_parameter_override("input_tensor_names", std::vector{"dummy"});
+ options.append_parameter_override("input_binding_names", std::vector{"dummy"});
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TritonNode triton_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty output_tensor_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+TEST(triton_node_test, test_empty_output_binding_names)
+{
+ rclcpp::init(0, nullptr);
+ rclcpp::NodeOptions options;
+ options.append_parameter_override("model_name", "dummy_name");
+ options.append_parameter_override(
+ "model_repository_paths",
+ std::vector{"dummy_path"});
+ options.append_parameter_override("input_tensor_names", std::vector{"dummy"});
+ options.append_parameter_override("input_binding_names", std::vector{"dummy"});
+ options.append_parameter_override("output_tensor_names", std::vector{"dummy"});
+ EXPECT_THROW(
+ {
+ try {
+ nvidia::isaac_ros::dnn_inference::TritonNode triton_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty output_binding_names"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
+ }
+ }, std::invalid_argument);
+ rclcpp::shutdown();
+}
+
+
+int main(int argc, char ** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}