diff --git a/README.md b/README.md
index 4b6e37e..551666a 100644
--- a/README.md
+++ b/README.md
@@ -40,13 +40,13 @@ This package is powered by [NVIDIA Isaac Transport for ROS (NITROS)](https://dev
The following table summarizes the per-platform performance statistics of sample graphs that use this package, with links included to the full benchmark output. These benchmark configurations are taken from the [Isaac ROS Benchmark](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark#list-of-isaac-ros-benchmarks) collection, based on the [`ros2_benchmark`](https://github.com/NVIDIA-ISAAC-ROS/ros2_benchmark) framework.
-| Sample Graph | Input Size | AGX Orin | Orin NX | Orin Nano 8GB | x86_64 w/ RTX 3060 Ti |
-| ----------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [TensorRT Node<br>DOPE](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_tensor_rt_dope_node.py) | VGA | [48.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-agx_orin.json)<br>22 ms | [17.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nx.json)<br>56 ms | [13.0 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nano_8gb.json)<br>79 ms | [94.9 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-x86_64_rtx_3060Ti.json)<br>10 ms |
-| [Triton Node<br>DOPE](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_triton_dope_node.py) | VGA | [48.0 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-agx_orin.json)<br>22 ms | [20.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nx.json)<br>540 ms | [14.5 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nano_8gb.json)<br>790 ms | [94.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-x86_64_rtx_3060Ti.json)<br>11 ms |
-| [TensorRT Node<br>PeopleSemSegNet](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_tensor_rt_ps_node.py) | 544p | [467 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-agx_orin.json)<br>2.3 ms | [270 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nx.json)<br>4.0 ms | [184 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nano_8gb.json)<br>9.0 ms | [1500 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-x86_64_rtx_3060Ti.json)<br>1.1 ms |
-| [Triton Node<br>PeopleSemSegNet](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_triton_ps_node.py) | 544p | [293 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-agx_orin.json)<br>3.7 ms | [191 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-orin_nx.json)<br>5.5 ms | -- | [512 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-x86_64_rtx_3060Ti.json)<br>2.1 ms |
-| [DNN Image Encoder Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_dnn_image_encoder_node.py) | VGA | [2230 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-agx_orin.json)<br>0.60 ms | [1560 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-orin_nx.json)<br>0.89 ms | -- | [5780 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-x86_64_rtx_3060Ti.json)<br>0.45 ms |
+| Sample Graph | Input Size | AGX Orin | Orin NX | Orin Nano 8GB | x86_64 w/ RTX 4060 Ti |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [TensorRT Node<br>DOPE](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_tensor_rt_dope_node.py) | VGA | [48.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-agx_orin.json)<br>21 ms | [19.0 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nx.json)<br>54 ms | [13.0 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-orin_nano.json)<br>79 ms | [102 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_dope_node-nuc_4060ti.json)<br>10 ms |
+| [Triton Node<br>DOPE](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_triton_dope_node.py) | VGA | [48.0 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-agx_orin.json)<br>22 ms | [20.5 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nx.json)<br>540 ms | [14.5 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-orin_nano.json)<br>790 ms | [99.4 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_dope_node-nuc_4060ti.json)<br>10 ms |
+| [TensorRT Node<br>PeopleSemSegNet](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_tensor_rt_ps_node.py) | 544p | [468 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-agx_orin.json)<br>2.6 ms | [272 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nx.json)<br>4.1 ms | [185 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-orin_nano.json)<br>5.9 ms | [1990 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_tensor_rt_ps_node-nuc_4060ti.json)<br>0.88 ms |
+| [Triton Node<br>PeopleSemSegNet](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_triton_ps_node.py) | 544p | [296 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-agx_orin.json)<br>3.5 ms | [190 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-orin_nx.json)<br>5.5 ms | -- | [709 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_triton_ps_node-nuc_4060ti.json)<br>2.0 ms |
+| [DNN Image Encoder Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_dnn_image_encoder_node.py) | VGA | [2120 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-agx_orin.json)<br>1.1 ms | [1550 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-orin_nx.json)<br>1.2 ms | -- | [5340 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_dnn_image_encoder_node-nuc_4060ti.json)<br>0.48 ms |
## Table of Contents
@@ -87,7 +87,7 @@ The following table summarizes the per-platform performance statistics of sample
## Latest Update
-Update 2023-04-05: Source available GXF extensions
+Update 2023-05-25: Performance improvements.
## Supported Platforms
@@ -473,6 +473,7 @@ For solutions to problems with using DNN models, please check [here](docs/troubl
| Date | Changes |
| ---------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| 2023-05-25 | Performance improvements |
| 2023-04-05 | Source available GXF extensions |
| 2022-10-19 | Updated OSS licensing |
| 2022-08-31 | Update to be compatible with JetPack 5.0.2 |
diff --git a/isaac_ros_dnn_encoders/config/dnn_image_encoder_node.yaml b/isaac_ros_dnn_encoders/config/dnn_image_encoder_node.yaml
index e2e841d..a89bfda 100644
--- a/isaac_ros_dnn_encoders/config/dnn_image_encoder_node.yaml
+++ b/isaac_ros_dnn_encoders/config/dnn_image_encoder_node.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,36 +19,36 @@
name: global
components:
- name: adapter_video_buffer
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "VideoBuffer"
- name: adapter_bgr_u8
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "Tensor"
image_type: "BGR_U8"
- name: adapter_rgb_u8
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "Tensor"
image_type: "RGB_U8"
- name: adapter_bgr_f32
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "Tensor"
image_type: "BGR_F32"
- name: adapter_rgb_f32
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "Tensor"
image_type: "RGB_F32"
- name: adapter_planar_bgr_f32
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "Tensor"
image_type: "PLANAR_BGR_F32"
- name: adapter_planar_rgb_f32
- type: nvidia::cvcore::tensor_ops::ImageAdapter
+ type: nvidia::isaac::tensor_ops::ImageAdapter
parameters:
message_type: "Tensor"
image_type: "PLANAR_RGB_F32"
@@ -110,7 +110,7 @@ components:
block_size: 1566720
num_blocks: 40
- name: resize_operator
- type: nvidia::cvcore::tensor_ops::Resize
+ type: nvidia::isaac::tensor_ops::Resize
parameters:
output_width: 0
output_height: 0
@@ -150,7 +150,7 @@ components:
block_size: 1566720
num_blocks: 40
- name: color_converter_operator
- type: nvidia::cvcore::tensor_ops::ConvertColorFormat
+ type: nvidia::isaac::tensor_ops::ConvertColorFormat
parameters:
output_type: "RGB_U8"
receiver: data_receiver
@@ -186,7 +186,7 @@ components:
block_size: 6266880
num_blocks: 40
- name: normalizer_operator
- type: nvidia::cvcore::tensor_ops::Normalize
+ type: nvidia::isaac::tensor_ops::Normalize
parameters:
scales: [ 0.0156862745, 0.00490196078, 0.00784313725 ]
offsets: [ -127.5, -153.0, -63.75 ]
@@ -223,7 +223,7 @@ components:
block_size: 6266880
num_blocks: 40
- name: interleaved_to_planar_operator
- type: nvidia::cvcore::tensor_ops::InterleavedToPlanar
+ type: nvidia::isaac::tensor_ops::InterleavedToPlanar
parameters:
receiver: data_receiver
transmitter: data_transmitter
@@ -259,7 +259,7 @@ components:
block_size: 6266880
num_blocks: 40
- name: reshape_operator
- type: nvidia::cvcore::tensor_ops::Reshape
+ type: nvidia::isaac::tensor_ops::Reshape
parameters:
receiver: data_receiver
transmitter: data_transmitter
@@ -304,7 +304,7 @@ components:
camera_model_rx: data_receiver_timestamp
tx: data_transmitter
---
-name: vault
+name: sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -315,12 +315,10 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
components:
- name: edge0
@@ -376,12 +374,13 @@ components:
type: nvidia::gxf::Connection
parameters:
source: compositor/data_transmitter
- target: vault/signal
+ target: sink/signal
---
components:
- type: nvidia::gxf::GreedyScheduler
parameters:
clock: clock
stop_on_deadlock: false
+ check_recession_period_us: 100
- name: clock
type: nvidia::gxf::RealtimeClock
diff --git a/isaac_ros_dnn_encoders/config/namespace_injector_rule.yaml b/isaac_ros_dnn_encoders/config/namespace_injector_rule.yaml
index 5b0404d..dd699e6 100644
--- a/isaac_ros_dnn_encoders/config/namespace_injector_rule.yaml
+++ b/isaac_ros_dnn_encoders/config/namespace_injector_rule.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,13 +20,13 @@ name: DNN Image Encoder Namespace Injector Rule
operation: namespace_injector
body:
components:
- - type: nvidia::cvcore::tensor_ops::ConvertColorFormat
+ - type: nvidia::isaac::tensor_ops::ConvertColorFormat
path_parameter_keys: [input_adapter, output_adapter]
- - type: nvidia::cvcore::tensor_ops::Resize
+ - type: nvidia::isaac::tensor_ops::Resize
path_parameter_keys: [input_adapter, output_adapter]
- - type: nvidia::cvcore::tensor_ops::Normalize
+ - type: nvidia::isaac::tensor_ops::Normalize
path_parameter_keys: [input_adapter, output_adapter]
- - type: nvidia::cvcore::tensor_ops::InterleavedToPlanar
+ - type: nvidia::isaac::tensor_ops::InterleavedToPlanar
path_parameter_keys: [input_adapter, output_adapter]
- - type: nvidia::cvcore::tensor_ops::Reshape
+ - type: nvidia::isaac::tensor_ops::Reshape
path_parameter_keys: [input_adapter, output_adapter]
diff --git a/isaac_ros_dnn_encoders/package.xml b/isaac_ros_dnn_encoders/package.xml
index 49f4680..d8f42f4 100644
--- a/isaac_ros_dnn_encoders/package.xml
+++ b/isaac_ros_dnn_encoders/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
   <name>isaac_ros_dnn_encoders</name>
-  <version>0.30.0</version>
+  <version>0.31.0</version>
Encoders for preprocessing before running deep learning inference
Hemal Shah
Apache-2.0
diff --git a/isaac_ros_dnn_encoders/src/dnn_image_encoder_node.cpp b/isaac_ros_dnn_encoders/src/dnn_image_encoder_node.cpp
index 47a530c..e5fb3d3 100644
--- a/isaac_ros_dnn_encoders/src/dnn_image_encoder_node.cpp
+++ b/isaac_ros_dnn_encoders/src/dnn_image_encoder_node.cpp
@@ -1,5 +1,5 @@
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -51,7 +51,7 @@ constexpr char INPUT_COMPONENT_KEY[] = "broadcaster/data_receiver";
constexpr char INPUT_DEFAULT_IMAGE_FORMAT[] = "nitros_image_bgr8";
constexpr char INPUT_TOPIC_NAME[] = "image";
-constexpr char OUTPUT_COMPONENT_KEY[] = "vault/vault";
+constexpr char OUTPUT_COMPONENT_KEY[] = "sink/sink";
constexpr char OUTPUT_DEFAULT_TENSOR_FORMAT[] = "nitros_tensor_list_nchw_rgb_f32";
constexpr char OUTPUT_TOPIC_NAME[] = "encoded_tensor";
@@ -148,7 +148,7 @@ void DnnImageEncoderNode::preLoadGraphCallback()
std::to_string(scale[2]) + "]";
NitrosNode::preLoadGraphSetParameter(
"normalizer",
- "nvidia::cvcore::tensor_ops::Normalize",
+ "nvidia::isaac::tensor_ops::Normalize",
"scales",
scales);
@@ -157,7 +157,7 @@ void DnnImageEncoderNode::preLoadGraphCallback()
std::to_string(offset[2]) + "]";
NitrosNode::preLoadGraphSetParameter(
"normalizer",
- "nvidia::cvcore::tensor_ops::Normalize",
+ "nvidia::isaac::tensor_ops::Normalize",
"offsets",
offsets);
}
@@ -167,18 +167,18 @@ void DnnImageEncoderNode::postLoadGraphCallback()
RCLCPP_INFO(get_logger(), "In DNN Image Encoder Node postLoadGraphCallback().");
getNitrosContext().setParameterUInt64(
- "resizer", "nvidia::cvcore::tensor_ops::Resize", "output_width", network_image_width_);
+ "resizer", "nvidia::isaac::tensor_ops::Resize", "output_width", network_image_width_);
getNitrosContext().setParameterUInt64(
- "resizer", "nvidia::cvcore::tensor_ops::Resize", "output_height", network_image_height_);
+ "resizer", "nvidia::isaac::tensor_ops::Resize", "output_height", network_image_height_);
getNitrosContext().setParameterBool(
- "resizer", "nvidia::cvcore::tensor_ops::Resize", "keep_aspect_ratio",
+ "resizer", "nvidia::isaac::tensor_ops::Resize", "keep_aspect_ratio",
resize_mode_ != ResizeMode::kDistort);
const gxf::optimizer::ComponentInfo output_comp_info = {
- "nvidia::gxf::Vault", // component_type_name
- "vault", // component_name
- "vault" // entity_name
+ "nvidia::isaac_ros::MessageRelay", // component_type_name
+ "sink", // component_name
+ "sink" // entity_name
};
const std::string negotiated_tensor_format = getFinalDataFormat(output_comp_info);
@@ -226,7 +226,7 @@ void DnnImageEncoderNode::postLoadGraphCallback()
};
getNitrosContext().setParameter1DInt32Vector(
- "reshaper", "nvidia::cvcore::tensor_ops::Reshape", "output_shape",
+ "reshaper", "nvidia::isaac::tensor_ops::Reshape", "output_shape",
final_tensor_shape);
}
}
diff --git a/isaac_ros_dnn_inference_test/package.xml b/isaac_ros_dnn_inference_test/package.xml
index 5e82b44..1b83a01 100644
--- a/isaac_ros_dnn_inference_test/package.xml
+++ b/isaac_ros_dnn_inference_test/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
   <name>isaac_ros_dnn_inference_test</name>
-  <version>0.30.0</version>
+  <version>0.31.0</version>
DNN Inference support for Isaac ROS
Hemal Shah
diff --git a/isaac_ros_tensor_rt/config/tensor_rt_inference.yaml b/isaac_ros_tensor_rt/config/tensor_rt_inference.yaml
index f3ccce9..74551e2 100644
--- a/isaac_ros_tensor_rt/config/tensor_rt_inference.yaml
+++ b/isaac_ros_tensor_rt/config/tensor_rt_inference.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -84,7 +84,7 @@ components:
rx: rx
tx: tx
---
-name: vault
+name: sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -95,12 +95,10 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
name: connections
components:
@@ -111,7 +109,7 @@ components:
- type: nvidia::gxf::Connection
parameters:
source: cuda_stream_sync/tx
- target: vault/signal
+ target: sink/signal
---
name: utils
components:
@@ -121,6 +119,7 @@ components:
parameters:
clock: clock
stop_on_deadlock: false
+ check_recession_period_us: 100
- name: stream
type: nvidia::gxf::CudaStreamPool
parameters:
diff --git a/isaac_ros_tensor_rt/package.xml b/isaac_ros_tensor_rt/package.xml
index 0057c75..daa5996 100644
--- a/isaac_ros_tensor_rt/package.xml
+++ b/isaac_ros_tensor_rt/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
   <name>isaac_ros_tensor_rt</name>
-  <version>0.30.0</version>
+  <version>0.31.0</version>
DNN Inference support for Isaac ROS
CY Chen
diff --git a/isaac_ros_tensor_rt/src/tensor_rt_node.cpp b/isaac_ros_tensor_rt/src/tensor_rt_node.cpp
index c83ac4d..44e4bc9 100644
--- a/isaac_ros_tensor_rt/src/tensor_rt_node.cpp
+++ b/isaac_ros_tensor_rt/src/tensor_rt_node.cpp
@@ -1,5 +1,5 @@
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@ constexpr char INPUT_COMPONENT_KEY[] = "inference/rx";
constexpr char INPUT_DEFAULT_TENSOR_FORMAT[] = "nitros_tensor_list_nchw_rgb_f32";
constexpr char INPUT_TOPIC_NAME[] = "tensor_pub";
-constexpr char OUTPUT_COMPONENT_KEY[] = "vault/vault";
+constexpr char OUTPUT_COMPONENT_KEY[] = "sink/sink";
constexpr char OUTPUT_DEFAULT_TENSOR_FORMAT[] = "nitros_tensor_list_nhwc_rgb_f32";
constexpr char OUTPUT_TOPIC_NAME[] = "tensor_sub";
diff --git a/isaac_ros_triton/config/triton_node.yaml b/isaac_ros_triton/config/triton_node.yaml
index a0b12fa..f508cc3 100644
--- a/isaac_ros_triton/config/triton_node.yaml
+++ b/isaac_ros_triton/config/triton_node.yaml
@@ -1,6 +1,6 @@
%YAML 1.2
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -110,7 +110,7 @@ components:
allocator: allocator
mode: 0
---
-name: vault
+name: sink
components:
- name: signal
type: nvidia::gxf::DoubleBufferReceiver
@@ -121,12 +121,10 @@ components:
parameters:
receiver: signal
min_size: 1
-- name: vault
- type: nvidia::gxf::Vault
+- name: sink
+ type: nvidia::isaac_ros::MessageRelay
parameters:
source: signal
- max_waiting_count: 1
- drop_waiting: false
---
components:
- type: nvidia::gxf::Connection
@@ -136,12 +134,13 @@ components:
- type: nvidia::gxf::Connection
parameters:
source: tensor_copier/output
- target: vault/signal
+ target: sink/signal
---
components:
- type: nvidia::gxf::GreedyScheduler
parameters:
clock: clock
stop_on_deadlock: false
+ check_recession_period_us: 100
- name: clock
type: nvidia::gxf::RealtimeClock
diff --git a/isaac_ros_triton/gxf/triton/extensions/triton/triton_server.hpp b/isaac_ros_triton/gxf/triton/extensions/triton/triton_server.hpp
index 550c0e8..d926719 100644
--- a/isaac_ros_triton/gxf/triton/extensions/triton/triton_server.hpp
+++ b/isaac_ros_triton/gxf/triton/extensions/triton/triton_server.hpp
@@ -78,6 +78,7 @@ class TritonServer : public nvidia::gxf::Component {
"Path to Triton Backend Directory",
"Path to Triton Backend Directory", std::string(""));
+ // TODO(@niralp): Design GXF Model Explicit mode
result &= registrar->parameter(model_control_mode_,
"model_control_mode",
"Triton Model Control Mode",
diff --git a/isaac_ros_triton/package.xml b/isaac_ros_triton/package.xml
index c9deb3f..33c2c6e 100644
--- a/isaac_ros_triton/package.xml
+++ b/isaac_ros_triton/package.xml
@@ -21,7 +21,7 @@
   <name>isaac_ros_triton</name>
-  <version>0.30.0</version>
+  <version>0.31.0</version>
DNN Inference support for Isaac ROS
CY Chen
diff --git a/isaac_ros_triton/src/triton_node.cpp b/isaac_ros_triton/src/triton_node.cpp
index 7986d75..ea36e3e 100644
--- a/isaac_ros_triton/src/triton_node.cpp
+++ b/isaac_ros_triton/src/triton_node.cpp
@@ -1,5 +1,5 @@
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@ constexpr char INPUT_COMPONENT_KEY[] = "triton_request/input";
constexpr char INPUT_DEFAULT_TENSOR_FORMAT[] = "nitros_tensor_list_nchw_rgb_f32";
constexpr char INPUT_TOPIC_NAME[] = "tensor_pub";
-constexpr char OUTPUT_COMPONENT_KEY[] = "vault/vault";
+constexpr char OUTPUT_COMPONENT_KEY[] = "sink/sink";
constexpr char OUTPUT_DEFAULT_TENSOR_FORMAT[] = "nitros_tensor_list_nhwc_rgb_f32";
constexpr char OUTPUT_TOPIC_NAME[] = "tensor_sub";