Skip to content

Commit

Permalink
[onnxruntime] Create features tensorrt, directml, openvino (#237)
Browse files Browse the repository at this point in the history
* [onnxruntime] create 'tensorrt' feature

* ci: test onnxruntime[tensorrt] in hosted runner

* [onnxruntime] create a patch for the feature

* [onnxruntime] create 'openvino' feature

* [onnxruntime] create 'directml' feature

* ci: remove onnxruntime[directml]
  • Loading branch information
luncliff authored Aug 25, 2024
1 parent 0ae67b6 commit 61a5345
Show file tree
Hide file tree
Showing 4 changed files with 106 additions and 4 deletions.
29 changes: 29 additions & 0 deletions ports/onnxruntime/fix-cmake-tensorrt.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
diff --git a/cmake/onnxruntime_providers_tensorrt.cmake b/cmake/onnxruntime_providers_tensorrt.cmake
index e56de0c..0ab5e1e 100644
--- a/cmake/onnxruntime_providers_tensorrt.cmake
+++ b/cmake/onnxruntime_providers_tensorrt.cmake
@@ -8,8 +8,13 @@
set(BUILD_LIBRARY_ONLY 1)
add_definitions("-DONNX_ML=1")
add_definitions("-DONNX_NAMESPACE=onnx")
- set(CUDA_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIRS})
- set(TENSORRT_ROOT ${onnxruntime_TENSORRT_HOME})
+ if(DEFINED onnxruntime_TENSORRT_HOME)
+ set(TENSORRT_ROOT ${onnxruntime_TENSORRT_HOME})
+ else()
+ find_package(CUDAToolkit REQUIRED)
+ get_filename_component(TENSORRT_ROOT "${CUDAToolkit_LIBRARY_ROOT}" ABSOLUTE)
+ message(STATUS "Guessing TensorRT with CUDAToolkit_LIBRARY_ROOT: ${TENSORRT_ROOT}")
+ endif()
set(OLD_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set(PROTOBUF_LIBRARY ${PROTOBUF_LIB})
if (WIN32)
@@ -30,7 +35,7 @@
# There is an issue when running "Debug build" TRT EP with "Release build" TRT builtin parser on Windows.
# We enforce following workaround for now until the real fix.
if (WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug")
- set(onnxruntime_USE_TENSORRT_BUILTIN_PARSER OFF)
+ # set(onnxruntime_USE_TENSORRT_BUILTIN_PARSER OFF)
MESSAGE(STATUS "[Note] There is an issue when running \"Debug build\" TRT EP with \"Release build\" TRT built-in parser on Windows. This build will use tensorrt oss parser instead.")
endif()

39 changes: 38 additions & 1 deletion ports/onnxruntime/portfile.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ vcpkg_from_github(
fix-cmake.patch
fix-cmake-cuda.patch
fix-cmake-training.patch
fix-cmake-tensorrt.patch
fix-sources.patch
fix-clang-cl-simd-compile.patch
)
Expand Down Expand Up @@ -92,6 +93,13 @@ elseif(VCPKG_TARGET_IS_OSX OR VCPKG_TARGET_IS_IOS)
set(GENERATOR_OPTIONS GENERATOR Xcode)
endif()

# When the 'tensorrt' feature is selected and the user supplied a TensorRT
# location (TENSORRT_ROOT, e.g. via a triplet/overlay variable), forward it
# to the upstream build as onnxruntime_TENSORRT_HOME.
if(("tensorrt" IN_LIST FEATURES) AND (DEFINED TENSORRT_ROOT))
    message(STATUS "Using TensorRT: ${TENSORRT_ROOT}")
    list(APPEND FEATURE_OPTIONS "-Donnxruntime_TENSORRT_HOME:PATH=${TENSORRT_ROOT}")
endif()

# see tools/ci_build/build.py
vcpkg_cmake_configure(
SOURCE_PATH "${SOURCE_PATH}/cmake"
Expand All @@ -115,6 +123,7 @@ vcpkg_cmake_configure(
-Donnxruntime_ENABLE_EXTERNAL_CUSTOM_OP_SCHEMAS=OFF
-Donnxruntime_ENABLE_LAZY_TENSOR=OFF
-Donnxruntime_NVCC_THREADS=1 # parallel compilation
"-DCMAKE_CUDA_FLAGS=-Xcudafe --diag_suppress=2803" # too many warnings about attributes
-Donnxruntime_DISABLE_RTTI=OFF
-Donnxruntime_DISABLE_ABSEIL=OFF
-Donnxruntime_USE_NEURAL_SPEED=OFF
Expand All @@ -132,18 +141,46 @@ vcpkg_cmake_configure(
onnxruntime_TENSORRT_PLACEHOLDER_BUILDER
onnxruntime_USE_CUSTOM_DIRECTML
onnxruntime_NVCC_THREADS
CMAKE_CUDA_FLAGS
)
# Pre-build the heavy per-feature targets one at a time, each with its own
# log file base, before the full install build. Order matches the feature
# list: cuda, tensorrt, directml, training.
foreach(entry IN ITEMS
        "cuda;onnxruntime_providers_cuda;build-cuda"
        "tensorrt;onnxruntime_providers_tensorrt;build-tensorrt"
        "directml;onnxruntime_providers_dml;build-directml"
        "training;tensorboard;build-tensorboard")
    list(GET entry 0 feature_name)
    if("${feature_name}" IN_LIST FEATURES)
        list(GET entry 1 build_target)
        list(GET entry 2 log_base)
        vcpkg_cmake_build(TARGET ${build_target} LOGFILE_BASE ${log_base})
    endif()
endforeach()
vcpkg_cmake_install()
vcpkg_cmake_config_fixup(CONFIG_PATH lib/cmake/onnxruntime PACKAGE_NAME onnxruntime)
vcpkg_copy_pdbs()
vcpkg_fixup_pkgconfig() # pkg_check_modules(libonnxruntime)

# Relocate an onnxruntime_providers_* DLL from lib/ to bin/ (debug and
# release trees) so the later vcpkg_copy_pdbs() call finds it in the
# conventional DLL location. The provider modules are loaded at runtime
# by name, so no import (.lib) file is expected alongside them.
#
# Arguments:
#   PROVIDER_NAME - base file name of the provider library (no extension)
function(relocate_ort_providers PROVIDER_NAME)
  if(VCPKG_TARGET_IS_WINDOWS AND (VCPKG_LIBRARY_LINKAGE STREQUAL "dynamic"))
    # Guard each rename: under VCPKG_BUILD_TYPE=release (or =debug) only one
    # configuration is produced, and file(RENAME) hard-fails on a missing file.
    if(EXISTS "${CURRENT_PACKAGES_DIR}/debug/lib/${PROVIDER_NAME}.dll")
      file(RENAME "${CURRENT_PACKAGES_DIR}/debug/lib/${PROVIDER_NAME}.dll"
                  "${CURRENT_PACKAGES_DIR}/debug/bin/${PROVIDER_NAME}.dll")
    endif()
    if(EXISTS "${CURRENT_PACKAGES_DIR}/lib/${PROVIDER_NAME}.dll")
      file(RENAME "${CURRENT_PACKAGES_DIR}/lib/${PROVIDER_NAME}.dll"
                  "${CURRENT_PACKAGES_DIR}/bin/${PROVIDER_NAME}.dll")
    endif()
  endif()
endfunction()

# Relocate the provider DLL for each enabled execution-provider feature,
# then pick up the PDBs from their new locations. Order matches the
# original sequence: cuda, tensorrt, directml.
foreach(provider_feature IN ITEMS cuda tensorrt directml)
    # The directml feature's target uses the short 'dml' suffix.
    if("${provider_feature}" STREQUAL "directml")
        set(provider_target onnxruntime_providers_dml)
    else()
        set(provider_target onnxruntime_providers_${provider_feature})
    endif()
    if("${provider_feature}" IN_LIST FEATURES)
        relocate_ort_providers(${provider_target})
    endif()
endforeach()
vcpkg_copy_pdbs()

# The 'test' feature installs the standalone onnx_test_runner executable
# as a packaged tool.
if("test" IN_LIST FEATURES)
vcpkg_copy_tools(TOOL_NAMES onnx_test_runner AUTO_CLEAN)
endif()
Expand Down
39 changes: 37 additions & 2 deletions ports/onnxruntime/vcpkg.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "onnxruntime",
"version-semver": "1.18.1",
"port-version": 1,
"port-version": 2,
"description": "ONNX Runtime: cross-platform, high performance ML inferencing and training accelerator",
"homepage": "https://onnxruntime.ai/",
"license": "MIT",
Expand Down Expand Up @@ -73,17 +73,52 @@
],
"features": {
"cuda": {
"description": "Build ONNXRuntime unit tests",
"description": "Build with CUDA support",
"dependencies": [
"cuda",
"cudnn",
"nvidia-cutlass"
]
},
"directml": {
"description": "Build with DirectML support",
"supports": "windows",
"dependencies": [
"directml",
"directx-headers"
]
},
"framework": {
"description": "Build a macOS/iOS framework, Objective-C library",
"supports": "osx | ios"
},
"openvino": {
"description": "Build with OpenVINO support",
"supports": "!(osx | ios | android | emscripten)",
"dependencies": [
{
"name": "openvino",
"default-features": false,
"features": [
"cpu",
"gpu",
"onnx"
]
}
]
},
"tensorrt": {
"description": "Build with TensorRT support",
"dependencies": [
{
"name": "onnxruntime",
"default-features": false,
"features": [
"cuda"
]
}
]
},
"test": {
"description": "Build ONNXRuntime unit tests",
"dependencies": [
Expand Down
3 changes: 2 additions & 1 deletion test/self-hosted.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,8 @@
{
"name": "onnxruntime",
"features": [
"cuda"
"cuda",
"tensorrt"
]
}
]
Expand Down

0 comments on commit 61a5345

Please sign in to comment.