fix: Upgrade Torch version, enable options
- Upgrade Torch version across the stack
- Update the advanced-usage Dynamo sample to demonstrate the new
  `options` argument in `torch.compile`
- Enable the `options` argument in `torch.compile`, including improved
  input handling in the default torch_tensorrt backend
- The ResNet example now uses `torch_tensorrt.dynamo.compile`, while the
  transformers example uses `torch_tensorrt.compile(...,
  ir="dynamo_compile", ...)` (see the usage sketch below)
gs-olive committed Jun 2, 2023
1 parent cd6542e commit d695d8d
Showing 10 changed files with 48 additions and 35 deletions.
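
Below is a minimal usage sketch (not taken from this commit) of the patterns the bullets above refer to: the new `options` dictionary accepted by the "torch_tensorrt" backend of `torch.compile`, and the Dynamo compile paths used by the updated examples. The model and inputs are placeholders, and the keyword arguments to the Dynamo APIs are assumptions rather than the examples' exact code; the option names themselves are taken from the diff below.

import torch
import torch_tensorrt  # assumes a nightly torch_tensorrt build matching this commit

class TinyModel(torch.nn.Module):  # placeholder model, not from the repo examples
    def forward(self, x):
        return torch.relu(x) + 1.0

model = TinyModel().eval().cuda()
inputs = [torch.randn((1, 3, 224, 224)).cuda()]

# torch.compile with the torch_tensorrt backend and the new `options` dictionary
compiled = torch.compile(
    model,
    backend="torch_tensorrt",
    options={"min_block_size": 2, "debug": True},
)
compiled(*inputs)  # the first call triggers compilation

# Dynamo paths referenced by the updated ResNet / transformers examples
# (keyword arguments here are assumptions, not the examples' exact code)
trt_mod = torch_tensorrt.dynamo.compile(model, inputs=inputs)
trt_mod_ir = torch_tensorrt.compile(model, ir="dynamo_compile", inputs=inputs)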
2 changes: 1 addition & 1 deletion .bazelrc
@@ -22,7 +22,7 @@
 # +------------------------------------------------------------+
 # Enable colorful output of GCC
 build --cxxopt="-fdiagnostics-color=always"
-build --cxxopt='-std=c++14'
+build --cxxopt='-std=c++17'
 #build --linkopt="-Wl,--no-as-needed"


8 changes: 4 additions & 4 deletions .circleci/config.yml
@@ -269,10 +269,10 @@ commands:
     parameters:
       torch-build:
         type: string
-        default: "2.1.0.dev20230419+cu118"
+        default: "2.1.0.dev20230601+cu118"
       torchvision-build:
         type: string
-        default: "0.16.0.dev20230419+cu118"
+        default: "0.16.0.dev20230601+cu118"
       torch-build-index:
         type: string
         default: "https://download.pytorch.org/whl/nightly/cu118"
@@ -1350,10 +1350,10 @@ parameters:
   # Nightly platform config
   torch-build:
     type: string
-    default: "2.1.0.dev20230419+cu118"
+    default: "2.1.0.dev20230601+cu118"
   torchvision-build:
     type: string
-    default: "0.16.0.dev20230419+cu118"
+    default: "0.16.0.dev20230601+cu118"
   torch-build-index:
     type: string
     default: "https://download.pytorch.org/whl/nightly/cu118"
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -2,8 +2,8 @@
 cmake_minimum_required(VERSION 3.17)
 project(Torch-TensorRT LANGUAGES CXX)

-# use c++14 like PyTorch
-set(CMAKE_CXX_STANDARD 14)
+# use c++17 like PyTorch
+set(CMAKE_CXX_STANDARD 17)

 # Build the libraries with -fPIC
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
2 changes: 1 addition & 1 deletion README.md
@@ -116,7 +116,7 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") # save the TRT embedd
 These are the following dependencies used to verify the testcases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.

 - Bazel 5.2.0
-- Libtorch 2.1.0.dev20230419 (built with CUDA 11.8)
+- Libtorch 2.1.0.dev20230601 (built with CUDA 11.8)
 - CUDA 11.8
 - cuDNN 8.8.0
 - TensorRT 8.6.0
8 changes: 4 additions & 4 deletions WORKSPACE
@@ -51,17 +51,17 @@ new_local_repository(
 http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
-    sha256 = "1a526a9cd19c1015674d26921dbb94bcd2d632a6f9c431a21c43f4e24768d834",
+    sha256 = "c8407ae3462c344ae3814e82023e22ece759ebe75023f35bdf62e9c0a7e79035",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230419%2Bcu118.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230601%2Bcu118.zip"],
 )

 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
-    sha256 = "60c5912a5085a6a7073b3804b10d41d6cc054693bbeb7a45e0247050c2837bac",
+    sha256 = "76f983bd6d784cc0a95c679034d297abe36911c16b2188498b13a9028177e28e",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230419%2Bcu118.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230601%2Bcu118.zip"],
 )

 # Download these tarballs manually from the NVIDIA website
30 changes: 17 additions & 13 deletions examples/dynamo/dynamo_compile_advanced_usage.py
@@ -9,7 +9,6 @@
 # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 import torch
-from torch_tensorrt.dynamo.backend import create_backend
 from torch_tensorrt.fx.lower_setting import LowerPrecision

 # %%
@@ -39,9 +38,9 @@ def forward(self, x: torch.Tensor, y: torch.Tensor):

 # Next, we compile the model using torch.compile
 # For the default settings, we can simply call torch.compile
-# with the backend "tensorrt", and run the model on an
+# with the backend "torch_tensorrt", and run the model on an
 # input to cause compilation, as so:
-optimized_model = torch.compile(model, backend="tensorrt")
+optimized_model = torch.compile(model, backend="torch_tensorrt")
 optimized_model(*sample_inputs)

 # %%
@@ -58,18 +57,23 @@ def forward(self, x: torch.Tensor, y: torch.Tensor):
 # %%

 # If we want to customize certain options in the backend,
-# but still use the torch.compile call directly, we can call the
-# convenience/helper function create_backend to create a custom backend
-# which has been pre-populated with certain keys
-custom_backend = create_backend(
-    lower_precision=LowerPrecision.FP16,
-    debug=True,
-    min_block_size=2,
-    torch_executed_ops={},
-)
+# but still use the torch.compile call directly, we can provide
+# custom options to the backend via the "options" keyword
+# which takes in a dictionary mapping options to values.
+#
+# For accepted backend options, see the CompilationSettings dataclass:
+# py/torch_tensorrt/dynamo/backend/_settings.py
+backend_kwargs = {
+    "lower_precision": LowerPrecision.FP16,
+    "debug": True,
+    "min_block_size": 2,
+    "torch_executed_ops": {"torch.ops.aten.sub.Tensor"},
+}

 # Run the model on an input to cause compilation, as so:
-optimized_model_custom = torch.compile(model_half, backend=custom_backend)
+optimized_model_custom = torch.compile(
+    model_half, backend="torch_tensorrt", options=backend_kwargs
+)
 optimized_model_custom(*sample_inputs_half)

 # %%
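
A quick parity check against eager PyTorch is a reasonable follow-up once the custom-option compilation succeeds. This is not part of the sample; it assumes the module returns a single tensor, and the tolerance is a rough FP16-scale guess.

with torch.no_grad():
    eager_out = model_half(*sample_inputs_half)            # eager-mode reference
    trt_out = optimized_model_custom(*sample_inputs_half)  # TensorRT-backed result
print(torch.max(torch.abs(eager_out - trt_out)))  # expect a small FP16-scale difference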
4 changes: 2 additions & 2 deletions py/requirements.txt
@@ -2,7 +2,7 @@ numpy
 packaging
 pybind11==2.6.2
 --extra-index-url https://download.pytorch.org/whl/nightly/cu118
-torch==2.1.0.dev20230419+cu118
-torchvision==0.16.0.dev20230419+cu118
+torch==2.1.0.dev20230601+cu118
+torchvision==0.16.0.dev20230601+cu118
 --extra-index-url https://pypi.ngc.nvidia.com
 tensorrt==8.6.0
9 changes: 9 additions & 0 deletions py/torch_tensorrt/dynamo/backend/backends.py
@@ -2,6 +2,7 @@
 import torch
 import traceback
 from functools import partial
+from dataclasses import replace, fields
 import torch._dynamo as td

 from torch_tensorrt.dynamo.backend._settings import CompilationSettings
@@ -25,7 +26,15 @@ def torch_tensorrt_backend(
     gm: torch.fx.GraphModule,
     sample_inputs: Sequence[torch.Tensor],
     settings: CompilationSettings = CompilationSettings(),
+    **kwargs
 ):
+    # If the user specifies keyword args, overwrite those fields in settings
+    # Validate all specified kwargs to ensure they are true fields of the dataclass
+    if kwargs:
+        valid_attrs = {attr.name for attr in fields(settings)}
+        valid_kwargs = {k: v for k, v in kwargs.items() if k in valid_attrs}
+        settings = replace(settings, **valid_kwargs)
+
     DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend

     return DEFAULT_BACKEND(gm, sample_inputs, settings=settings)
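
The new block filters arbitrary keyword arguments down to the fields that actually exist on the CompilationSettings dataclass before applying them. A self-contained sketch of the same standard-library pattern, using an illustrative Settings class rather than the real CompilationSettings:

from dataclasses import dataclass, fields, replace

@dataclass(frozen=True)
class Settings:
    debug: bool = False
    min_block_size: int = 3

def apply_kwargs(settings: Settings, **kwargs) -> Settings:
    # Keep only kwargs whose names match dataclass fields, then build an
    # updated copy; unknown keys are silently dropped.
    valid = {f.name for f in fields(settings)}
    return replace(settings, **{k: v for k, v in kwargs.items() if k in valid})

print(apply_kwargs(Settings(), debug=True, not_a_field=1))
# -> Settings(debug=True, min_block_size=3)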
8 changes: 4 additions & 4 deletions toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel
@@ -56,17 +56,17 @@ new_local_repository(
 http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
-    sha256 = "1a526a9cd19c1015674d26921dbb94bcd2d632a6f9c431a21c43f4e24768d834",
+    sha256 = "c8407ae3462c344ae3814e82023e22ece759ebe75023f35bdf62e9c0a7e79035",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230419%2Bcu118.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230601%2Bcu118.zip"],
 )

 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
-    sha256 = "60c5912a5085a6a7073b3804b10d41d6cc054693bbeb7a45e0247050c2837bac",
+    sha256 = "76f983bd6d784cc0a95c679034d297abe36911c16b2188498b13a9028177e28e",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230419%2Bcu118.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230601%2Bcu118.zip"],
 )

 ####################################################################################
8 changes: 4 additions & 4 deletions toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
@@ -56,17 +56,17 @@ new_local_repository(
 http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
-    sha256 = "1a526a9cd19c1015674d26921dbb94bcd2d632a6f9c431a21c43f4e24768d834",
+    sha256 = "c8407ae3462c344ae3814e82023e22ece759ebe75023f35bdf62e9c0a7e79035",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230419%2Bcu118.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230601%2Bcu118.zip"],
 )

 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
-    sha256 = "60c5912a5085a6a7073b3804b10d41d6cc054693bbeb7a45e0247050c2837bac",
+    sha256 = "76f983bd6d784cc0a95c679034d297abe36911c16b2188498b13a9028177e28e",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230419%2Bcu118.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230601%2Bcu118.zip"],
 )

 ####################################################################################
