Merge pull request #4 from Flamefire/PyTorch-1.13.1
Update patches based on PyTorch 1.13.1
branfosj authored Feb 10, 2023
2 parents 6fac76c + 3f6e57d commit 915994d
Showing 4 changed files with 26 additions and 27 deletions.
12 changes: 6 additions & 6 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1-foss-2022a.eb
@@ -22,10 +22,10 @@ patches = [
     'PyTorch-1.12.1_fix-vsx-vector-funcs.patch',
     'PyTorch-1.12.1_fix-vsx-loadu.patch',
     'PyTorch-1.12.1_skip-test_round_robin_create_destroy.patch',
-    'PyTorch-1.13.1_remove-flaky-test-in-testnn.patch',
-    'PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch',
     'PyTorch-1.13.1_fix-test-ops-conf.patch',
     'PyTorch-1.13.1_no-cuda-stubs-rpath.patch',
+    'PyTorch-1.13.1_remove-flaky-test-in-testnn.patch',
+    'PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch',
 ]
 checksums = [
     {'pytorch-v1.13.1.tar.gz': 'dbc229ee9750b02b514937d017744443a269ea0241ed3f32b9af0703589d25d4'},
@@ -46,12 +46,12 @@ checksums = [
     {'PyTorch-1.12.1_fix-vsx-loadu.patch': '8bfe3c94ada1dd1f7974a1261a8b576fb7ae944050fa1c7830fca033831123b2'},
     {'PyTorch-1.12.1_skip-test_round_robin_create_destroy.patch':
      '1435fcac3234edc865479199673b902eb67f6a2bd046af7d731141f03594666d'},
-    {'PyTorch-1.13.1_remove-flaky-test-in-testnn.patch':
-     'ad8db280c1acb5fade65646097590c6b332a9caf722191e4a3ddba2fb945ee6d'},
-    {'PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch':
-     'ea391298c4d9984de87cc14da9740ff09137b4a832fafd9e60c576f81690e8ec'},
-    {'PyTorch-1.13.1_fix-test-ops-conf.patch': 'df652eec7753864ebebbfeca546929a53e3fb8f24259d5c9b964266a8551198c'},
-    {'PyTorch-1.13.1_no-cuda-stubs-rpath.patch': '4c636059850fc9d1ecb27ce275f8aad5d5b6fdc19e35aff0c25b86cb3201352a'},
+    {'PyTorch-1.13.1_fix-test-ops-conf.patch': 'df652eec7753864ebebbfeca546929a53e3fb8f24259d5c9b964266a8551198c'},
+    {'PyTorch-1.13.1_no-cuda-stubs-rpath.patch': '1a8f712e474f64da878b9328ce05e7245afcba1765cbc27fb5f4f16f733ea175'},
+    {'PyTorch-1.13.1_remove-flaky-test-in-testnn.patch':
+     'be83ff61fe2dedab6d49c232936d5622df81ab49154264490021c6c828e53315'},
+    {'PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch':
+     '92cd48ef6d01aa7e07ccce1dcaf40bc3fb0f220c4aa4fea15f3e05fb42e37909'},
 ]
 
 osdependencies = [OS_PKG_IBVERBS_DEV]
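
The three patch files rewritten in this commit get new SHA256 checksums in the block above (fix-test-ops-conf is only reordered, so its checksum is unchanged). A minimal sketch of how such entries can be regenerated, assuming the patch files sit in the current directory:

    import hashlib

    patches = [
        'PyTorch-1.13.1_no-cuda-stubs-rpath.patch',
        'PyTorch-1.13.1_remove-flaky-test-in-testnn.patch',
        'PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch',
    ]
    for patch in patches:
        with open(patch, 'rb') as f:
            # SHA256 over the raw file bytes, matching the checksums list format
            print(f"{patch}: {hashlib.sha256(f.read()).hexdigest()}")

In practice EasyBuild can also rewrite the whole checksums list itself via eb --inject-checksums.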
easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1_no-cuda-stubs-rpath.patch
@@ -16,12 +16,11 @@
 #
 # Original patch: Caspar van Leeuwen
 # Updated: Alexander Grund (TU Dresden)
-# Updated: Simon Branford (University of Birmingham)
 #
 # See https://github.com/pytorch/pytorch/pull/87593
 
 diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
-index 4182797fc78e5..265bf4f660896 100644
+index 9074b848411..1d45807189b 100644
 --- a/caffe2/CMakeLists.txt
 +++ b/caffe2/CMakeLists.txt
 @@ -631,13 +631,12 @@ endif()
@@ -43,7 +42,7 @@ index 4182797fc78e5..265bf4f660896 100644
 if(USE_NCCL)
 diff --git a/cmake/LinkCudaLibraries.cmake b/cmake/LinkCudaLibraries.cmake
 new file mode 100644
-index 0000000000000..005914ccc6f7c
+index 00000000000..005914ccc6f
 --- /dev/null
 +++ b/cmake/LinkCudaLibraries.cmake
 @@ -0,0 +1,33 @@
@@ -81,7 +80,7 @@ index 0000000000000..005914ccc6f7c
 + endif()
 +endfunction()
 diff --git a/test/cpp/api/CMakeLists.txt b/test/cpp/api/CMakeLists.txt
-index 6b801a0731827..6ac92870479e0 100644
+index 6b801a07318..6ac92870479 100644
 --- a/test/cpp/api/CMakeLists.txt
 +++ b/test/cpp/api/CMakeLists.txt
 @@ -54,7 +54,8 @@ if(NOT MSVC)
@@ -95,7 +94,7 @@ index 6b801a0731827..6ac92870479e0 100644
 ${CUDA_NVRTC_LIB}
 ${CUDA_CUDA_LIB}
 diff --git a/test/cpp/dist_autograd/CMakeLists.txt b/test/cpp/dist_autograd/CMakeLists.txt
-index 9969c63e16d57..356ba5be55c4e 100644
+index 9969c63e16d..356ba5be55c 100644
 --- a/test/cpp/dist_autograd/CMakeLists.txt
 +++ b/test/cpp/dist_autograd/CMakeLists.txt
 @@ -10,7 +10,8 @@ if(USE_DISTRIBUTED AND NOT WIN32)
@@ -109,10 +108,10 @@ index 9969c63e16d57..356ba5be55c4e 100644
 ${CUDA_NVRTC_LIB}
 ${CUDA_CUDA_LIB}
 diff --git a/test/cpp/jit/CMakeLists.txt b/test/cpp/jit/CMakeLists.txt
-index b8b765a68d8b4..aba9c8c6c3e17 100644
+index 66a60fb01ca..005e18183d2 100644
 --- a/test/cpp/jit/CMakeLists.txt
 +++ b/test/cpp/jit/CMakeLists.txt
-@@ -156,7 +156,8 @@ if(LINUX)
+@@ -148,7 +148,8 @@ if(LINUX)
 endif()
 
 if(USE_CUDA)
@@ -123,7 +122,7 @@ index b8b765a68d8b4..aba9c8c6c3e17 100644
 ${CUDA_NVRTC_LIB}
 ${CUDA_CUDA_LIB}
 diff --git a/test/cpp/rpc/CMakeLists.txt b/test/cpp/rpc/CMakeLists.txt
-index 3997f8753e555..21fddbc645d0d 100644
+index 3997f8753e5..21fddbc645d 100644
 --- a/test/cpp/rpc/CMakeLists.txt
 +++ b/test/cpp/rpc/CMakeLists.txt
 @@ -33,7 +33,8 @@ target_include_directories(
@@ -137,7 +136,7 @@ index 3997f8753e555..21fddbc645d0d 100644
 ${CUDA_NVRTC_LIB}
 ${CUDA_CUDA_LIB}
 diff --git a/test/cpp/tensorexpr/CMakeLists.txt b/test/cpp/tensorexpr/CMakeLists.txt
-index 7dff70630d3ec..ecb83005492f5 100644
+index 7dff70630d3..ecb83005492 100644
 --- a/test/cpp/tensorexpr/CMakeLists.txt
 +++ b/test/cpp/tensorexpr/CMakeLists.txt
 @@ -57,14 +57,15 @@ if(USE_PTHREADPOOL)
@@ -159,10 +158,10 @@ index 7dff70630d3ec..ecb83005492f5 100644
 ${CUDA_NVRTC_LIB}
 ${CUDA_CUDA_LIB}
 diff --git a/test/test_torch.py b/test/test_torch.py
-index 31759213ecefc..67ebc0420f38a 100644
+index 8de5b822d00..fce7b5714f1 100644
 --- a/test/test_torch.py
 +++ b/test/test_torch.py
-@@ -8581,6 +8581,21 @@ def add_neg_dim_tests():
+@@ -8414,6 +8414,21 @@ def add_neg_dim_tests():
     assert not hasattr(TestTorch, test_name), "Duplicated test name: " + test_name
     setattr(TestTorch, test_name, make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim))
 
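
Per pytorch/pytorch#87593 (linked in the patch header above), the patch routes CUDA linking through the new LinkCudaLibraries.cmake helper so that the CUDA stubs directory does not end up in the RPATH of the built binaries; stub libraries must not be used at run time. A minimal post-build check in that spirit, assuming a Linux build with readelf on the PATH (the binary paths below are hypothetical):

    import subprocess

    def dynamic_path_entries(binary):
        # Collect the RPATH/RUNPATH lines from the ELF dynamic section
        out = subprocess.run(['readelf', '-d', binary],
                             capture_output=True, text=True, check=True).stdout
        return [line for line in out.splitlines()
                if 'RPATH' in line or 'RUNPATH' in line]

    for binary in ['build/bin/test_api', 'build/bin/test_jit']:  # hypothetical paths
        entries = dynamic_path_entries(binary)
        assert not any('stubs' in entry for entry in entries), \
            f"{binary} has a CUDA stubs dir in its RPATH: {entries}"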
easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1_remove-flaky-test-in-testnn.patch
@@ -5,17 +5,18 @@ fail.
 See https://github.com/pytorch/pytorch/issues/86638
 So remove the half precision test.
 
-Author: Simon Branford (University of Birmingham)
-Based off 1.12.1 patch by Alexander Grund (TU Dresden)
+Author: Alexander Grund (TU Dresden)
 
 diff --git a/test/nn/test_embedding.py b/test/nn/test_embedding.py
 index f76e01c65c5..6b5de2b1059 100644
 --- a/test/nn/test_embedding.py
 +++ b/test/nn/test_embedding.py
-@@ -18236,7 +18236,7 @@ class TestNNDeviceType(NNTestCase):
+@@ -1108,7 +1108,7 @@ class TestEmbeddingNNDeviceType(NNTestCase):
         self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
 
     @skipMeta
 -    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
 +    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
     def test_embedding_bag_device(self, device, dtypes):
         with set_default_dtype(torch.double):
             self._test_EmbeddingBag(device, 'sum', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
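
For context on the @dtypes change above: the decorator parametrizes the test over the cartesian product of index, offset and weight dtypes, so dropping torch.half from the last tuple removes the four half-precision combinations while leaving the rest untouched. A quick sketch, assuming a working torch install:

    import itertools
    import torch

    index_dtypes = (torch.int, torch.long)
    offset_dtypes = (torch.int, torch.long)
    before = list(itertools.product(index_dtypes, offset_dtypes,
                                    (torch.float, torch.double, torch.half)))
    after = list(itertools.product(index_dtypes, offset_dtypes,
                                   (torch.float, torch.double)))
    print(len(before), len(after))  # 12 8: the four torch.half combinations are gone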
easybuild/easyconfigs/p/PyTorch/PyTorch-1.13.1_skip-ao-sparsity-test-without-fbgemm.patch
@@ -2,11 +2,10 @@ Those tests (from test_ao_sparsity) require FBGEMM which may not be available.
 So add the skip decorator.
 See https://github.com/pytorch/pytorch/issues/87364
 
-Author: Simon Branford (University of Birmingham)
-Based off 1.12.1 patch by Alexander Grund (TU Dresden)
+Author: Alexander Grund (TU Dresden)
 
 diff --git a/test/ao/sparsity/test_composability.py b/test/ao/sparsity/test_composability.py
-index b44c885507..b7d35343c0 100644
+index 6a1b6067a4c..b2eed72e3e3 100644
 --- a/test/ao/sparsity/test_composability.py
 +++ b/test/ao/sparsity/test_composability.py
 @@ -9,6 +9,7 @@ import torch.ao.quantization as tq
@@ -17,11 +16,11 @@ index b44c885507..b7d35343c0 100644
 from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx, convert_to_reference_fx, prepare_qat_fx
 from torch.ao.sparsity import fqn_to_module
 
-@@ -23,6 +24,7 @@ sparse_defaults = {
+@@ -62,6 +63,7 @@ def _calculate_sparsity(tensor):
 # This series of tests are to check the composability goals for sparsity and quantization. Namely
 # that performing quantization and sparsity model manipulations in various orderings
 # does not cause problems
 +@skipIfNoFBGEMM
 class TestComposability(TestCase):
-    def _get_model_and_sparsifier_and_sparse_config(self, qconfig=None):
-        model = nn.Sequential(
+# This test checks whether performing quantization prepare before sparse prepare
+# causes any issues and verifies that the correct observers are inserted and that
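The skipIfNoFBGEMM decorator applied above comes from PyTorch's internal test utilities. A minimal sketch of an equivalent guard (not PyTorch's actual implementation), which skips a test or a whole TestCase unless this torch build ships the FBGEMM quantized engine:

    import unittest
    import torch

    def skip_if_no_fbgemm(obj):
        # FBGEMM is x86-only (AVX2); without it the quantized ops exercised
        # by these composability tests are unavailable.
        available = 'fbgemm' in torch.backends.quantized.supported_engines
        return unittest.skipUnless(available, 'FBGEMM quantized engine not available')(obj)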