From 5166578b095ca3ff8528482e66e378e024cb9b34 Mon Sep 17 00:00:00 2001
From: Roy Li
Date: Thu, 4 Apr 2019 02:21:09 -0700
Subject: [PATCH] Introduce DeprecatedTypeProperties class (#17991)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17991

changes:
-Breaks bc: Tensor::type() now returns DeprecatedTypeProperties& rather than Type&.
-Added DeprecatedTypeProperties, it serves as a temporary replacement for Type as the return value of Tensor::type(). This contributes to making Type just for dispatch purposes so that we can make it dtype agnostic.
-Tensor::dispatch_type() now returns Type& like Tensor::type() used to do.
-Changed callsites of Tensor::type() appropriately.

Reviewed By: ezyang

Differential Revision: D14443117

fbshipit-source-id: 239ccb7a09626279a71d1a37f8f82e7f57bf7d9e
---
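Note (illustrative, not part of the patch): after this change, property queries keep going through Tensor::type(), which now returns a DeprecatedTypeProperties&, while code that genuinely needs a Type& for dispatch calls Tensor::dispatch_type(). A minimal sketch of a migrated callsite; the function name `describe` is a placeholder:

    #include <ATen/ATen.h>

    // Sketch: how a property-querying callsite looks after this patch.
    void describe(const at::Tensor& t) {
      at::DeprecatedTypeProperties& props = t.type();  // backend + dtype queries
      if (props.is_cuda() && props.scalarType() == at::kFloat) {
        // CUDA float tensor
      }
      at::Type& dispatch = t.dispatch_type();  // only where a Type& is really needed
      (void)dispatch;
    }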
 aten/src/ATen/DLConvertor.cpp                 |   6 +-
 aten/src/ATen/Dispatch.h                      |   4 +-
 aten/src/ATen/SparseTensorImpl.cpp            |   3 +-
 aten/src/ATen/SparseTensorUtils.h             |   7 +-
 aten/src/ATen/core/DeprecatedTypeProperties.h |  67 ++
 .../core/DeprecatedTypePropertiesRegistry.cpp |  12 +
 .../core/DeprecatedTypePropertiesRegistry.h   |  46 +
 aten/src/ATen/core/Formatting.cpp             |   7 +-
 aten/src/ATen/core/Formatting.h               |   1 +
 aten/src/ATen/core/LegacyTypeDispatch.cpp     |   6 +-
 aten/src/ATen/core/Tensor.cpp                 |   4 +-
 aten/src/ATen/core/Tensor.h                   |   7 +-
 aten/src/ATen/core/TensorMethods.h            | 836 +++++++++---------
 aten/src/ATen/function_wrapper.py             |   2 +-
 aten/src/ATen/native/BatchLinearAlgebra.cpp   |   6 +-
 aten/src/ATen/native/Indexing.cpp             |  11 +-
 aten/src/ATen/native/LegacyBridge.cpp         |   6 +-
 aten/src/ATen/native/LinearAlgebra.cpp        |   7 +-
 aten/src/ATen/native/LossCTC.cpp              |   2 +-
 aten/src/ATen/native/Memory.cpp               |   2 +-
 aten/src/ATen/native/NNPACK.cpp               |   8 +-
 aten/src/ATen/native/ReduceOps.cpp            |   2 +-
 aten/src/ATen/native/TensorConversions.cpp    |   4 +-
 aten/src/ATen/native/TensorFactories.cpp      |  12 +-
 aten/src/ATen/native/TensorIterator.h         |   2 +-
 aten/src/ATen/native/TensorIteratorReduce.cpp |   2 +-
 aten/src/ATen/native/TypeProperties.cpp       |   4 +-
 aten/src/ATen/native/cuda/Distributions.cu    |   1 -
 aten/src/ATen/native/cuda/LossCTC.cu          |  14 +-
 aten/src/ATen/native/cuda/MiscUtils.h         |   4 +-
 aten/src/ATen/templates/Tensor.h              |   7 +-
 aten/src/ATen/templates/TensorMethods.h       |  18 +-
 .../ATen/test/cuda_tensor_interop_test.cpp    |   4 +-
 aten/src/ATen/test/scalar_test.cpp            |   4 +-
 aten/src/ATen/test/tensor_interop_test.cpp    |   4 +-
 aten/src/ATen/test/undefined_tensor_test.cpp  |   6 +-
 36 files changed, 637 insertions(+), 501 deletions(-)
 create mode 100644 aten/src/ATen/core/DeprecatedTypeProperties.h
 create mode 100644 aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp
 create mode 100644 aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h

diff --git a/aten/src/ATen/DLConvertor.cpp b/aten/src/ATen/DLConvertor.cpp
index a40e87272..cf9daa5c0 100644
--- a/aten/src/ATen/DLConvertor.cpp
+++ b/aten/src/ATen/DLConvertor.cpp
@@ -56,10 +56,10 @@ static DLDataType getDLDataType(const Tensor& t) {
   return dtype;
 }
 
-static DLContext getDLContext(const Type& type, const int64_t& device_id) {
+static DLContext getDLContext(const Tensor& tensor, const int64_t& device_id) {
   DLContext ctx;
   ctx.device_id = device_id;
-  if (type.is_cuda()) {
+  if (tensor.is_cuda()) {
     ctx.device_type = DLDeviceType::kDLGPU;
   } else {
     ctx.device_type = DLDeviceType::kDLCPU;
@@ -161,7 +161,7 @@ DLManagedTensor* toDLPack(const Tensor& src) {
   if (src.is_cuda()) {
     device_id = src.get_device();
   }
-  atDLMTensor->tensor.dl_tensor.ctx = getDLContext(src.type(), device_id);
+  atDLMTensor->tensor.dl_tensor.ctx = getDLContext(src, device_id);
   atDLMTensor->tensor.dl_tensor.ndim = src.dim();
   atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src);
   atDLMTensor->tensor.dl_tensor.shape =
diff --git a/aten/src/ATen/Dispatch.h b/aten/src/ATen/Dispatch.h
index b22a60d0b..d6130d606 100644
--- a/aten/src/ATen/Dispatch.h
+++ b/aten/src/ATen/Dispatch.h
@@ -41,9 +41,9 @@ inline at::ScalarType scalar_type(at::ScalarType s) {
   return s;
 }
 
-C10_DEPRECATED_MESSAGE("passing at::Type to an AT_DISPATCH macro is deprecated, " \
+C10_DEPRECATED_MESSAGE("passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, " \
                        "pass an at::ScalarType instead")
-inline at::ScalarType scalar_type(const at::Type &t) {
+inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties &t) {
   return t.scalarType();
 }
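The Dispatch.h change above keeps a deprecated overload so that AT_DISPATCH callsites passing a tensor's type() still compile, while the preferred form passes the ScalarType directly. A small sketch under that assumption; `fill_one` and its body are placeholders, not code from this patch:

    #include <ATen/ATen.h>
    #include <ATen/Dispatch.h>

    void fill_one(at::Tensor& self) {
      // Deprecated after this patch: AT_DISPATCH_ALL_TYPES(self.type(), ...)
      // Preferred: pass the ScalarType directly.
      AT_DISPATCH_ALL_TYPES(self.scalar_type(), "fill_one", [&] {
        scalar_t* data = self.data<scalar_t>();
        for (int64_t i = 0; i < self.numel(); ++i) {
          data[i] = static_cast<scalar_t>(1);
        }
      });
    }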
diff --git a/aten/src/ATen/SparseTensorImpl.cpp b/aten/src/ATen/SparseTensorImpl.cpp
index 2cdca7986..48ca1b3d2 100644
--- a/aten/src/ATen/SparseTensorImpl.cpp
+++ b/aten/src/ATen/SparseTensorImpl.cpp
@@ -88,7 +88,8 @@ void SparseTensorImpl::set_indices_and_values_unsafe(const Tensor& indices, cons
   AT_CHECK(!indices.is_sparse(), "expected indices to be a dense tensor, but got indices of layout ", indices.layout());
   AT_CHECK(!values.is_sparse(), "expected values to be a dense tensor, but got values of layout ", values.layout());
 
-  AT_CHECK(values.type().toSparse() == legacyTensorType(*this), "values type must match sparse tensor type");
+  AT_CHECK(values.device().type() == device().type(), "device type of values (", values.device().type(), ") must match device type of device().type()", device().type(), ")");
+  AT_CHECK(values.scalar_type() == typeMetaToScalarType(dtype()), "dtype of values (", values.scalar_type(), ") must match dtype of sparse tensor (", typeMetaToScalarType(dtype()), ")");
   AT_CHECK(indices.scalar_type() == kLong, "indices must be an int64 tensor");
   AT_CHECK(indices.type().backend() == values.type().backend(), "backend of indices (", indices.type().backend(), ") must match backend of values (", values.type().backend(), ")");
   AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device(), "device of indices (", indices.get_device(), ") must match device of values (", values.get_device(), ")");
diff --git a/aten/src/ATen/SparseTensorUtils.h b/aten/src/ATen/SparseTensorUtils.h
index a7fa4ab8b..8113367d7 100644
--- a/aten/src/ATen/SparseTensorUtils.h
+++ b/aten/src/ATen/SparseTensorUtils.h
@@ -31,7 +31,10 @@ inline void alias_into_sparse(const SparseTensor& self, const LongTensor& indice
 // Take indices and values and makes a (data) copy of them to put into the sparse
 // indices/values. This used to be called THSTensor_(_set)
 inline void copy_into_sparse(const SparseTensor& self, const LongTensor& indices, const Tensor& values, bool non_blocking) {
-  alias_into_sparse(self, self._indices().type().copy(indices, non_blocking), self._values().type().copy(values, non_blocking));
+  alias_into_sparse(
+      self,
+      self._indices().dispatch_type().copy(indices, non_blocking),
+      self._values().dispatch_type().copy(values, non_blocking));
 }
 
 // TODO: put this into the public API
@@ -82,7 +85,7 @@ inline LongTensor flatten_indices(const Tensor& indices, IntArrayRef full_size,
     indices_mult_cpu_vec[i] = mult;
     mult *= full_size[i];
   }
-  auto indices_mult_cpu = indices.type().cpu()
+  auto indices_mult_cpu = indices.dispatch_type().cpu()
           .tensorFromBlob(indices_mult_cpu_vec.data(), /*size=*/{sparse_dim, 1});
   // NB: must be blocking because this blob may be freed after this closure,
   // and non_blocking copy will see garbage.
diff --git a/aten/src/ATen/core/DeprecatedTypeProperties.h b/aten/src/ATen/core/DeprecatedTypeProperties.h
new file mode 100644
index 000000000..88f53f63f
--- /dev/null
+++ b/aten/src/ATen/core/DeprecatedTypeProperties.h
@@ -0,0 +1,67 @@
+#pragma once
+
+#include <c10/core/Backend.h>
+#include <c10/core/ScalarType.h>
+#include <c10/core/Layout.h>
+
+
+namespace at {
+
+// This class specifies a Backend and a ScalarType. Currently, it primarily
+// serves as a replacement return value for Tensor::type(). Previously,
+// Tensor::type() returned Type&, but we are changing Type to not be
+// dtype-specific.
+class DeprecatedTypeProperties {
+ public:
+  DeprecatedTypeProperties(Backend backend, ScalarType scalar_type)
+    : backend_(backend), scalar_type_(scalar_type) {}
+
+  Backend backend() const {
+    return backend_;
+  }
+
+  bool is_sparse() const {
+    return layout_from_backend(backend()) == kSparse;
+  }
+
+  DeviceType device_type() const {
+    return backendToDeviceType(backend_);
+  }
+
+  bool is_cuda() const {
+    return backendToDeviceType(backend_) == kCUDA;
+  }
+
+  ScalarType scalarType() const {
+    return scalar_type_;
+  }
+
+  caffe2::TypeMeta typeMeta() const {
+    return scalarTypeToTypeMeta(scalar_type_);
+  }
+
+  bool is_defined() const {
+    return backend_ != Backend::Undefined && scalar_type_ != ScalarType::Undefined;
+  }
+
+  bool operator==(const DeprecatedTypeProperties& other) const {
+    return backend_ == other.backend() && scalar_type_ == other.scalarType();
+  }
+
+  bool operator!=(const DeprecatedTypeProperties& other) const {
+    return !(*this == other);
+  }
+
+  std::string toString() const {
+    std::stringstream ss;
+    ss << at::toString(backend()) << at::toString(scalarType()) << "Type";
+    return ss.str();
+  }
+
+ private:
+  Backend backend_;
+  ScalarType scalar_type_;
+};
+
+} // namespace at
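For orientation, a short sketch of how the new class is queried; `inspect` is a placeholder name and the commented values are what the header above implies for a CUDA float tensor:

    #include <ATen/ATen.h>

    void inspect(const at::Tensor& t) {
      at::DeprecatedTypeProperties& props = t.type();
      // For a CUDA float tensor: backend() == Backend::CUDA,
      // scalarType() == kFloat, toString() == "CUDAFloatType".
      bool on_gpu = props.is_cuda();
      at::ScalarType st = props.scalarType();
      std::string name = props.toString();
      (void)on_gpu; (void)st; (void)name;
    }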
diff --git a/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp
new file mode 100644
index 000000000..154f04d50
--- /dev/null
+++ b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp
@@ -0,0 +1,12 @@
+#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
+
+namespace at {
+
+// TODO: This could be bad juju if someone calls globalContext() in the
+// destructor of an object with static lifetime.
+DeprecatedTypePropertiesRegistry & globalDeprecatedTypePropertiesRegistry() {
+  static DeprecatedTypePropertiesRegistry singleton;
+  return singleton;
+}
+
+}
diff --git a/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h
new file mode 100644
index 000000000..0ab57bf0f
--- /dev/null
+++ b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h
@@ -0,0 +1,46 @@
+#pragma once
+
+// In order to preserve bc, we make DeprecatedTypeProperties instances unique
+// just like they are for Type.
+
+#include <c10/core/Backend.h>
+#include <c10/core/ScalarType.h>
+#include <ATen/core/DeprecatedTypeProperties.h>
+
+namespace at {
+
+struct CAFFE2_API DeprecatedTypePropertiesDeleter {
+  void operator()(DeprecatedTypeProperties * ptr) {
+    delete ptr;
+  }
+};
+
+class CAFFE2_API DeprecatedTypePropertiesRegistry {
+ public:
+  using DeprecatedTypePropertiesUniquePtr =
+      std::unique_ptr<DeprecatedTypeProperties, DeprecatedTypePropertiesDeleter>;
+
+  DeprecatedTypePropertiesRegistry() {
+    for (int b = 0; b < static_cast<int>(Backend::NumOptions); ++b) {
+      for (int s = 0; s < static_cast<int>(ScalarType::NumOptions); ++s) {
+        registry[b][s] = DeprecatedTypePropertiesUniquePtr{
+            new DeprecatedTypeProperties(static_cast<Backend>(b), static_cast<ScalarType>(s)),
+            DeprecatedTypePropertiesDeleter()
+        };
+      }
+    }
+  }
+
+  DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) {
+    return *registry[static_cast<int>(p)][static_cast<int>(s)];
+  }
+
+private:
+  DeprecatedTypePropertiesUniquePtr registry
+    [static_cast<int>(Backend::NumOptions)]
+    [static_cast<int>(ScalarType::NumOptions)];
+};
+
+CAFFE2_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry();
+
+} // namespace at
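The registry above hands out one long-lived DeprecatedTypeProperties instance per (Backend, ScalarType) pair, so reference identity keeps working the way it did when type() returned a Type&. A minimal sketch of that property, assuming default CPU float tensors; `check_identity` is a placeholder name:

    #include <ATen/ATen.h>

    void check_identity() {
      at::Tensor a = at::ones({2, 2});
      at::Tensor b = at::zeros({3});
      // Both CPU float tensors resolve to the same registry entry, so the
      // references compare equal by address as well as by value.
      bool same_object = (&a.type() == &b.type());
      bool same_value  = (a.type() == b.type());
      (void)same_object; (void)same_value;
    }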
diff --git a/aten/src/ATen/core/Formatting.cpp b/aten/src/ATen/core/Formatting.cpp
index ab0bb3303..0abedcade 100644
--- a/aten/src/ATen/core/Formatting.cpp
+++ b/aten/src/ATen/core/Formatting.cpp
@@ -37,6 +37,10 @@ std::ostream& operator<<(std::ostream & out, const Type& t) {
   return out << t.toString();
 }
 
+std::ostream& operator<<(std::ostream & out, const DeprecatedTypeProperties& t) {
+  return out << t.toString();
+}
+
 static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Tensor& self) {
   auto size = self.numel();
   if(size == 0) {
@@ -238,8 +242,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
     stream << "size:\n" << tensor_.sizes() << "\n";
     stream << "]";
   } else {
-    Type& cpudouble = tensor_.type().toBackend(Backend::CPU).toScalarType(kDouble);
-    Tensor tensor = tensor_.toType(cpudouble).contiguous();
+    Tensor tensor = tensor_.to(kCPU, kDouble).contiguous();
     if(tensor.ndimension() == 0) {
       stream << defaultfloat << tensor.data<double>()[0] << std::endl;
       stream << "[ " << tensor_.toString() << "{} ]";
diff --git a/aten/src/ATen/core/Formatting.h b/aten/src/ATen/core/Formatting.h
index c03a00e39..1055ad197 100644
--- a/aten/src/ATen/core/Formatting.h
+++ b/aten/src/ATen/core/Formatting.h
@@ -13,6 +13,7 @@ CAFFE2_API std::ostream& operator<<(std::ostream& out, Backend b);
 namespace at {
 
 CAFFE2_API std::ostream& operator<<(std::ostream& out, const Type& t);
+CAFFE2_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t);
 CAFFE2_API std::ostream& print(
     std::ostream& stream,
     const Tensor& tensor,
diff --git a/aten/src/ATen/core/LegacyTypeDispatch.cpp b/aten/src/ATen/core/LegacyTypeDispatch.cpp
index d9936eaf6..521e587c0 100644
--- a/aten/src/ATen/core/LegacyTypeDispatch.cpp
+++ b/aten/src/ATen/core/LegacyTypeDispatch.cpp
@@ -7,13 +7,13 @@ namespace at {
 /// Previously, in VariableType_*.cpp (generated by gen_variable_type.py), when
 /// a function is using the 'use_derived' strategy, we call its implementation
 /// on the base non-Variable type (`baseType`), passing unwrapped tensors to the
-/// call so that any `.type()` calls in the implementation can treat the passed
+/// call so that any `.dispatch_type()` calls in the implementation can treat the passed
 /// tensors as non-Variables and won't dispatch back to functions in VariableType.
 ///
 /// However, after the Variable/Tensor merge, there is no concept of unwrapping
 /// a tensor anymore, and directly passing variables to the base type calls will
-/// cause the `.type()` dispatch in the implementation to treat the tensor as a
-/// variable, and any function dispatch based on `.type()` will dispatch back to
+/// cause the `.dispatch_type()` dispatch in the implementation to treat the tensor as a
+/// variable, and any function dispatch based on `.dispatch_type()` will dispatch back to
 /// VariableType, which is not what we want.
 ///
 /// The solution to the above problem is to add `at::NonVariableTypeMode`, which
diff --git a/aten/src/ATen/core/Tensor.cpp b/aten/src/ATen/core/Tensor.cpp
index d575423cc..aa611e874 100644
--- a/aten/src/ATen/core/Tensor.cpp
+++ b/aten/src/ATen/core/Tensor.cpp
@@ -35,14 +35,14 @@ void Tensor::enforce_invariants() {
 
 void Tensor::print() const {
   if (defined()) {
-    std::cerr << "[" << type().toString() << " " << sizes() << "]" << std::endl;
+    std::cerr << "[" << dispatch_type().toString() << " " << sizes() << "]" << std::endl;
   } else {
     std::cerr << "[UndefinedTensor]" << std::endl;
   }
 }
 
 const char * Tensor::toString() const {
-  return type().toString();
+  return dispatch_type().toString();
 }
 
 } // namespace at
diff --git a/aten/src/ATen/core/Tensor.h b/aten/src/ATen/core/Tensor.h
index 8dbfb8147..a171eabfd 100644
--- a/aten/src/ATen/core/Tensor.h
+++ b/aten/src/ATen/core/Tensor.h
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
 
 namespace c10{
 struct TensorOptions;
@@ -196,7 +197,11 @@ class CAFFE2_API Tensor {
     return impl_->itemsize();
   }
 
-  Type & type() const {
+  DeprecatedTypeProperties & type() const {
+    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+        tensorTypeIdToBackend(type_id()), scalar_type());
+  }
+  Type & dispatch_type() const {
     return legacyTensorType(*impl_);
  }
   TensorTypeId type_id() const {
diff --git a/aten/src/ATen/core/TensorMethods.h b/aten/src/ATen/core/TensorMethods.h
index 9e5093146..efe387d7a 100644
--- a/aten/src/ATen/core/TensorMethods.h
+++ b/aten/src/ATen/core/TensorMethods.h
@@ -10,33 +10,33 @@ namespace at {
 
 inline Tensor Tensor::toType(const Type & t, bool non_blocking) const {
-  if(type() == t)
+  if(dispatch_type() == t)
     return *this;
   return t.copy(*this, non_blocking);
 }
 
 inline Tensor Tensor::cpu() const {
-  return toType(type().cpu());
+  return toType(dispatch_type().cpu());
 }
 
 inline Tensor Tensor::cuda() const {
-  return toType(type().cuda());
+  return toType(dispatch_type().cuda());
 }
 
 inline Tensor Tensor::hip() const {
-  return toType(type().hip());
+  return toType(dispatch_type().hip());
 }
 
 inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) {
-  return type().copy_(*this, src, non_blocking);
+  return dispatch_type().copy_(*this, src, non_blocking);
 }
 
 inline Tensor Tensor::toType(ScalarType t) const {
-  return toType(type().toScalarType(t));
+  return toType(dispatch_type().toScalarType(t));
 }
 
 inline Tensor Tensor::toBackend(Backend b) const {
-  return toType(type().toBackend(b));
+  return toType(dispatch_type().toBackend(b));
 }
 
 inline TensorOptions Tensor::options() const {
@@ -50,1240 +50,1240
@@ inline void Tensor::backward( c10::optional gradient, bool keep_graph, bool create_graph) { - type().backward(*this, std::move(gradient), keep_graph, create_graph); + dispatch_type().backward(*this, std::move(gradient), keep_graph, create_graph); } inline void Tensor::set_data(Tensor new_data) { - type().set_data(*this, new_data); + dispatch_type().set_data(*this, new_data); } // all static inline to allow for inlining of the non-dynamic part of dispatch inline Tensor Tensor::abs() const { - return type().abs(*this); + return dispatch_type().abs(*this); } inline Tensor & Tensor::abs_() { - return type().abs_(*this); + return dispatch_type().abs_(*this); } inline Tensor Tensor::acos() const { - return type().acos(*this); + return dispatch_type().acos(*this); } inline Tensor & Tensor::acos_() { - return type().acos_(*this); + return dispatch_type().acos_(*this); } inline Tensor Tensor::add(const Tensor & other, Scalar alpha) const { - return type().add(*this, other, alpha); + return dispatch_type().add(*this, other, alpha); } inline Tensor & Tensor::add_(const Tensor & other, Scalar alpha) { - return type().add_(*this, other, alpha); + return dispatch_type().add_(*this, other, alpha); } inline Tensor Tensor::add(Scalar other, Scalar alpha) const { - return type().add(*this, other, alpha); + return dispatch_type().add(*this, other, alpha); } inline Tensor & Tensor::add_(Scalar other, Scalar alpha) { - return type().add_(*this, other, alpha); + return dispatch_type().add_(*this, other, alpha); } inline Tensor Tensor::addmv(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { - return type().addmv(*this, mat, vec, beta, alpha); + return dispatch_type().addmv(*this, mat, vec, beta, alpha); } inline Tensor & Tensor::addmv_(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) { - return type().addmv_(*this, mat, vec, beta, alpha); + return dispatch_type().addmv_(*this, mat, vec, beta, alpha); } inline Tensor Tensor::addr(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { - return type().addr(*this, vec1, vec2, beta, alpha); + return dispatch_type().addr(*this, vec1, vec2, beta, alpha); } inline Tensor & Tensor::addr_(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { - return type().addr_(*this, vec1, vec2, beta, alpha); + return dispatch_type().addr_(*this, vec1, vec2, beta, alpha); } inline Tensor Tensor::all(int64_t dim, bool keepdim) const { - return type().all(*this, dim, keepdim); + return dispatch_type().all(*this, dim, keepdim); } inline bool Tensor::allclose(const Tensor & other, double rtol, double atol, bool equal_nan) const { - return type().allclose(*this, other, rtol, atol, equal_nan); + return dispatch_type().allclose(*this, other, rtol, atol, equal_nan); } inline Tensor Tensor::any(int64_t dim, bool keepdim) const { - return type().any(*this, dim, keepdim); + return dispatch_type().any(*this, dim, keepdim); } inline Tensor Tensor::argmax(c10::optional dim, bool keepdim) const { - return type().argmax(*this, dim, keepdim); + return dispatch_type().argmax(*this, dim, keepdim); } inline Tensor Tensor::argmin(c10::optional dim, bool keepdim) const { - return type().argmin(*this, dim, keepdim); + return dispatch_type().argmin(*this, dim, keepdim); } inline Tensor Tensor::as_strided(IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) const { - return type().as_strided(*this, size, stride, storage_offset); + return dispatch_type().as_strided(*this, size, stride, storage_offset); } inline 
Tensor & Tensor::as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) { - return type().as_strided_(*this, size, stride, storage_offset); + return dispatch_type().as_strided_(*this, size, stride, storage_offset); } inline Tensor Tensor::asin() const { - return type().asin(*this); + return dispatch_type().asin(*this); } inline Tensor & Tensor::asin_() { - return type().asin_(*this); + return dispatch_type().asin_(*this); } inline Tensor Tensor::atan() const { - return type().atan(*this); + return dispatch_type().atan(*this); } inline Tensor & Tensor::atan_() { - return type().atan_(*this); + return dispatch_type().atan_(*this); } inline Tensor Tensor::baddbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { - return type().baddbmm(*this, batch1, batch2, beta, alpha); + return dispatch_type().baddbmm(*this, batch1, batch2, beta, alpha); } inline Tensor & Tensor::baddbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { - return type().baddbmm_(*this, batch1, batch2, beta, alpha); + return dispatch_type().baddbmm_(*this, batch1, batch2, beta, alpha); } inline Tensor Tensor::bernoulli(Generator * generator) const { - return type().bernoulli(*this, generator); + return dispatch_type().bernoulli(*this, generator); } inline Tensor & Tensor::bernoulli_(const Tensor & p, Generator * generator) { - return type().bernoulli_(*this, p, generator); + return dispatch_type().bernoulli_(*this, p, generator); } inline Tensor & Tensor::bernoulli_(double p, Generator * generator) { - return type().bernoulli_(*this, p, generator); + return dispatch_type().bernoulli_(*this, p, generator); } inline Tensor Tensor::bernoulli(double p, Generator * generator) const { - return type().bernoulli(*this, p, generator); + return dispatch_type().bernoulli(*this, p, generator); } inline Tensor Tensor::bincount(const Tensor & weights, int64_t minlength) const { - return type().bincount(*this, weights, minlength); + return dispatch_type().bincount(*this, weights, minlength); } inline Tensor Tensor::bmm(const Tensor & mat2) const { - return type().bmm(*this, mat2); + return dispatch_type().bmm(*this, mat2); } inline Tensor Tensor::ceil() const { - return type().ceil(*this); + return dispatch_type().ceil(*this); } inline Tensor & Tensor::ceil_() { - return type().ceil_(*this); + return dispatch_type().ceil_(*this); } inline std::vector Tensor::chunk(int64_t chunks, int64_t dim) const { - return type().chunk(*this, chunks, dim); + return dispatch_type().chunk(*this, chunks, dim); } inline Tensor Tensor::clamp(c10::optional min, c10::optional max) const { - return type().clamp(*this, min, max); + return dispatch_type().clamp(*this, min, max); } inline Tensor & Tensor::clamp_(c10::optional min, c10::optional max) { - return type().clamp_(*this, min, max); + return dispatch_type().clamp_(*this, min, max); } inline Tensor Tensor::clamp_max(Scalar max) const { - return type().clamp_max(*this, max); + return dispatch_type().clamp_max(*this, max); } inline Tensor & Tensor::clamp_max_(Scalar max) { - return type().clamp_max_(*this, max); + return dispatch_type().clamp_max_(*this, max); } inline Tensor Tensor::clamp_min(Scalar min) const { - return type().clamp_min(*this, min); + return dispatch_type().clamp_min(*this, min); } inline Tensor & Tensor::clamp_min_(Scalar min) { - return type().clamp_min_(*this, min); + return dispatch_type().clamp_min_(*this, min); } inline Tensor Tensor::contiguous() const { - return type().contiguous(*this); + return 
dispatch_type().contiguous(*this); } inline Tensor Tensor::cos() const { - return type().cos(*this); + return dispatch_type().cos(*this); } inline Tensor & Tensor::cos_() { - return type().cos_(*this); + return dispatch_type().cos_(*this); } inline Tensor Tensor::cosh() const { - return type().cosh(*this); + return dispatch_type().cosh(*this); } inline Tensor & Tensor::cosh_() { - return type().cosh_(*this); + return dispatch_type().cosh_(*this); } inline Tensor Tensor::cumsum(int64_t dim, ScalarType dtype) const { - return type().cumsum(*this, dim, dtype); + return dispatch_type().cumsum(*this, dim, dtype); } inline Tensor Tensor::cumsum(int64_t dim) const { - return type().cumsum(*this, dim); + return dispatch_type().cumsum(*this, dim); } inline Tensor Tensor::cumprod(int64_t dim, ScalarType dtype) const { - return type().cumprod(*this, dim, dtype); + return dispatch_type().cumprod(*this, dim, dtype); } inline Tensor Tensor::cumprod(int64_t dim) const { - return type().cumprod(*this, dim); + return dispatch_type().cumprod(*this, dim); } inline Tensor Tensor::det() const { - return type().det(*this); + return dispatch_type().det(*this); } inline Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) const { - return type().diag_embed(*this, offset, dim1, dim2); + return dispatch_type().diag_embed(*this, offset, dim1, dim2); } inline Tensor Tensor::diagflat(int64_t offset) const { - return type().diagflat(*this, offset); + return dispatch_type().diagflat(*this, offset); } inline Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const { - return type().diagonal(*this, offset, dim1, dim2); + return dispatch_type().diagonal(*this, offset, dim1, dim2); } inline Tensor Tensor::div(const Tensor & other) const { - return type().div(*this, other); + return dispatch_type().div(*this, other); } inline Tensor & Tensor::div_(const Tensor & other) { - return type().div_(*this, other); + return dispatch_type().div_(*this, other); } inline Tensor Tensor::div(Scalar other) const { - return type().div(*this, other); + return dispatch_type().div(*this, other); } inline Tensor & Tensor::div_(Scalar other) { - return type().div_(*this, other); + return dispatch_type().div_(*this, other); } inline Tensor Tensor::dot(const Tensor & tensor) const { - return type().dot(*this, tensor); + return dispatch_type().dot(*this, tensor); } inline Tensor & Tensor::resize_(IntArrayRef size) { - return type().resize_(*this, size); + return dispatch_type().resize_(*this, size); } inline Tensor Tensor::erf() const { - return type().erf(*this); + return dispatch_type().erf(*this); } inline Tensor & Tensor::erf_() { - return type().erf_(*this); + return dispatch_type().erf_(*this); } inline Tensor Tensor::erfc() const { - return type().erfc(*this); + return dispatch_type().erfc(*this); } inline Tensor & Tensor::erfc_() { - return type().erfc_(*this); + return dispatch_type().erfc_(*this); } inline Tensor Tensor::exp() const { - return type().exp(*this); + return dispatch_type().exp(*this); } inline Tensor & Tensor::exp_() { - return type().exp_(*this); + return dispatch_type().exp_(*this); } inline Tensor Tensor::expm1() const { - return type().expm1(*this); + return dispatch_type().expm1(*this); } inline Tensor & Tensor::expm1_() { - return type().expm1_(*this); + return dispatch_type().expm1_(*this); } inline Tensor Tensor::expand(IntArrayRef size, bool implicit) const { - return type().expand(*this, size, implicit); + return dispatch_type().expand(*this, size, implicit); } inline Tensor 
Tensor::expand_as(const Tensor & other) const { - return type().expand_as(*this, other); + return dispatch_type().expand_as(*this, other); } inline Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const { - return type().flatten(*this, start_dim, end_dim); + return dispatch_type().flatten(*this, start_dim, end_dim); } inline Tensor & Tensor::fill_(Scalar value) { - return type().fill_(*this, value); + return dispatch_type().fill_(*this, value); } inline Tensor & Tensor::fill_(const Tensor & value) { - return type().fill_(*this, value); + return dispatch_type().fill_(*this, value); } inline Tensor Tensor::floor() const { - return type().floor(*this); + return dispatch_type().floor(*this); } inline Tensor & Tensor::floor_() { - return type().floor_(*this); + return dispatch_type().floor_(*this); } inline Tensor Tensor::ger(const Tensor & vec2) const { - return type().ger(*this, vec2); + return dispatch_type().ger(*this, vec2); } inline Tensor Tensor::fft(int64_t signal_ndim, bool normalized) const { - return type().fft(*this, signal_ndim, normalized); + return dispatch_type().fft(*this, signal_ndim, normalized); } inline Tensor Tensor::ifft(int64_t signal_ndim, bool normalized) const { - return type().ifft(*this, signal_ndim, normalized); + return dispatch_type().ifft(*this, signal_ndim, normalized); } inline Tensor Tensor::rfft(int64_t signal_ndim, bool normalized, bool onesided) const { - return type().rfft(*this, signal_ndim, normalized, onesided); + return dispatch_type().rfft(*this, signal_ndim, normalized, onesided); } inline Tensor Tensor::irfft(int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) const { - return type().irfft(*this, signal_ndim, normalized, onesided, signal_sizes); + return dispatch_type().irfft(*this, signal_ndim, normalized, onesided, signal_sizes); } inline Tensor Tensor::index(TensorList indices) const { - return type().index(*this, indices); + return dispatch_type().index(*this, indices); } inline Tensor & Tensor::index_copy_(int64_t dim, const Tensor & index, const Tensor & source) { - return type().index_copy_(*this, dim, index, source); + return dispatch_type().index_copy_(*this, dim, index, source); } inline Tensor Tensor::index_copy(int64_t dim, const Tensor & index, const Tensor & source) const { - return type().index_copy(*this, dim, index, source); + return dispatch_type().index_copy(*this, dim, index, source); } inline Tensor & Tensor::index_put_(TensorList indices, const Tensor & values, bool accumulate) { - return type().index_put_(*this, indices, values, accumulate); + return dispatch_type().index_put_(*this, indices, values, accumulate); } inline Tensor Tensor::index_put(TensorList indices, const Tensor & values, bool accumulate) const { - return type().index_put(*this, indices, values, accumulate); + return dispatch_type().index_put(*this, indices, values, accumulate); } inline Tensor Tensor::inverse() const { - return type().inverse(*this); + return dispatch_type().inverse(*this); } inline Tensor Tensor::isclose(const Tensor & other, double rtol, double atol, bool equal_nan) const { - return type().isclose(*this, other, rtol, atol, equal_nan); + return dispatch_type().isclose(*this, other, rtol, atol, equal_nan); } inline bool Tensor::is_distributed() const { - return type().is_distributed(*this); + return dispatch_type().is_distributed(*this); } inline bool Tensor::is_floating_point() const { - return type().is_floating_point(*this); + return dispatch_type().is_floating_point(*this); } inline bool 
Tensor::is_complex() const { - return type().is_complex(*this); + return dispatch_type().is_complex(*this); } inline bool Tensor::is_nonzero() const { - return type().is_nonzero(*this); + return dispatch_type().is_nonzero(*this); } inline bool Tensor::is_same_size(const Tensor & other) const { - return type().is_same_size(*this, other); + return dispatch_type().is_same_size(*this, other); } inline bool Tensor::is_signed() const { - return type().is_signed(*this); + return dispatch_type().is_signed(*this); } inline std::tuple Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const { - return type().kthvalue(*this, k, dim, keepdim); + return dispatch_type().kthvalue(*this, k, dim, keepdim); } inline Tensor Tensor::log() const { - return type().log(*this); + return dispatch_type().log(*this); } inline Tensor & Tensor::log_() { - return type().log_(*this); + return dispatch_type().log_(*this); } inline Tensor Tensor::log10() const { - return type().log10(*this); + return dispatch_type().log10(*this); } inline Tensor & Tensor::log10_() { - return type().log10_(*this); + return dispatch_type().log10_(*this); } inline Tensor Tensor::log1p() const { - return type().log1p(*this); + return dispatch_type().log1p(*this); } inline Tensor & Tensor::log1p_() { - return type().log1p_(*this); + return dispatch_type().log1p_(*this); } inline Tensor Tensor::log2() const { - return type().log2(*this); + return dispatch_type().log2(*this); } inline Tensor & Tensor::log2_() { - return type().log2_(*this); + return dispatch_type().log2_(*this); } inline Tensor Tensor::logdet() const { - return type().logdet(*this); + return dispatch_type().logdet(*this); } inline Tensor Tensor::log_softmax(int64_t dim, ScalarType dtype) const { - return type().log_softmax(*this, dim, dtype); + return dispatch_type().log_softmax(*this, dim, dtype); } inline Tensor Tensor::log_softmax(int64_t dim) const { - return type().log_softmax(*this, dim); + return dispatch_type().log_softmax(*this, dim); } inline Tensor Tensor::logsumexp(IntArrayRef dim, bool keepdim) const { - return type().logsumexp(*this, dim, keepdim); + return dispatch_type().logsumexp(*this, dim, keepdim); } inline Tensor Tensor::matmul(const Tensor & other) const { - return type().matmul(*this, other); + return dispatch_type().matmul(*this, other); } inline Tensor Tensor::matrix_power(int64_t n) const { - return type().matrix_power(*this, n); + return dispatch_type().matrix_power(*this, n); } inline std::tuple Tensor::max(int64_t dim, bool keepdim) const { - return type().max(*this, dim, keepdim); + return dispatch_type().max(*this, dim, keepdim); } inline Tensor Tensor::max_values(IntArrayRef dim, bool keepdim) const { - return type().max_values(*this, dim, keepdim); + return dispatch_type().max_values(*this, dim, keepdim); } inline Tensor Tensor::mean(ScalarType dtype) const { - return type().mean(*this, dtype); + return dispatch_type().mean(*this, dtype); } inline Tensor Tensor::mean() const { - return type().mean(*this); + return dispatch_type().mean(*this); } inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim, ScalarType dtype) const { - return type().mean(*this, dim, keepdim, dtype); + return dispatch_type().mean(*this, dim, keepdim, dtype); } inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim) const { - return type().mean(*this, dim, keepdim); + return dispatch_type().mean(*this, dim, keepdim); } inline Tensor Tensor::mean(IntArrayRef dim, ScalarType dtype) const { - return type().mean(*this, dim, dtype); + return dispatch_type().mean(*this, 
dim, dtype); } inline std::tuple Tensor::median(int64_t dim, bool keepdim) const { - return type().median(*this, dim, keepdim); + return dispatch_type().median(*this, dim, keepdim); } inline std::tuple Tensor::min(int64_t dim, bool keepdim) const { - return type().min(*this, dim, keepdim); + return dispatch_type().min(*this, dim, keepdim); } inline Tensor Tensor::min_values(IntArrayRef dim, bool keepdim) const { - return type().min_values(*this, dim, keepdim); + return dispatch_type().min_values(*this, dim, keepdim); } inline Tensor Tensor::mm(const Tensor & mat2) const { - return type().mm(*this, mat2); + return dispatch_type().mm(*this, mat2); } inline std::tuple Tensor::mode(int64_t dim, bool keepdim) const { - return type().mode(*this, dim, keepdim); + return dispatch_type().mode(*this, dim, keepdim); } inline Tensor Tensor::mul(const Tensor & other) const { - return type().mul(*this, other); + return dispatch_type().mul(*this, other); } inline Tensor & Tensor::mul_(const Tensor & other) { - return type().mul_(*this, other); + return dispatch_type().mul_(*this, other); } inline Tensor Tensor::mul(Scalar other) const { - return type().mul(*this, other); + return dispatch_type().mul(*this, other); } inline Tensor & Tensor::mul_(Scalar other) { - return type().mul_(*this, other); + return dispatch_type().mul_(*this, other); } inline Tensor Tensor::mv(const Tensor & vec) const { - return type().mv(*this, vec); + return dispatch_type().mv(*this, vec); } inline Tensor Tensor::mvlgamma(int64_t p) const { - return type().mvlgamma(*this, p); + return dispatch_type().mvlgamma(*this, p); } inline Tensor & Tensor::mvlgamma_(int64_t p) { - return type().mvlgamma_(*this, p); + return dispatch_type().mvlgamma_(*this, p); } inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const { - return type().narrow_copy(*this, dim, start, length); + return dispatch_type().narrow_copy(*this, dim, start, length); } inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const { - return type().narrow(*this, dim, start, length); + return dispatch_type().narrow(*this, dim, start, length); } inline Tensor Tensor::permute(IntArrayRef dims) const { - return type().permute(*this, dims); + return dispatch_type().permute(*this, dims); } inline Tensor Tensor::pin_memory() const { - return type().pin_memory(*this); + return dispatch_type().pin_memory(*this); } inline Tensor Tensor::pinverse(double rcond) const { - return type().pinverse(*this, rcond); + return dispatch_type().pinverse(*this, rcond); } inline Tensor Tensor::repeat(IntArrayRef repeats) const { - return type().repeat(*this, repeats); + return dispatch_type().repeat(*this, repeats); } inline Tensor Tensor::reshape(IntArrayRef shape) const { - return type().reshape(*this, shape); + return dispatch_type().reshape(*this, shape); } inline Tensor Tensor::reshape_as(const Tensor & other) const { - return type().reshape_as(*this, other); + return dispatch_type().reshape_as(*this, other); } inline Tensor Tensor::round() const { - return type().round(*this); + return dispatch_type().round(*this); } inline Tensor & Tensor::round_() { - return type().round_(*this); + return dispatch_type().round_(*this); } inline Tensor Tensor::relu() const { - return type().relu(*this); + return dispatch_type().relu(*this); } inline Tensor & Tensor::relu_() { - return type().relu_(*this); + return dispatch_type().relu_(*this); } inline Tensor Tensor::prelu(const Tensor & weight) const { - return type().prelu(*this, weight); + return 
dispatch_type().prelu(*this, weight); } inline std::tuple Tensor::prelu_backward(const Tensor & grad_output, const Tensor & weight) const { - return type().prelu_backward(grad_output, *this, weight); + return dispatch_type().prelu_backward(grad_output, *this, weight); } inline Tensor Tensor::hardshrink(Scalar lambd) const { - return type().hardshrink(*this, lambd); + return dispatch_type().hardshrink(*this, lambd); } inline Tensor Tensor::hardshrink_backward(const Tensor & grad_out, Scalar lambd) const { - return type().hardshrink_backward(grad_out, *this, lambd); + return dispatch_type().hardshrink_backward(grad_out, *this, lambd); } inline Tensor Tensor::rsqrt() const { - return type().rsqrt(*this); + return dispatch_type().rsqrt(*this); } inline Tensor & Tensor::rsqrt_() { - return type().rsqrt_(*this); + return dispatch_type().rsqrt_(*this); } inline Tensor Tensor::select(int64_t dim, int64_t index) const { - return type().select(*this, dim, index); + return dispatch_type().select(*this, dim, index); } inline Tensor Tensor::sigmoid() const { - return type().sigmoid(*this); + return dispatch_type().sigmoid(*this); } inline Tensor & Tensor::sigmoid_() { - return type().sigmoid_(*this); + return dispatch_type().sigmoid_(*this); } inline Tensor Tensor::sin() const { - return type().sin(*this); + return dispatch_type().sin(*this); } inline Tensor & Tensor::sin_() { - return type().sin_(*this); + return dispatch_type().sin_(*this); } inline Tensor Tensor::sinh() const { - return type().sinh(*this); + return dispatch_type().sinh(*this); } inline Tensor & Tensor::sinh_() { - return type().sinh_(*this); + return dispatch_type().sinh_(*this); } inline Tensor Tensor::detach() const { - return type().detach(*this); + return dispatch_type().detach(*this); } inline Tensor & Tensor::detach_() { - return type().detach_(*this); + return dispatch_type().detach_(*this); } inline int64_t Tensor::size(int64_t dim) const { - return type().size(*this, dim); + return dispatch_type().size(*this, dim); } inline Tensor Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t step) const { - return type().slice(*this, dim, start, end, step); + return dispatch_type().slice(*this, dim, start, end, step); } inline std::tuple Tensor::slogdet() const { - return type().slogdet(*this); + return dispatch_type().slogdet(*this); } inline Tensor Tensor::smm(const Tensor & mat2) const { - return type().smm(*this, mat2); + return dispatch_type().smm(*this, mat2); } inline Tensor Tensor::softmax(int64_t dim, ScalarType dtype) const { - return type().softmax(*this, dim, dtype); + return dispatch_type().softmax(*this, dim, dtype); } inline Tensor Tensor::softmax(int64_t dim) const { - return type().softmax(*this, dim); + return dispatch_type().softmax(*this, dim); } inline std::vector Tensor::split(int64_t split_size, int64_t dim) const { - return type().split(*this, split_size, dim); + return dispatch_type().split(*this, split_size, dim); } inline std::vector Tensor::split_with_sizes(IntArrayRef split_sizes, int64_t dim) const { - return type().split_with_sizes(*this, split_sizes, dim); + return dispatch_type().split_with_sizes(*this, split_sizes, dim); } inline Tensor Tensor::squeeze() const { - return type().squeeze(*this); + return dispatch_type().squeeze(*this); } inline Tensor Tensor::squeeze(int64_t dim) const { - return type().squeeze(*this, dim); + return dispatch_type().squeeze(*this, dim); } inline Tensor & Tensor::squeeze_() { - return type().squeeze_(*this); + return dispatch_type().squeeze_(*this); } inline 
Tensor & Tensor::squeeze_(int64_t dim) { - return type().squeeze_(*this, dim); + return dispatch_type().squeeze_(*this, dim); } inline Tensor Tensor::sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { - return type().sspaddmm(*this, mat1, mat2, beta, alpha); + return dispatch_type().sspaddmm(*this, mat1, mat2, beta, alpha); } inline Tensor Tensor::stft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const Tensor & window, bool normalized, bool onesided) const { - return type().stft(*this, n_fft, hop_length, win_length, window, normalized, onesided); + return dispatch_type().stft(*this, n_fft, hop_length, win_length, window, normalized, onesided); } inline int64_t Tensor::stride(int64_t dim) const { - return type().stride(*this, dim); + return dispatch_type().stride(*this, dim); } inline Tensor Tensor::sum(ScalarType dtype) const { - return type().sum(*this, dtype); + return dispatch_type().sum(*this, dtype); } inline Tensor Tensor::sum() const { - return type().sum(*this); + return dispatch_type().sum(*this); } inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim, ScalarType dtype) const { - return type().sum(*this, dim, keepdim, dtype); + return dispatch_type().sum(*this, dim, keepdim, dtype); } inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim) const { - return type().sum(*this, dim, keepdim); + return dispatch_type().sum(*this, dim, keepdim); } inline Tensor Tensor::sum(IntArrayRef dim, ScalarType dtype) const { - return type().sum(*this, dim, dtype); + return dispatch_type().sum(*this, dim, dtype); } inline Tensor Tensor::sum_to_size(IntArrayRef size) const { - return type().sum_to_size(*this, size); + return dispatch_type().sum_to_size(*this, size); } inline Tensor Tensor::sqrt() const { - return type().sqrt(*this); + return dispatch_type().sqrt(*this); } inline Tensor & Tensor::sqrt_() { - return type().sqrt_(*this); + return dispatch_type().sqrt_(*this); } inline Tensor Tensor::std(bool unbiased) const { - return type().std(*this, unbiased); + return dispatch_type().std(*this, unbiased); } inline Tensor Tensor::std(IntArrayRef dim, bool unbiased, bool keepdim) const { - return type().std(*this, dim, unbiased, keepdim); + return dispatch_type().std(*this, dim, unbiased, keepdim); } inline Tensor Tensor::prod(ScalarType dtype) const { - return type().prod(*this, dtype); + return dispatch_type().prod(*this, dtype); } inline Tensor Tensor::prod() const { - return type().prod(*this); + return dispatch_type().prod(*this); } inline Tensor Tensor::prod(int64_t dim, bool keepdim, ScalarType dtype) const { - return type().prod(*this, dim, keepdim, dtype); + return dispatch_type().prod(*this, dim, keepdim, dtype); } inline Tensor Tensor::prod(int64_t dim, bool keepdim) const { - return type().prod(*this, dim, keepdim); + return dispatch_type().prod(*this, dim, keepdim); } inline Tensor Tensor::prod(int64_t dim, ScalarType dtype) const { - return type().prod(*this, dim, dtype); + return dispatch_type().prod(*this, dim, dtype); } inline Tensor Tensor::t() const { - return type().t(*this); + return dispatch_type().t(*this); } inline Tensor & Tensor::t_() { - return type().t_(*this); + return dispatch_type().t_(*this); } inline Tensor Tensor::tan() const { - return type().tan(*this); + return dispatch_type().tan(*this); } inline Tensor & Tensor::tan_() { - return type().tan_(*this); + return dispatch_type().tan_(*this); } inline Tensor Tensor::tanh() const { - return type().tanh(*this); + return dispatch_type().tanh(*this); } inline 
Tensor & Tensor::tanh_() { - return type().tanh_(*this); + return dispatch_type().tanh_(*this); } inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const { - return type().transpose(*this, dim0, dim1); + return dispatch_type().transpose(*this, dim0, dim1); } inline Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) { - return type().transpose_(*this, dim0, dim1); + return dispatch_type().transpose_(*this, dim0, dim1); } inline Tensor Tensor::flip(IntArrayRef dims) const { - return type().flip(*this, dims); + return dispatch_type().flip(*this, dims); } inline Tensor Tensor::roll(IntArrayRef shifts, IntArrayRef dims) const { - return type().roll(*this, shifts, dims); + return dispatch_type().roll(*this, shifts, dims); } inline Tensor Tensor::rot90(int64_t k, IntArrayRef dims) const { - return type().rot90(*this, k, dims); + return dispatch_type().rot90(*this, k, dims); } inline Tensor Tensor::trunc() const { - return type().trunc(*this); + return dispatch_type().trunc(*this); } inline Tensor & Tensor::trunc_() { - return type().trunc_(*this); + return dispatch_type().trunc_(*this); } inline Tensor Tensor::type_as(const Tensor & other) const { - return type().type_as(*this, other); + return dispatch_type().type_as(*this, other); } inline Tensor Tensor::unsqueeze(int64_t dim) const { - return type().unsqueeze(*this, dim); + return dispatch_type().unsqueeze(*this, dim); } inline Tensor & Tensor::unsqueeze_(int64_t dim) { - return type().unsqueeze_(*this, dim); + return dispatch_type().unsqueeze_(*this, dim); } inline Tensor Tensor::var(bool unbiased) const { - return type().var(*this, unbiased); + return dispatch_type().var(*this, unbiased); } inline Tensor Tensor::var(IntArrayRef dim, bool unbiased, bool keepdim) const { - return type().var(*this, dim, unbiased, keepdim); + return dispatch_type().var(*this, dim, unbiased, keepdim); } inline Tensor Tensor::view_as(const Tensor & other) const { - return type().view_as(*this, other); + return dispatch_type().view_as(*this, other); } inline Tensor Tensor::where(const Tensor & condition, const Tensor & other) const { - return type().where(condition, *this, other); + return dispatch_type().where(condition, *this, other); } inline Tensor Tensor::norm(c10::optional p, ScalarType dtype) const { - return type().norm(*this, p, dtype); + return dispatch_type().norm(*this, p, dtype); } inline Tensor Tensor::norm(Scalar p) const { - return type().norm(*this, p); + return dispatch_type().norm(*this, p); } inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype) const { - return type().norm(*this, p, dim, keepdim, dtype); + return dispatch_type().norm(*this, p, dim, keepdim, dtype); } inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdim) const { - return type().norm(*this, p, dim, keepdim); + return dispatch_type().norm(*this, p, dim, keepdim); } inline Tensor Tensor::clone() const { - return type().clone(*this); + return dispatch_type().clone(*this); } inline Tensor & Tensor::resize_as_(const Tensor & the_template) { - return type().resize_as_(*this, the_template); + return dispatch_type().resize_as_(*this, the_template); } inline Tensor Tensor::pow(Scalar exponent) const { - return type().pow(*this, exponent); + return dispatch_type().pow(*this, exponent); } inline Tensor & Tensor::zero_() { - return type().zero_(*this); + return dispatch_type().zero_(*this); } inline Tensor Tensor::sub(const Tensor & other, Scalar alpha) const { - return type().sub(*this, other, alpha); + return 
dispatch_type().sub(*this, other, alpha); } inline Tensor & Tensor::sub_(const Tensor & other, Scalar alpha) { - return type().sub_(*this, other, alpha); + return dispatch_type().sub_(*this, other, alpha); } inline Tensor Tensor::sub(Scalar other, Scalar alpha) const { - return type().sub(*this, other, alpha); + return dispatch_type().sub(*this, other, alpha); } inline Tensor & Tensor::sub_(Scalar other, Scalar alpha) { - return type().sub_(*this, other, alpha); + return dispatch_type().sub_(*this, other, alpha); } inline Tensor Tensor::addmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { - return type().addmm(*this, mat1, mat2, beta, alpha); + return dispatch_type().addmm(*this, mat1, mat2, beta, alpha); } inline Tensor & Tensor::addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { - return type().addmm_(*this, mat1, mat2, beta, alpha); + return dispatch_type().addmm_(*this, mat1, mat2, beta, alpha); } inline Tensor & Tensor::sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { - return type().sparse_resize_(*this, size, sparse_dim, dense_dim); + return dispatch_type().sparse_resize_(*this, size, sparse_dim, dense_dim); } inline Tensor & Tensor::sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { - return type().sparse_resize_and_clear_(*this, size, sparse_dim, dense_dim); + return dispatch_type().sparse_resize_and_clear_(*this, size, sparse_dim, dense_dim); } inline Tensor Tensor::sparse_mask(SparseTensorRef mask) const { - return type().sparse_mask(*this, mask); + return dispatch_type().sparse_mask(*this, mask); } inline Tensor Tensor::to_dense() const { - return type().to_dense(*this); + return dispatch_type().to_dense(*this); } inline int64_t Tensor::sparse_dim() const { - return type().sparse_dim(*this); + return dispatch_type().sparse_dim(*this); } inline int64_t Tensor::_dimI() const { - return type()._dimI(*this); + return dispatch_type()._dimI(*this); } inline int64_t Tensor::dense_dim() const { - return type().dense_dim(*this); + return dispatch_type().dense_dim(*this); } inline int64_t Tensor::_dimV() const { - return type()._dimV(*this); + return dispatch_type()._dimV(*this); } inline int64_t Tensor::_nnz() const { - return type()._nnz(*this); + return dispatch_type()._nnz(*this); } inline Tensor Tensor::coalesce() const { - return type().coalesce(*this); + return dispatch_type().coalesce(*this); } inline bool Tensor::is_coalesced() const { - return type().is_coalesced(*this); + return dispatch_type().is_coalesced(*this); } inline Tensor Tensor::_indices() const { - return type()._indices(*this); + return dispatch_type()._indices(*this); } inline Tensor Tensor::_values() const { - return type()._values(*this); + return dispatch_type()._values(*this); } inline Tensor & Tensor::_coalesced_(bool coalesced) { - return type()._coalesced_(*this, coalesced); + return dispatch_type()._coalesced_(*this, coalesced); } inline Tensor Tensor::indices() const { - return type().indices(*this); + return dispatch_type().indices(*this); } inline Tensor Tensor::values() const { - return type().values(*this); + return dispatch_type().values(*this); } inline int64_t Tensor::numel() const { - return type().numel(*this); + return dispatch_type().numel(*this); } inline std::vector Tensor::unbind(int64_t dim) const { - return type().unbind(*this, dim); + return dispatch_type().unbind(*this, dim); } inline Tensor Tensor::to_sparse(int64_t sparse_dim) const { - return type().to_sparse(*this, 
sparse_dim); + return dispatch_type().to_sparse(*this, sparse_dim); } inline Tensor Tensor::to_sparse() const { - return type().to_sparse(*this); + return dispatch_type().to_sparse(*this); } inline Tensor Tensor::quantize_linear(double scale, int64_t zero_point) const { - return type().quantize_linear(*this, scale, zero_point); + return dispatch_type().quantize_linear(*this, scale, zero_point); } inline Tensor Tensor::dequantize() const { - return type().dequantize(*this); + return dispatch_type().dequantize(*this); } inline Scalar Tensor::q_scale() const { - return type().q_scale(*this); + return dispatch_type().q_scale(*this); } inline Scalar Tensor::q_zero_point() const { - return type().q_zero_point(*this); + return dispatch_type().q_zero_point(*this); } inline Tensor Tensor::to(const TensorOptions & options, bool non_blocking, bool copy) const { - return type().to(*this, options, non_blocking, copy); + return dispatch_type().to(*this, options, non_blocking, copy); } inline Tensor Tensor::to(Device device, ScalarType dtype, bool non_blocking, bool copy) const { - return type().to(*this, device, dtype, non_blocking, copy); + return dispatch_type().to(*this, device, dtype, non_blocking, copy); } inline Tensor Tensor::to(ScalarType dtype, bool non_blocking, bool copy) const { - return type().to(*this, dtype, non_blocking, copy); + return dispatch_type().to(*this, dtype, non_blocking, copy); } inline Tensor Tensor::to(const Tensor & other, bool non_blocking, bool copy) const { - return type().to(*this, other, non_blocking, copy); + return dispatch_type().to(*this, other, non_blocking, copy); } inline Scalar Tensor::item() const { - return type().item(*this); + return dispatch_type().item(*this); } inline void* Tensor::data_ptr() const { - return type().data_ptr(*this); + return dispatch_type().data_ptr(*this); } inline Tensor & Tensor::set_(Storage source) { - return type().set_(*this, source); + return dispatch_type().set_(*this, source); } inline Tensor & Tensor::set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) { - return type().set_(*this, source, storage_offset, size, stride); + return dispatch_type().set_(*this, source, storage_offset, size, stride); } inline Tensor & Tensor::set_(const Tensor & source) { - return type().set_(*this, source); + return dispatch_type().set_(*this, source); } inline Tensor & Tensor::set_() { - return type().set_(*this); + return dispatch_type().set_(*this); } inline bool Tensor::is_set_to(const Tensor & tensor) const { - return type().is_set_to(*this, tensor); + return dispatch_type().is_set_to(*this, tensor); } inline Tensor & Tensor::masked_fill_(const Tensor & mask, Scalar value) { - return type().masked_fill_(*this, mask, value); + return dispatch_type().masked_fill_(*this, mask, value); } inline Tensor Tensor::masked_fill(const Tensor & mask, Scalar value) const { - return type().masked_fill(*this, mask, value); + return dispatch_type().masked_fill(*this, mask, value); } inline Tensor & Tensor::masked_fill_(const Tensor & mask, const Tensor & value) { - return type().masked_fill_(*this, mask, value); + return dispatch_type().masked_fill_(*this, mask, value); } inline Tensor Tensor::masked_fill(const Tensor & mask, const Tensor & value) const { - return type().masked_fill(*this, mask, value); + return dispatch_type().masked_fill(*this, mask, value); } inline Tensor & Tensor::masked_scatter_(const Tensor & mask, const Tensor & source) { - return type().masked_scatter_(*this, mask, source); + return 
dispatch_type().masked_scatter_(*this, mask, source); } inline Tensor Tensor::masked_scatter(const Tensor & mask, const Tensor & source) const { - return type().masked_scatter(*this, mask, source); + return dispatch_type().masked_scatter(*this, mask, source); } inline Tensor Tensor::view(IntArrayRef size) const { - return type().view(*this, size); + return dispatch_type().view(*this, size); } inline Tensor & Tensor::put_(const Tensor & index, const Tensor & source, bool accumulate) { - return type().put_(*this, index, source, accumulate); + return dispatch_type().put_(*this, index, source, accumulate); } inline Tensor & Tensor::index_add_(int64_t dim, const Tensor & index, const Tensor & source) { - return type().index_add_(*this, dim, index, source); + return dispatch_type().index_add_(*this, dim, index, source); } inline Tensor Tensor::index_add(int64_t dim, const Tensor & index, const Tensor & source) const { - return type().index_add(*this, dim, index, source); + return dispatch_type().index_add(*this, dim, index, source); } inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, Scalar value) { - return type().index_fill_(*this, dim, index, value); + return dispatch_type().index_fill_(*this, dim, index, value); } inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, Scalar value) const { - return type().index_fill(*this, dim, index, value); + return dispatch_type().index_fill(*this, dim, index, value); } inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, const Tensor & value) { - return type().index_fill_(*this, dim, index, value); + return dispatch_type().index_fill_(*this, dim, index, value); } inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, const Tensor & value) const { - return type().index_fill(*this, dim, index, value); + return dispatch_type().index_fill(*this, dim, index, value); } inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, const Tensor & src) { - return type().scatter_(*this, dim, index, src); + return dispatch_type().scatter_(*this, dim, index, src); } inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, const Tensor & src) const { - return type().scatter(*this, dim, index, src); + return dispatch_type().scatter(*this, dim, index, src); } inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, Scalar value) { - return type().scatter_(*this, dim, index, value); + return dispatch_type().scatter_(*this, dim, index, value); } inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, Scalar value) const { - return type().scatter(*this, dim, index, value); + return dispatch_type().scatter(*this, dim, index, value); } inline Tensor & Tensor::scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) { - return type().scatter_add_(*this, dim, index, src); + return dispatch_type().scatter_add_(*this, dim, index, src); } inline Tensor Tensor::scatter_add(int64_t dim, const Tensor & index, const Tensor & src) const { - return type().scatter_add(*this, dim, index, src); + return dispatch_type().scatter_add(*this, dim, index, src); } inline Tensor & Tensor::lt_(Scalar other) { - return type().lt_(*this, other); + return dispatch_type().lt_(*this, other); } inline Tensor & Tensor::lt_(const Tensor & other) { - return type().lt_(*this, other); + return dispatch_type().lt_(*this, other); } inline Tensor & Tensor::gt_(Scalar other) { - return type().gt_(*this, other); + return dispatch_type().gt_(*this, other); } inline Tensor & Tensor::gt_(const Tensor & 
other) { - return type().gt_(*this, other); + return dispatch_type().gt_(*this, other); } inline Tensor & Tensor::le_(Scalar other) { - return type().le_(*this, other); + return dispatch_type().le_(*this, other); } inline Tensor & Tensor::le_(const Tensor & other) { - return type().le_(*this, other); + return dispatch_type().le_(*this, other); } inline Tensor & Tensor::ge_(Scalar other) { - return type().ge_(*this, other); + return dispatch_type().ge_(*this, other); } inline Tensor & Tensor::ge_(const Tensor & other) { - return type().ge_(*this, other); + return dispatch_type().ge_(*this, other); } inline Tensor & Tensor::eq_(Scalar other) { - return type().eq_(*this, other); + return dispatch_type().eq_(*this, other); } inline Tensor & Tensor::eq_(const Tensor & other) { - return type().eq_(*this, other); + return dispatch_type().eq_(*this, other); } inline Tensor & Tensor::ne_(Scalar other) { - return type().ne_(*this, other); + return dispatch_type().ne_(*this, other); } inline Tensor & Tensor::ne_(const Tensor & other) { - return type().ne_(*this, other); + return dispatch_type().ne_(*this, other); } inline Tensor Tensor::__and__(Scalar other) const { - return type().__and__(*this, other); + return dispatch_type().__and__(*this, other); } inline Tensor Tensor::__and__(const Tensor & other) const { - return type().__and__(*this, other); + return dispatch_type().__and__(*this, other); } inline Tensor & Tensor::__iand__(Scalar other) { - return type().__iand__(*this, other); + return dispatch_type().__iand__(*this, other); } inline Tensor & Tensor::__iand__(const Tensor & other) { - return type().__iand__(*this, other); + return dispatch_type().__iand__(*this, other); } inline Tensor Tensor::__or__(Scalar other) const { - return type().__or__(*this, other); + return dispatch_type().__or__(*this, other); } inline Tensor Tensor::__or__(const Tensor & other) const { - return type().__or__(*this, other); + return dispatch_type().__or__(*this, other); } inline Tensor & Tensor::__ior__(Scalar other) { - return type().__ior__(*this, other); + return dispatch_type().__ior__(*this, other); } inline Tensor & Tensor::__ior__(const Tensor & other) { - return type().__ior__(*this, other); + return dispatch_type().__ior__(*this, other); } inline Tensor Tensor::__xor__(Scalar other) const { - return type().__xor__(*this, other); + return dispatch_type().__xor__(*this, other); } inline Tensor Tensor::__xor__(const Tensor & other) const { - return type().__xor__(*this, other); + return dispatch_type().__xor__(*this, other); } inline Tensor & Tensor::__ixor__(Scalar other) { - return type().__ixor__(*this, other); + return dispatch_type().__ixor__(*this, other); } inline Tensor & Tensor::__ixor__(const Tensor & other) { - return type().__ixor__(*this, other); + return dispatch_type().__ixor__(*this, other); } inline Tensor Tensor::__lshift__(Scalar other) const { - return type().__lshift__(*this, other); + return dispatch_type().__lshift__(*this, other); } inline Tensor Tensor::__lshift__(const Tensor & other) const { - return type().__lshift__(*this, other); + return dispatch_type().__lshift__(*this, other); } inline Tensor & Tensor::__ilshift__(Scalar other) { - return type().__ilshift__(*this, other); + return dispatch_type().__ilshift__(*this, other); } inline Tensor & Tensor::__ilshift__(const Tensor & other) { - return type().__ilshift__(*this, other); + return dispatch_type().__ilshift__(*this, other); } inline Tensor Tensor::__rshift__(Scalar other) const { - return type().__rshift__(*this, other); 
+ return dispatch_type().__rshift__(*this, other); } inline Tensor Tensor::__rshift__(const Tensor & other) const { - return type().__rshift__(*this, other); + return dispatch_type().__rshift__(*this, other); } inline Tensor & Tensor::__irshift__(Scalar other) { - return type().__irshift__(*this, other); + return dispatch_type().__irshift__(*this, other); } inline Tensor & Tensor::__irshift__(const Tensor & other) { - return type().__irshift__(*this, other); + return dispatch_type().__irshift__(*this, other); } inline Tensor & Tensor::lgamma_() { - return type().lgamma_(*this); + return dispatch_type().lgamma_(*this); } inline Tensor & Tensor::atan2_(const Tensor & other) { - return type().atan2_(*this, other); + return dispatch_type().atan2_(*this, other); } inline Tensor & Tensor::tril_(int64_t diagonal) { - return type().tril_(*this, diagonal); + return dispatch_type().tril_(*this, diagonal); } inline Tensor & Tensor::triu_(int64_t diagonal) { - return type().triu_(*this, diagonal); + return dispatch_type().triu_(*this, diagonal); } inline Tensor & Tensor::digamma_() { - return type().digamma_(*this); + return dispatch_type().digamma_(*this); } inline Tensor & Tensor::polygamma_(int64_t n) { - return type().polygamma_(*this, n); + return dispatch_type().polygamma_(*this, n); } inline Tensor & Tensor::erfinv_() { - return type().erfinv_(*this); + return dispatch_type().erfinv_(*this); } inline Tensor & Tensor::frac_() { - return type().frac_(*this); + return dispatch_type().frac_(*this); } inline Tensor & Tensor::renorm_(Scalar p, int64_t dim, Scalar maxnorm) { - return type().renorm_(*this, p, dim, maxnorm); + return dispatch_type().renorm_(*this, p, dim, maxnorm); } inline Tensor & Tensor::reciprocal_() { - return type().reciprocal_(*this); + return dispatch_type().reciprocal_(*this); } inline Tensor & Tensor::neg_() { - return type().neg_(*this); + return dispatch_type().neg_(*this); } inline Tensor & Tensor::pow_(Scalar exponent) { - return type().pow_(*this, exponent); + return dispatch_type().pow_(*this, exponent); } inline Tensor & Tensor::pow_(const Tensor & exponent) { - return type().pow_(*this, exponent); + return dispatch_type().pow_(*this, exponent); } inline Tensor & Tensor::lerp_(const Tensor & end, Scalar weight) { - return type().lerp_(*this, end, weight); + return dispatch_type().lerp_(*this, end, weight); } inline Tensor & Tensor::lerp_(const Tensor & end, const Tensor & weight) { - return type().lerp_(*this, end, weight); + return dispatch_type().lerp_(*this, end, weight); } inline Tensor & Tensor::sign_() { - return type().sign_(*this); + return dispatch_type().sign_(*this); } inline Tensor & Tensor::fmod_(Scalar other) { - return type().fmod_(*this, other); + return dispatch_type().fmod_(*this, other); } inline Tensor & Tensor::fmod_(const Tensor & other) { - return type().fmod_(*this, other); + return dispatch_type().fmod_(*this, other); } inline Tensor & Tensor::remainder_(Scalar other) { - return type().remainder_(*this, other); + return dispatch_type().remainder_(*this, other); } inline Tensor & Tensor::remainder_(const Tensor & other) { - return type().remainder_(*this, other); + return dispatch_type().remainder_(*this, other); } inline Tensor & Tensor::addbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { - return type().addbmm_(*this, batch1, batch2, beta, alpha); + return dispatch_type().addbmm_(*this, batch1, batch2, beta, alpha); } inline Tensor Tensor::addbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar 
alpha) const { - return type().addbmm(*this, batch1, batch2, beta, alpha); + return dispatch_type().addbmm(*this, batch1, batch2, beta, alpha); } inline Tensor & Tensor::addcmul_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) { - return type().addcmul_(*this, tensor1, tensor2, value); + return dispatch_type().addcmul_(*this, tensor1, tensor2, value); } inline Tensor & Tensor::addcdiv_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) { - return type().addcdiv_(*this, tensor1, tensor2, value); + return dispatch_type().addcdiv_(*this, tensor1, tensor2, value); } inline Tensor & Tensor::random_(int64_t from, int64_t to, Generator * generator) { - return type().random_(*this, from, to, generator); + return dispatch_type().random_(*this, from, to, generator); } inline Tensor & Tensor::random_(int64_t to, Generator * generator) { - return type().random_(*this, to, generator); + return dispatch_type().random_(*this, to, generator); } inline Tensor & Tensor::random_(Generator * generator) { - return type().random_(*this, generator); + return dispatch_type().random_(*this, generator); } inline Tensor & Tensor::uniform_(double from, double to, Generator * generator) { - return type().uniform_(*this, from, to, generator); + return dispatch_type().uniform_(*this, from, to, generator); } inline Tensor & Tensor::normal_(double mean, double std, Generator * generator) { - return type().normal_(*this, mean, std, generator); + return dispatch_type().normal_(*this, mean, std, generator); } inline Tensor & Tensor::cauchy_(double median, double sigma, Generator * generator) { - return type().cauchy_(*this, median, sigma, generator); + return dispatch_type().cauchy_(*this, median, sigma, generator); } inline Tensor & Tensor::log_normal_(double mean, double std, Generator * generator) { - return type().log_normal_(*this, mean, std, generator); + return dispatch_type().log_normal_(*this, mean, std, generator); } inline Tensor & Tensor::exponential_(double lambd, Generator * generator) { - return type().exponential_(*this, lambd, generator); + return dispatch_type().exponential_(*this, lambd, generator); } inline Tensor & Tensor::geometric_(double p, Generator * generator) { - return type().geometric_(*this, p, generator); + return dispatch_type().geometric_(*this, p, generator); } inline Tensor Tensor::diag(int64_t diagonal) const { - return type().diag(*this, diagonal); + return dispatch_type().diag(*this, diagonal); } inline Tensor Tensor::cross(const Tensor & other, c10::optional dim) const { - return type().cross(*this, other, dim); + return dispatch_type().cross(*this, other, dim); } inline Tensor Tensor::triu(int64_t diagonal) const { - return type().triu(*this, diagonal); + return dispatch_type().triu(*this, diagonal); } inline Tensor Tensor::tril(int64_t diagonal) const { - return type().tril(*this, diagonal); + return dispatch_type().tril(*this, diagonal); } inline Tensor Tensor::trace() const { - return type().trace(*this); + return dispatch_type().trace(*this); } inline Tensor Tensor::ne(Scalar other) const { - return type().ne(*this, other); + return dispatch_type().ne(*this, other); } inline Tensor Tensor::ne(const Tensor & other) const { - return type().ne(*this, other); + return dispatch_type().ne(*this, other); } inline Tensor Tensor::eq(Scalar other) const { - return type().eq(*this, other); + return dispatch_type().eq(*this, other); } inline Tensor Tensor::eq(const Tensor & other) const { - return type().eq(*this, other); + return dispatch_type().eq(*this, other); } 
inline Tensor Tensor::ge(Scalar other) const { - return type().ge(*this, other); + return dispatch_type().ge(*this, other); } inline Tensor Tensor::ge(const Tensor & other) const { - return type().ge(*this, other); + return dispatch_type().ge(*this, other); } inline Tensor Tensor::le(Scalar other) const { - return type().le(*this, other); + return dispatch_type().le(*this, other); } inline Tensor Tensor::le(const Tensor & other) const { - return type().le(*this, other); + return dispatch_type().le(*this, other); } inline Tensor Tensor::gt(Scalar other) const { - return type().gt(*this, other); + return dispatch_type().gt(*this, other); } inline Tensor Tensor::gt(const Tensor & other) const { - return type().gt(*this, other); + return dispatch_type().gt(*this, other); } inline Tensor Tensor::lt(Scalar other) const { - return type().lt(*this, other); + return dispatch_type().lt(*this, other); } inline Tensor Tensor::lt(const Tensor & other) const { - return type().lt(*this, other); + return dispatch_type().lt(*this, other); } inline Tensor Tensor::take(const Tensor & index) const { - return type().take(*this, index); + return dispatch_type().take(*this, index); } inline Tensor Tensor::index_select(int64_t dim, const Tensor & index) const { - return type().index_select(*this, dim, index); + return dispatch_type().index_select(*this, dim, index); } inline Tensor Tensor::masked_select(const Tensor & mask) const { - return type().masked_select(*this, mask); + return dispatch_type().masked_select(*this, mask); } inline Tensor Tensor::nonzero() const { - return type().nonzero(*this); + return dispatch_type().nonzero(*this); } inline Tensor Tensor::gather(int64_t dim, const Tensor & index, bool sparse_grad) const { - return type().gather(*this, dim, index, sparse_grad); + return dispatch_type().gather(*this, dim, index, sparse_grad); } inline Tensor Tensor::addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { - return type().addcmul(*this, tensor1, tensor2, value); + return dispatch_type().addcmul(*this, tensor1, tensor2, value); } inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { - return type().addcdiv(*this, tensor1, tensor2, value); + return dispatch_type().addcdiv(*this, tensor1, tensor2, value); } inline std::tuple Tensor::gels(const Tensor & A) const { - return type().gels(*this, A); + return dispatch_type().gels(*this, A); } inline std::tuple Tensor::triangular_solve(const Tensor & A, bool upper, bool transpose, bool unitriangular) const { - return type().triangular_solve(*this, A, upper, transpose, unitriangular); + return dispatch_type().triangular_solve(*this, A, upper, transpose, unitriangular); } inline std::tuple Tensor::symeig(bool eigenvectors, bool upper) const { - return type().symeig(*this, eigenvectors, upper); + return dispatch_type().symeig(*this, eigenvectors, upper); } inline std::tuple Tensor::eig(bool eigenvectors) const { - return type().eig(*this, eigenvectors); + return dispatch_type().eig(*this, eigenvectors); } inline std::tuple Tensor::svd(bool some, bool compute_uv) const { - return type().svd(*this, some, compute_uv); + return dispatch_type().svd(*this, some, compute_uv); } inline Tensor Tensor::cholesky(bool upper) const { - return type().cholesky(*this, upper); + return dispatch_type().cholesky(*this, upper); } inline Tensor Tensor::cholesky_solve(const Tensor & input2, bool upper) const { - return type().cholesky_solve(*this, input2, upper); + return dispatch_type().cholesky_solve(*this, 
input2, upper); } inline std::tuple Tensor::solve(const Tensor & A) const { - return type().solve(*this, A); + return dispatch_type().solve(*this, A); } inline Tensor Tensor::potri(bool upper) const { - return type().potri(*this, upper); + return dispatch_type().potri(*this, upper); } inline std::tuple Tensor::pstrf(bool upper, Scalar tol) const { - return type().pstrf(*this, upper, tol); + return dispatch_type().pstrf(*this, upper, tol); } inline std::tuple Tensor::qr() const { - return type().qr(*this); + return dispatch_type().qr(*this); } inline std::tuple Tensor::geqrf() const { - return type().geqrf(*this); + return dispatch_type().geqrf(*this); } inline Tensor Tensor::orgqr(const Tensor & input2) const { - return type().orgqr(*this, input2); + return dispatch_type().orgqr(*this, input2); } inline Tensor Tensor::ormqr(const Tensor & input2, const Tensor & input3, bool left, bool transpose) const { - return type().ormqr(*this, input2, input3, left, transpose); + return dispatch_type().ormqr(*this, input2, input3, left, transpose); } inline Tensor Tensor::btrisolve(const Tensor & LU_data, const Tensor & LU_pivots) const { - return type().btrisolve(*this, LU_data, LU_pivots); + return dispatch_type().btrisolve(*this, LU_data, LU_pivots); } inline Tensor Tensor::multinomial(int64_t num_samples, bool replacement, Generator * generator) const { - return type().multinomial(*this, num_samples, replacement, generator); + return dispatch_type().multinomial(*this, num_samples, replacement, generator); } inline Tensor Tensor::lgamma() const { - return type().lgamma(*this); + return dispatch_type().lgamma(*this); } inline Tensor Tensor::digamma() const { - return type().digamma(*this); + return dispatch_type().digamma(*this); } inline Tensor Tensor::polygamma(int64_t n) const { - return type().polygamma(n, *this); + return dispatch_type().polygamma(n, *this); } inline Tensor Tensor::erfinv() const { - return type().erfinv(*this); + return dispatch_type().erfinv(*this); } inline Tensor Tensor::frac() const { - return type().frac(*this); + return dispatch_type().frac(*this); } inline Tensor Tensor::dist(const Tensor & other, Scalar p) const { - return type().dist(*this, other, p); + return dispatch_type().dist(*this, other, p); } inline Tensor Tensor::reciprocal() const { - return type().reciprocal(*this); + return dispatch_type().reciprocal(*this); } inline Tensor Tensor::neg() const { - return type().neg(*this); + return dispatch_type().neg(*this); } inline Tensor Tensor::atan2(const Tensor & other) const { - return type().atan2(*this, other); + return dispatch_type().atan2(*this, other); } inline Tensor Tensor::lerp(const Tensor & end, Scalar weight) const { - return type().lerp(*this, end, weight); + return dispatch_type().lerp(*this, end, weight); } inline Tensor Tensor::lerp(const Tensor & end, const Tensor & weight) const { - return type().lerp(*this, end, weight); + return dispatch_type().lerp(*this, end, weight); } inline Tensor Tensor::histc(int64_t bins, Scalar min, Scalar max) const { - return type().histc(*this, bins, min, max); + return dispatch_type().histc(*this, bins, min, max); } inline Tensor Tensor::sign() const { - return type().sign(*this); + return dispatch_type().sign(*this); } inline Tensor Tensor::fmod(Scalar other) const { - return type().fmod(*this, other); + return dispatch_type().fmod(*this, other); } inline Tensor Tensor::fmod(const Tensor & other) const { - return type().fmod(*this, other); + return dispatch_type().fmod(*this, other); } inline Tensor 
Tensor::remainder(Scalar other) const { - return type().remainder(*this, other); + return dispatch_type().remainder(*this, other); } inline Tensor Tensor::remainder(const Tensor & other) const { - return type().remainder(*this, other); + return dispatch_type().remainder(*this, other); } inline Tensor Tensor::min(const Tensor & other) const { - return type().min(*this, other); + return dispatch_type().min(*this, other); } inline Tensor Tensor::min() const { - return type().min(*this); + return dispatch_type().min(*this); } inline Tensor Tensor::max(const Tensor & other) const { - return type().max(*this, other); + return dispatch_type().max(*this, other); } inline Tensor Tensor::max() const { - return type().max(*this); + return dispatch_type().max(*this); } inline Tensor Tensor::median() const { - return type().median(*this); + return dispatch_type().median(*this); } inline std::tuple Tensor::sort(int64_t dim, bool descending) const { - return type().sort(*this, dim, descending); + return dispatch_type().sort(*this, dim, descending); } inline Tensor Tensor::argsort(int64_t dim, bool descending) const { - return type().argsort(*this, dim, descending); + return dispatch_type().argsort(*this, dim, descending); } inline std::tuple Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const { - return type().topk(*this, k, dim, largest, sorted); + return dispatch_type().topk(*this, k, dim, largest, sorted); } inline Tensor Tensor::all() const { - return type().all(*this); + return dispatch_type().all(*this); } inline Tensor Tensor::any() const { - return type().any(*this); + return dispatch_type().any(*this); } inline Tensor Tensor::renorm(Scalar p, int64_t dim, Scalar maxnorm) const { - return type().renorm(*this, p, dim, maxnorm); + return dispatch_type().renorm(*this, p, dim, maxnorm); } inline Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const { - return type().unfold(*this, dimension, size, step); + return dispatch_type().unfold(*this, dimension, size, step); } inline bool Tensor::equal(const Tensor & other) const { - return type().equal(*this, other); + return dispatch_type().equal(*this, other); } inline Tensor Tensor::pow(const Tensor & exponent) const { - return type().pow(*this, exponent); + return dispatch_type().pow(*this, exponent); } inline Tensor Tensor::alias() const { - return type().alias(*this); + return dispatch_type().alias(*this); } inline bool Tensor::is_variable() const noexcept { diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py index f50537bd0..ce585e37c 100644 --- a/aten/src/ATen/function_wrapper.py +++ b/aten/src/ATen/function_wrapper.py @@ -130,7 +130,7 @@ def TypedDict(name, attrs, total=True): # type: ignore # add non-virtual declaration to Tensor.cpp TENSOR_METHOD_DEFINITION = CodeTemplate("""\ inline ${return_type} Tensor::${api_name}(${method_formals})${const_mark} { - return type().${api_name}(${method_actuals}); + return dispatch_type().${api_name}(${method_actuals}); } """) # add a method declaration in Functions.h diff --git a/aten/src/ATen/native/BatchLinearAlgebra.cpp b/aten/src/ATen/native/BatchLinearAlgebra.cpp index 350727939..3da24a11d 100644 --- a/aten/src/ATen/native/BatchLinearAlgebra.cpp +++ b/aten/src/ATen/native/BatchLinearAlgebra.cpp @@ -140,7 +140,7 @@ static void apply_solve(Tensor& b, Tensor& A, std::vector& infos) { auto n = A.size(-2); auto nrhs = b.size(-1); - auto ipiv = at::empty({n}, b.type().toScalarType(kInt)); + auto ipiv = at::empty({n}, b.options().dtype(kInt)); 
int info; if (b.dim() == 2) { @@ -211,7 +211,7 @@ static void apply_inverse(Tensor& self, std::vector& infos) { auto batch_size = batchCount(self); auto n = self.size(-2); - auto ipiv = at::empty({n}, self.type().toScalarType(kInt)); + auto ipiv = at::empty({n}, self.options().dtype(kInt)); int lwork; scalar_t wkopt; Tensor work; @@ -230,7 +230,7 @@ static void apply_inverse(Tensor& self, std::vector& infos) { lapackGetri(n, self_working_ptr, n, ipiv.data(), &wkopt, lwork, &info); lwork = static_cast(wkopt); - work = at::empty({lwork}, self.type()); + work = at::empty({lwork}, self.options()); // now to compute the actual inverse lapackGetri(n, self_working_ptr, n, ipiv.data(), work.data(), lwork, &info); diff --git a/aten/src/ATen/native/Indexing.cpp b/aten/src/ATen/native/Indexing.cpp index 34851f011..062d26788 100644 --- a/aten/src/ATen/native/Indexing.cpp +++ b/aten/src/ATen/native/Indexing.cpp @@ -190,7 +190,6 @@ static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size) static Tensor computeLinearIndex(const Tensor & src, TensorList indices) { auto strides = computeLinearStride(src); - Type& longType = src.type().toScalarType(kLong); // Compute the linear index by multiplying the indexing tensors by the // stride and summing them. All the indexing tensors have the same shape at @@ -202,7 +201,7 @@ static Tensor computeLinearIndex(const Tensor & src, TensorList indices) { if (indices[i].defined()) { // Cast index to the longType matching src's backend // This allows us to support ie indexing a cuda tensor with a cpu tensor - Tensor index = (wrapIndexOnce(indices[i], i, src.size(i)) * strides[i]).toType(longType); + Tensor index = (wrapIndexOnce(indices[i], i, src.size(i)) * strides[i]).to(kLong); if (linearIndex.defined()) { linearIndex += index; } else { @@ -220,13 +219,13 @@ static Tensor computeLinearIndex(const Tensor & src, TensorList indices) { // Compute the linear indices for the parts of the tensor not being indexed Tensor beforeIndex; if (emptyBefore > 0) { - auto index = at::arange(0, nElemBefore, longType) * strides[emptyBefore - 1]; + auto index = at::arange(0, nElemBefore, src.options().dtype(kLong)) * strides[emptyBefore - 1]; index = index.view(src.sizes().slice(0, emptyBefore)); beforeIndex = unsqueezeN(index, 0, linearIndex.dim() + emptyAfter); } Tensor afterIndex; if (emptyAfter > 0) { - auto index = at::arange(0, nElemAfter, longType); + auto index = at::arange(0, nElemAfter, src.options().dtype(kLong)); index = index.view(src.sizes().slice(src.dim() - emptyAfter, emptyAfter)); afterIndex = unsqueezeN(index, linearIndex.dim() + emptyBefore, 0); } @@ -408,7 +407,7 @@ static AdvancedIndex make_info(Tensor self, TensorList orig) { static std::unique_ptr make_index_iterator(const AdvancedIndex& info) { auto builder = TensorIterator::Builder(); builder.dont_compute_common_dtype(); - builder.add_output(Tensor(), &info.src.type()); + builder.add_output(Tensor(), &info.src.dispatch_type()); builder.add_input(info.src); for (auto& index : info.indices) { builder.add_input(index); @@ -425,7 +424,7 @@ static std::unique_ptr make_index_put_iterator(const AdvancedInd builder.dont_compute_common_dtype(); builder.dont_resize_outputs(); builder.add_output(info.src); - builder.add_input(value, &info.src.type()); + builder.add_input(value, &info.src.dispatch_type()); for (auto& index : info.indices) { builder.add_input(index); } diff --git a/aten/src/ATen/native/LegacyBridge.cpp b/aten/src/ATen/native/LegacyBridge.cpp index 62f2984f2..f0556f6c2 100644 --- 
a/aten/src/ATen/native/LegacyBridge.cpp +++ b/aten/src/ATen/native/LegacyBridge.cpp @@ -7,12 +7,8 @@ namespace at { namespace native { namespace { - static bool _type_has_native(const Type& dtype) { - return dtype.is_sparse(); - } - static bool _has_native(const Tensor& self) { - return _type_has_native(self.type()); + return self.is_sparse(); } } diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp index 5d3c157ff..ee8b74322 100644 --- a/aten/src/ATen/native/LinearAlgebra.cpp +++ b/aten/src/ATen/native/LinearAlgebra.cpp @@ -157,7 +157,7 @@ Tensor& ger_out(Tensor& result, const Tensor& self, const Tensor& vec2) { Tensor mm(const Tensor& self, const Tensor& mat2) { if (self.is_sparse()) { - return mat2.type().addmm(at::zeros({}, mat2.type()), self, mat2, 0, 1); + return at::zeros({}, mat2.options()).addmm(self, mat2, 0, 1); } return at::legacy::th::_th_mm(self, mat2); } @@ -368,8 +368,9 @@ Tensor dot(const Tensor& self, const Tensor& tensor) { Tensor& dot_out(Tensor& result, const Tensor& self, const Tensor& tensor) { result.resize_({}); - // dispatching through type ensures we don't allow mismatched types. - return self.type().fill_(result, self.dot(tensor)); + AT_CHECK(result.scalar_type() == self.scalar_type(), + "result dtype ", result.scalar_type(), " does not match self dtype ", self.scalar_type()); + return result.fill_(self.dot(tensor)); } /* diff --git a/aten/src/ATen/native/LossCTC.cpp b/aten/src/ATen/native/LossCTC.cpp index f6d8906fd..cdd7a4e28 100644 --- a/aten/src/ATen/native/LossCTC.cpp +++ b/aten/src/ATen/native/LossCTC.cpp @@ -364,7 +364,7 @@ Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, IntArrayRef inpu } } if (reduction == Reduction::Mean) { - auto target_lengths_t = at::tensor(target_lengths, res.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(res.type()); + auto target_lengths_t = at::tensor(target_lengths, res.options()); return (res / target_lengths_t).mean(); } else if (reduction == Reduction::Sum) { return res.sum(); diff --git a/aten/src/ATen/native/Memory.cpp b/aten/src/ATen/native/Memory.cpp index 571773502..861b7b249 100644 --- a/aten/src/ATen/native/Memory.cpp +++ b/aten/src/ATen/native/Memory.cpp @@ -12,7 +12,7 @@ Tensor pin_memory(const Tensor& self) { AT_ERROR("cannot pin '", self.type().toString(), "' only dense CPU tensors can be pinned"); } auto* allocator = detail::getCUDAHooks().getPinnedMemoryAllocator(); - auto tensor = self.type().tensorWithAllocator(self.sizes(), self.strides(), allocator); + auto tensor = self.dispatch_type().tensorWithAllocator(self.sizes(), self.strides(), allocator); tensor.copy_(self); return tensor; } diff --git a/aten/src/ATen/native/NNPACK.cpp b/aten/src/ATen/native/NNPACK.cpp index a1ebbca04..874a65b99 100644 --- a/aten/src/ATen/native/NNPACK.cpp +++ b/aten/src/ATen/native/NNPACK.cpp @@ -213,10 +213,10 @@ Tensor _nnpack_spatial_convolution( auto algorithm = nnp_convolution_algorithm_auto; // All Tensors must be float Tensors - if (input.type().ID() != at::TypeID::CPUFloat || - weight.type().ID() != at::TypeID::CPUFloat || - output.type().ID() != at::TypeID::CPUFloat || - (bias.defined() && bias.type().ID() != at::TypeID::CPUFloat)) { + if (input.dispatch_type().ID() != at::TypeID::CPUFloat || + weight.dispatch_type().ID() != at::TypeID::CPUFloat || + output.dispatch_type().ID() != at::TypeID::CPUFloat || + (bias.defined() && bias.dispatch_type().ID() != at::TypeID::CPUFloat)) { throw std::runtime_error( "Mismatched Tensor types in NNPack 
convolutionOutput"); } diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp index 97994aa81..9cb247def 100644 --- a/aten/src/ATen/native/ReduceOps.cpp +++ b/aten/src/ATen/native/ReduceOps.cpp @@ -67,7 +67,7 @@ static void allocate_reduction_result( if (result.defined()) { result.resize_(shape); } else { - result = at::empty(shape, self.type().toScalarType(dtype)); + result = at::empty(shape, self.options().dtype(dtype)); } } diff --git a/aten/src/ATen/native/TensorConversions.cpp b/aten/src/ATen/native/TensorConversions.cpp index 05c87342b..bf5eb4ad4 100644 --- a/aten/src/ATen/native/TensorConversions.cpp +++ b/aten/src/ATen/native/TensorConversions.cpp @@ -20,8 +20,8 @@ static inline Device ensure_has_index(Device device) { } static inline Tensor to_impl(const Tensor& self, const TensorOptions& options, bool non_blocking) { - return self.type().toBackend(options.backend()).toScalarType(typeMetaToScalarType(options.dtype())) - .copy(self, non_blocking, options.device()); + return self.dispatch_type().toBackend(options.backend()).toScalarType(typeMetaToScalarType(options.dtype())) + .copy(self, non_blocking, options.device()); } Tensor to(const Tensor& self, const TensorOptions& options, bool non_blocking, bool copy) { diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp index 3fc0ee272..5727cafa4 100644 --- a/aten/src/ATen/native/TensorFactories.cpp +++ b/aten/src/ATen/native/TensorFactories.cpp @@ -140,12 +140,12 @@ Tensor& empty_out(Tensor& result, IntArrayRef size) { // specialized operators for each datatype. // TODO: remove when we have Type support in the IR -#define DEFINE_CAST_OP(_1, n, _2) \ - Tensor _cast_##n(const Tensor& self, bool non_blocking) { \ - auto& target_type = self.type().toScalarType(ScalarType::n); \ - if (self.type() == target_type) \ - return self; \ - return target_type.copy(self, non_blocking); \ +#define DEFINE_CAST_OP(_1, n, _2) \ + Tensor _cast_##n(const Tensor& self, bool non_blocking) { \ + auto& target_type = self.dispatch_type().toScalarType(ScalarType::n); \ + if (self.dispatch_type() == target_type) \ + return self; \ + return target_type.copy(self, non_blocking); \ } AT_FORALL_SCALAR_TYPES_AND_BOOL_EXCEPT_QINT(DEFINE_CAST_OP) diff --git a/aten/src/ATen/native/TensorIterator.h b/aten/src/ATen/native/TensorIterator.h index f3510ab6e..affcade0e 100644 --- a/aten/src/ATen/native/TensorIterator.h +++ b/aten/src/ATen/native/TensorIterator.h @@ -69,7 +69,7 @@ struct CAFFE2_API OperandInfo { OperandInfo(const Tensor& t, const Type* type=nullptr) : tensor(t), type(const_cast(type)) { if (t.defined() && !type) { - this->type = &t.type(); + this->type = &t.dispatch_type(); } } diff --git a/aten/src/ATen/native/TensorIteratorReduce.cpp b/aten/src/ATen/native/TensorIteratorReduce.cpp index 5ec8f9815..f6d20281a 100644 --- a/aten/src/ATen/native/TensorIteratorReduce.cpp +++ b/aten/src/ATen/native/TensorIteratorReduce.cpp @@ -35,7 +35,7 @@ static void two_pass_reduction(TensorIterator& iter, const loop2d_t& loop) { auto& dst = iter.tensor(0); auto buffer_shape = DimVector(dst.sizes()); buffer_shape.insert(buffer_shape.begin(), max_threads); - auto buffer = at::empty(buffer_shape, dst.type()); + auto buffer = at::empty(buffer_shape, dst.options()); std::unique_ptr written(new bool[max_threads]); std::fill(written.get(), written.get() + max_threads, false); diff --git a/aten/src/ATen/native/TypeProperties.cpp b/aten/src/ATen/native/TypeProperties.cpp index 9ed7648fc..c2cae175b 100644 --- 
a/aten/src/ATen/native/TypeProperties.cpp +++ b/aten/src/ATen/native/TypeProperties.cpp @@ -10,7 +10,7 @@ bool is_cuda(const Tensor& self) { } bool is_distributed(const Tensor& self) { - return self.type().is_distributed(); + return self.dispatch_type().is_distributed(); } bool is_complex(const Tensor& self) { @@ -35,7 +35,7 @@ bool is_sparse(const Tensor& self) { } Tensor type_as(const Tensor& self, const Tensor& other) { - return self.toType(other.type()); + return self.toType(other.dispatch_type()); } }} // namespace at::native diff --git a/aten/src/ATen/native/cuda/Distributions.cu b/aten/src/ATen/native/cuda/Distributions.cu index 4b0f6f098..fb72346ef 100644 --- a/aten/src/ATen/native/cuda/Distributions.cu +++ b/aten/src/ATen/native/cuda/Distributions.cu @@ -237,7 +237,6 @@ Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) { auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA))); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] { - const at::Type& p_type = p.type(); using self_t = scalar_t; auto seeds = next_philox_seed(gen, 10); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] { diff --git a/aten/src/ATen/native/cuda/LossCTC.cu b/aten/src/ATen/native/cuda/LossCTC.cu index 547dd6ccd..9fe969703 100644 --- a/aten/src/ATen/native/cuda/LossCTC.cu +++ b/aten/src/ATen/native/cuda/LossCTC.cu @@ -164,12 +164,11 @@ ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data, // We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the // backward. The dispatch function will only return the loss. template -std::tuple ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets_, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) { +std::tuple ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) { // log_probs: input_len x batch_size x num_labels // targets [int64]: batch_size x target_length OR sum(target_lengths) CheckedFrom c = "ctc_loss_gpu"; using target_t = typename std::conditional::type; - auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to log_probs cuda if it isn't there already auto log_probs_arg = TensorArg(log_probs, "log_probs", 1); auto targets_arg = TensorArg(targets, "targets", 2); checkAllSameGPU(c, {log_probs_arg, targets_arg}); @@ -225,7 +224,7 @@ std::tuple ctc_loss_gpu_template(const Tensor& log_probs, const auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong)); auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong)); - tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong)); + tg_batch_offsets = tg_batch_offsets.cuda(); Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options()); Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options()); @@ -481,11 +480,10 @@ ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data, // The backward. It essentially computes eq 16 by using the above kernels. // We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward. 
 template
-Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets_, IntArrayRef input_lengths, IntArrayRef target_lengths,
+Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
                                       const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) {
   constexpr scalar_t neginf = -INFINITY;
   using target_t = typename std::conditional::type;
-  auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to cuda if it isn't there already
   int64_t batch_size = log_probs.size(1);
   int64_t num_labels = log_probs.size(2);
   int64_t lp_input_stride = log_probs.stride(0);
@@ -515,9 +513,9 @@ Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_
     tg_target_stride = targets.stride(1);
     max_target_length = targets.size(1);
   }

-  auto target_lengths_t = at::tensor(target_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong));
-  auto input_lengths_t = at::tensor(input_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong));
-  tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong));
+  auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong));
+  auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong));
+  tg_batch_offsets = tg_batch_offsets.cuda();
   Tensor log_beta = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options());
   Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta))
diff --git a/aten/src/ATen/native/cuda/MiscUtils.h b/aten/src/ATen/native/cuda/MiscUtils.h
index e9dedc78b..4cc1c4ffe 100644
--- a/aten/src/ATen/native/cuda/MiscUtils.h
+++ b/aten/src/ATen/native/cuda/MiscUtils.h
@@ -57,9 +57,9 @@ template
 static inline Storage pin_memory(int64_t size, Tensor dummy) {
   int64_t adjusted_size = size * sizeof(T);
   auto* allocator = cuda::getPinnedMemoryAllocator();
-  auto& backend = dummy.type().toBackend(Backend::CPU).toScalarType(kByte);
+  auto& backend = dummy.dispatch_type().toBackend(Backend::CPU).toScalarType(kByte);
   return backend.storageWithAllocator(adjusted_size, allocator);
 }
-
+
 } // namespace native
 } // namespace at
diff --git a/aten/src/ATen/templates/Tensor.h b/aten/src/ATen/templates/Tensor.h
index 091450e26..b1e917ad9 100644
--- a/aten/src/ATen/templates/Tensor.h
+++ b/aten/src/ATen/templates/Tensor.h
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include

 namespace c10{
 struct TensorOptions;
@@ -196,7 +197,11 @@ class CAFFE2_API Tensor {
     return impl_->itemsize();
   }

-  Type & type() const {
+  DeprecatedTypeProperties & type() const {
+    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+        tensorTypeIdToBackend(type_id()), scalar_type());
+  }
+  Type & dispatch_type() const {
     return legacyTensorType(*impl_);
   }
   TensorTypeId type_id() const {
diff --git a/aten/src/ATen/templates/TensorMethods.h b/aten/src/ATen/templates/TensorMethods.h
index bbaa19b26..5928907d7 100644
--- a/aten/src/ATen/templates/TensorMethods.h
+++ b/aten/src/ATen/templates/TensorMethods.h
@@ -10,33 +10,33 @@ namespace at {

 inline Tensor Tensor::toType(const Type & t, bool non_blocking) const {
-  if(type() == t)
+  if(dispatch_type() == t)
     return *this;
   return t.copy(*this, non_blocking);
 }

 inline
Tensor Tensor::cpu() const { - return toType(type().cpu()); + return toType(dispatch_type().cpu()); } inline Tensor Tensor::cuda() const { - return toType(type().cuda()); + return toType(dispatch_type().cuda()); } inline Tensor Tensor::hip() const { - return toType(type().hip()); + return toType(dispatch_type().hip()); } inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) { - return type().copy_(*this, src, non_blocking); + return dispatch_type().copy_(*this, src, non_blocking); } inline Tensor Tensor::toType(ScalarType t) const { - return toType(type().toScalarType(t)); + return toType(dispatch_type().toScalarType(t)); } inline Tensor Tensor::toBackend(Backend b) const { - return toType(type().toBackend(b)); + return toType(dispatch_type().toBackend(b)); } inline TensorOptions Tensor::options() const { @@ -50,11 +50,11 @@ inline void Tensor::backward( c10::optional gradient, bool keep_graph, bool create_graph) { - type().backward(*this, std::move(gradient), keep_graph, create_graph); + dispatch_type().backward(*this, std::move(gradient), keep_graph, create_graph); } inline void Tensor::set_data(Tensor new_data) { - type().set_data(*this, new_data); + dispatch_type().set_data(*this, new_data); } // all static inline to allow for inlining of the non-dynamic part of dispatch diff --git a/aten/src/ATen/test/cuda_tensor_interop_test.cpp b/aten/src/ATen/test/cuda_tensor_interop_test.cpp index 9877fd2e0..f206a937a 100644 --- a/aten/src/ATen/test/cuda_tensor_interop_test.cpp +++ b/aten/src/ATen/test/cuda_tensor_interop_test.cpp @@ -30,7 +30,7 @@ TEST(CUDACaffe2ToPytorch, SimpleLegacy) { caffe2::math::Set(16, 777, data, &context); } at::Tensor at_tensor(c2_tensor); - ASSERT_TRUE(&at_tensor.type() != nullptr); + ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr); ASSERT_TRUE(at_tensor.is_cuda()); auto at_cpu = at_tensor.cpu(); @@ -50,7 +50,7 @@ TEST(CUDACaffe2ToPytorch, Simple) { caffe2::math::Set(16, 777, data, &context); } at::Tensor at_tensor(c2_tensor); - ASSERT_TRUE(&at_tensor.type() != nullptr); + ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr); ASSERT_TRUE(at_tensor.is_cuda()); auto at_cpu = at_tensor.cpu(); diff --git a/aten/src/ATen/test/scalar_test.cpp b/aten/src/ATen/test/scalar_test.cpp index 24c4da856..08b4bdf3a 100644 --- a/aten/src/ATen/test/scalar_test.cpp +++ b/aten/src/ATen/test/scalar_test.cpp @@ -19,7 +19,7 @@ struct Foo { static void apply(Tensor a, Tensor b) { scalar_type s = 1; std::stringstream ss; - ss << "hello, dispatch: " << a.type().toString() << s << "\n"; + ss << "hello, dispatch: " << a.dispatch_type().toString() << s << "\n"; auto data = (scalar_type*)a.data_ptr(); (void)data; } @@ -105,7 +105,7 @@ TEST(TestScalar, TestScalar) { scalar_t s = 1; std::stringstream ss; ASSERT_NO_THROW( - ss << "hello, dispatch" << x.type().toString() << s << "\n"); + ss << "hello, dispatch" << x.dispatch_type().toString() << s << "\n"); auto data = (scalar_t*)x.data_ptr(); (void)data; }); diff --git a/aten/src/ATen/test/tensor_interop_test.cpp b/aten/src/ATen/test/tensor_interop_test.cpp index 495fe0f8b..5cdd0e442 100644 --- a/aten/src/ATen/test/tensor_interop_test.cpp +++ b/aten/src/ATen/test/tensor_interop_test.cpp @@ -12,7 +12,7 @@ TEST(Caffe2ToPytorch, SimpleLegacy) { data[i] = i; } at::Tensor at_tensor(c2_tensor); - ASSERT_TRUE(&at_tensor.type() != nullptr); + ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr); auto it = at_tensor.data(); for (int64_t i = 0; i < 16; i++) { @@ -27,7 +27,7 @@ TEST(Caffe2ToPytorch, Simple) { data[i] = i; } at::Tensor 
at_tensor(c2_tensor);
-  ASSERT_TRUE(&at_tensor.type() != nullptr);
+  ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr);
   auto it = at_tensor.data();
   for (int64_t i = 0; i < 16; i++) {
diff --git a/aten/src/ATen/test/undefined_tensor_test.cpp b/aten/src/ATen/test/undefined_tensor_test.cpp
index 9c9c42ce5..5a3c926de 100644
--- a/aten/src/ATen/test/undefined_tensor_test.cpp
+++ b/aten/src/ATen/test/undefined_tensor_test.cpp
@@ -27,9 +27,9 @@ TEST(TestUndefined, UndefinedTest) {
   ASSERT_ANY_THROW(und.add(5));
   ASSERT_ANY_THROW(und.mm(und));

-  und.toType(und.type());
-  ASSERT_ANY_THROW(und.toType(ft.type()));
-  ASSERT_ANY_THROW(ft.toType(und.type()));
+  und.toType(und.dispatch_type());
+  ASSERT_ANY_THROW(und.toType(ft.dispatch_type()));
+  ASSERT_ANY_THROW(ft.toType(und.dispatch_type()));
   und.toType(ScalarType::Undefined);
   ASSERT_ANY_THROW(und.toType(ScalarType::Float));
   ASSERT_ANY_THROW(ft.toType(ScalarType::Undefined));
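
Note (illustration, not part of the patch): the recurring migration in BatchLinearAlgebra.cpp, ReduceOps.cpp, and Indexing.cpp replaces factory calls that went through Type (self.type().toScalarType(...)) with TensorOptions (self.options().dtype(...)). The sketch below shows the pattern; the helper names make_pivots and make_linear_index are invented for illustration and do not exist in ATen.

#include <ATen/ATen.h>

// Allocate an int32 pivot buffer on the same device/layout as `self`.
at::Tensor make_pivots(const at::Tensor& self, int64_t n) {
  // Before this patch: at::empty({n}, self.type().toScalarType(at::kInt));
  return at::empty({n}, self.options().dtype(at::kInt));
}

// Build an int64 index range matching `src`'s device, as Indexing.cpp now does.
at::Tensor make_linear_index(const at::Tensor& src, int64_t numel) {
  // Before this patch: at::arange(0, numel, src.type().toScalarType(at::kLong));
  return at::arange(0, numel, src.options().dtype(at::kLong));
}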
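
Note (illustration, not part of the patch): after this change Tensor::type() returns DeprecatedTypeProperties& and stays usable for property queries such as toString() (the unchanged AT_ERROR line in Memory.cpp relies on this), while anything that needs the dispatch object Type& must call dispatch_type(), as the updated tests and NNPACK.cpp do. The function describe() below is a hypothetical caller-side sketch under those assumptions.

#include <ATen/ATen.h>
#include <iostream>

void describe(const at::Tensor& t) {
  // Property query: still goes through type(), now a DeprecatedTypeProperties&.
  std::cout << "tensor kind: " << t.type().toString() << "\n";
  // Dispatch-style access: what type() used to return is now dispatch_type().
  at::Type& dispatch = t.dispatch_type();
  (void)dispatch;  // e.g. pass to legacy APIs that still take Type&
}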
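
Note (illustration, not part of the patch): Indexing.cpp also drops the intermediate Type object for dtype conversion, calling Tensor::to(kLong) directly. A minimal sketch, assuming only the Tensor::to(ScalarType) overload shown in TensorMethods.h above; the helper name as_long is invented for illustration.

#include <ATen/ATen.h>

at::Tensor as_long(const at::Tensor& index) {
  // Before: Type& longType = index.type().toScalarType(at::kLong);
  //         return index.toType(longType);
  return index.to(at::kLong);
}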