From e29330deab7693ea94c3474427aca6e2febc92f5 Mon Sep 17 00:00:00 2001
From: Scott Wolchok
Date: Mon, 18 Sep 2023 14:25:28 -0700
Subject: [PATCH] [PyTorch] clang-format ivalue.h (#109272)

I don't know how this got out of format, but now it's formatted.

Differential Revision: [D49245911](https://our.internmc.facebook.com/intern/diff/D49245911/)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/109272
Approved by: https://github.com/ezyang, https://github.com/Skylion007
---
 aten/src/ATen/core/ivalue.h | 188 +++++++++++++++++++-----------------
 1 file changed, 102 insertions(+), 86 deletions(-)

diff --git a/aten/src/ATen/core/ivalue.h b/aten/src/ATen/core/ivalue.h
index a309573177b00f..07e1db8d6ba4b8 100644
--- a/aten/src/ATen/core/ivalue.h
+++ b/aten/src/ATen/core/ivalue.h
@@ -7,14 +7,14 @@
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
@@ -66,25 +66,24 @@ struct PyObjectHolder;
 struct EnumHolder;
 // We need a ComplexHolder because currently the payloads in the Union
 // only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big
-// to fit in the IValue directly, we indirect complex numbers through an intrusive
-// pointer to ComplexHolder (which contains a c10::complex<double>).
+// to fit in the IValue directly, we indirect complex numbers through an
+// intrusive pointer to ComplexHolder (which contains a c10::complex<double>).
 struct ComplexHolder : c10::intrusive_ptr_target {
-  public:
-  template <typename T>
-  ComplexHolder(c10::complex<T> c) {
-    val = convert<decltype(val), c10::complex<T>>(c);
-  }
-  ComplexHolder() = default;
-  c10::complex<double> val;
+ public:
+  template <typename T>
+  ComplexHolder(c10::complex<T> c) {
+    val = convert<decltype(val), c10::complex<T>>(c);
+  }
+  ComplexHolder() = default;
+  c10::complex<double> val;
 };

 // Similar to ComplexHolder, for StreamData3
 struct StreamData3Holder : c10::intrusive_ptr_target {
-  public:
-  StreamData3Holder(struct c10::StreamData3 d):val(d) {
-  }
-  StreamData3Holder() = delete;
-  struct c10::StreamData3 val;
+ public:
+  StreamData3Holder(struct c10::StreamData3 d) : val(d) {}
+  StreamData3Holder() = delete;
+  struct c10::StreamData3 val;
 };

 } // namespace ivalue
@@ -98,7 +97,7 @@ template <typename T>
 struct OptionalArray {
   c10::optional<std::vector<T>> list;

-  OptionalArray()= default;
+  OptionalArray() = default;
   OptionalArray(std::vector<T> val) : list(std::move(val)) {}

   // Used when saving an argument for the backwards pass.
@@ -222,9 +221,9 @@ struct Capsule {
 /// torch::Tensor my_tensor = my_ivalue.toTensor();
 /// \endrst
 struct TORCH_API IValue final {
-  IValue(const IValue& rhs)
-      : IValue(rhs.payload, rhs.tag) {
-    if (isIntrusivePtr() && payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
+  IValue(const IValue& rhs) : IValue(rhs.payload, rhs.tag) {
+    if (isIntrusivePtr() &&
+        payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
       c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr);
     }
   }
@@ -292,7 +291,7 @@ struct TORCH_API IValue final {
    */
   bool is(const IValue& rhs) const;

-  /** 
+  /**
    * Hashing for IValues. Returns an IValue-boxed int.
    *
    * Some notes:
@@ -325,7 +324,7 @@ struct TORCH_API IValue final {
       const IValue& lhs,
       const IValue& rhs);

-private:
+ private:
   static bool isAliasOf(const at::Tensor& a, const at::Tensor& b) {
     if (a.is_sparse()) {
       return isAliasOf(a._values(), b) || isAliasOf(a._indices(), b);
@@ -334,14 +333,12 @@ struct TORCH_API IValue final {
       return isAliasOf(a, b._values()) || isAliasOf(a, b._indices());
     }
     if (a.is_sparse_csr()) {
-      return isAliasOf(a.values(), b) ||
-          isAliasOf(a.crow_indices(), b) ||
-          isAliasOf(a.col_indices(), b);
+      return isAliasOf(a.values(), b) || isAliasOf(a.crow_indices(), b) ||
+          isAliasOf(a.col_indices(), b);
     }
     if (b.is_sparse_csr()) {
-      return isAliasOf(a, b.values()) ||
-          isAliasOf(a, b.crow_indices()) ||
-          isAliasOf(a, b.col_indices());
+      return isAliasOf(a, b.values()) || isAliasOf(a, b.crow_indices()) ||
+          isAliasOf(a, b.col_indices());
     }

     // Opaque tensors such as the ones constructed by the MKL-DNN backend
@@ -357,7 +354,7 @@ struct TORCH_API IValue final {
   template <typename T>
   bool isListOf() const;

-public:
+ public:
   /// @private [doxygen private]
   bool isAliasOf(const IValue& rhs) const {
     if (this->tag != rhs.tag) {
@@ -448,7 +445,8 @@ struct TORCH_API IValue final {
   }
   IValue(at::Storage s) : tag(Tag::Storage) {
-    payload.u.as_intrusive_ptr = null_to_undefined_tensor(s.unsafeReleaseStorageImpl());
+    payload.u.as_intrusive_ptr =
+        null_to_undefined_tensor(s.unsafeReleaseStorageImpl());
   }
   bool isStorage() const {
     return Tag::Storage == tag;
   }
@@ -464,8 +462,7 @@ struct TORCH_API IValue final {
   }

   /// @private [doxygen private]
-  IValue(intrusive_ptr<caffe2::Blob> blob)
-      : tag(Tag::Blob) {
+  IValue(intrusive_ptr<caffe2::Blob> blob) : tag(Tag::Blob) {
     // TODO (after Tensor merge) If we pass in a Blob holding a Tensor, extract
     // and store it as a Tensor instead.
     payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release());
@@ -546,7 +543,9 @@ struct TORCH_API IValue final {
   // ComplexDouble
   template <typename T>
   IValue(c10::complex<T> c);
-  bool isComplexDouble() const { return Tag::ComplexDouble == tag; }
+  bool isComplexDouble() const {
+    return Tag::ComplexDouble == tag;
+  }
   c10::complex<double> toComplexDouble() const;

   // Future
@@ -620,7 +619,7 @@ struct TORCH_API IValue final {
   c10::SymFloat toSymFloat() const&;

   IValue(const c10::SymBool& i) {
-    if (auto mi = i.maybe_as_bool()) { 
+    if (auto mi = i.maybe_as_bool()) {
       tag = Tag::Bool;
       payload.u.as_int = *mi;
     } else {
@@ -680,7 +679,7 @@ struct TORCH_API IValue final {
   IValue(c10::intrusive_ptr<ivalue::ConstantString> v);
   IValue(std::string v);
   IValue(const char* v) : IValue(std::string(v)) {}
-  IValue(c10::string_view v) : IValue(std::string(v)) {};
+  IValue(c10::string_view v) : IValue(std::string(v)){};
   bool isString() const {
     return Tag::String == tag;
   }
@@ -744,9 +743,10 @@ struct TORCH_API IValue final {
   // didn't bother making it work for the other constructors, we just make sure
   // they're not selectable.
   template <class T>
-  using enable_if_list_is_ivalue_constructible =
-      std::enable_if_t<std::is_constructible<IValue, T>::value &&
-          !std::is_same<T, c10::SymInt>::value, std::nullptr_t>;
+  using enable_if_list_is_ivalue_constructible = std::enable_if_t<
+      std::is_constructible<IValue, T>::value &&
+          !std::is_same<T, c10::SymInt>::value,
+      std::nullptr_t>;

   template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
   IValue(c10::List<T>&& v);
@@ -776,7 +776,8 @@ struct TORCH_API IValue final {
   template <class T>
   using enable_if_ilist_is_ivalue_constructible = std::enable_if_t<
       std::is_constructible<IValue, T>::value &&
-          std::is_constructible<IValue, typename IListRef<T>::boxed_type>::value &&
+          std::is_constructible<IValue, typename IListRef<T>::boxed_type>::
+              value &&
           !std::is_same<T, c10::SymInt>::value,
       std::nullptr_t>;

@@ -875,14 +876,16 @@ struct TORCH_API IValue final {
       tag = Tag::Bool;
       payload.u.as_bool = s.toBool();
     } else {
-      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(s.isIntegral(false), "Unknown type in Scalar");
-      tag  = Tag::Int;
+      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+          s.isIntegral(false), "Unknown type in Scalar");
+      tag = Tag::Int;
       payload.u.as_int = s.toLong();
     }
   }

   bool isScalar() const {
-    return isDouble() || isInt() || isComplexDouble() || isBool() || isSymInt() || isSymFloat() || isSymBool();
+    return isDouble() || isInt() || isComplexDouble() || isBool() ||
+        isSymInt() || isSymFloat() || isSymBool();
   }

   at::Scalar toScalar() const {
@@ -917,14 +920,15 @@ struct TORCH_API IValue final {
   }

   // Stream
-  IValue(c10::Stream s)
-      : tag(Tag::Stream) {
+  IValue(c10::Stream s) : tag(Tag::Stream) {
     auto v = c10::make_intrusive<ivalue::StreamData3Holder>(s.pack3());
     payload.u.as_intrusive_ptr = v.release();
   }
   c10::Stream toStream() &&;
-  c10::Stream toStream() const &;
-  bool isStream() const { return Tag::Stream == tag; }
+  c10::Stream toStream() const&;
+  bool isStream() const {
+    return Tag::Stream == tag;
+  }

   // ScalarType
   IValue(ScalarType t)
@@ -965,7 +969,8 @@ struct TORCH_API IValue final {

   // Generator
   IValue(at::Generator g) : tag(Tag::Generator) {
-    payload.u.as_intrusive_ptr = null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl());
+    payload.u.as_intrusive_ptr =
+        null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl());
   }
   bool isGenerator() const {
     return Tag::Generator == tag;
@@ -998,7 +1003,8 @@ struct TORCH_API IValue final {
   template <typename T>
   T to() &&;
   template <typename T>
-  typename c10::detail::ivalue_to_const_ref_overload_return<T>::type to() const&;
+  typename c10::detail::ivalue_to_const_ref_overload_return<T>::type to()
+      const&;

   // ToOptional: convert a IValue to the Optional obj that accepts both T and
   // None
@@ -1031,9 +1037,7 @@ struct TORCH_API IValue final {
   // This is different from `repr()` in that there is no expectation that we can
   // exactly reconstruct an IValue from the output; feel free to use a
   // concise/pretty form
-  TORCH_API friend std::ostream& operator<<(
-      std::ostream& out,
-      const IValue& v);
+  TORCH_API friend std::ostream& operator<<(std::ostream& out, const IValue& v);

   bool isPtrType() const {
     if (isTensor()) {
@@ -1050,7 +1054,8 @@ struct TORCH_API IValue final {
       return payload.as_tensor.unsafeGetTensorImpl();
     } else {
       return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()
-          ? payload.u.as_intrusive_ptr : nullptr;
+          ? payload.u.as_intrusive_ptr
+          : nullptr;
     }
   }
@@ -1070,14 +1075,13 @@ struct TORCH_API IValue final {
       // so this will detect overlap of sparse tensors that share a values
       // tensor, but not sparse tensors that share an indices tensor.
       return hashTensor(ten.values());
-    } else if (!ten.has_storage()) { 
+    } else if (!ten.has_storage()) {
       // Opaque tensors such as the ones constructed by the MKL-DNN backend
       // don't have storage so we just use their TensorImpls.
       // TODO: Find way to expose alias info for opaque tensors.
       return reinterpret_cast<size_t>(ten.unsafeGetTensorImpl());
     } else {
-      return reinterpret_cast<size_t>(
-          ten.storage().unsafeGetStorageImpl());
+      return reinterpret_cast<size_t>(ten.storage().unsafeGetStorageImpl());
     }
   }
   size_t operator()(const IValue& val) const {
@@ -1118,8 +1122,11 @@ struct TORCH_API IValue final {
       c10::optional<c10::Device> device = c10::nullopt) const;

  private:
-  static c10::intrusive_ptr_target* null_to_undefined_tensor(c10::intrusive_ptr_target* p) {
-    return p ? p : static_cast<c10::intrusive_ptr_target*>(c10::UndefinedTensorImpl::singleton());
+  static c10::intrusive_ptr_target* null_to_undefined_tensor(
+      c10::intrusive_ptr_target* p) {
+    return p ? p
+             : static_cast<c10::intrusive_ptr_target*>(
+                   c10::UndefinedTensorImpl::singleton());
   }
   static bool ptrEqual(const IValue& lhs, const IValue& rhs);

@@ -1150,8 +1157,11 @@ struct TORCH_API IValue final {
     // the compiler to generate the same code for each case. It is
     // surprisingly difficult to get this right.
     if (isTensor() || isIntrusivePtr()) {
-      c10::intrusive_ptr_target* p = isTensor() ? payload.as_tensor.unsafeGetTensorImpl() : payload.u.as_intrusive_ptr;
-      c10::intrusive_ptr<intrusive_ptr_target, c10::UndefinedTensorImpl>::reclaim(p);
+      c10::intrusive_ptr_target* p = isTensor()
+          ? payload.as_tensor.unsafeGetTensorImpl()
+          : payload.u.as_intrusive_ptr;
+      c10::intrusive_ptr<intrusive_ptr_target, c10::UndefinedTensorImpl>::
+          reclaim(p);
       // No need to make this destructor call!
       // payload.as_tensor.~Tensor();
     }
@@ -1238,7 +1248,8 @@ struct TORCH_API IValue final {
       case Tag::Enum:
         return true;
     }
-    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false, "unexpected tag ", static_cast<int>(tag));
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        false, "unexpected tag ", static_cast<int>(tag));
     return false;
   }

@@ -1247,7 +1258,8 @@ struct TORCH_API IValue final {
   // preserves the old behavior for use with WeakIValue for now.
   bool isIntrusivePtrLegacyBehavior() const {
     if (tag == Tag::Storage || tag == Tag::Generator) {
-      return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton();
+      return payload.u.as_intrusive_ptr !=
+          c10::UndefinedTensorImpl::singleton();
     } else {
       return isIntrusivePtr();
     }
@@ -1304,13 +1316,13 @@ struct TORCH_API WeakIValue final {
       : payload(rhs.payload),
         tag(rhs.tag),
         is_intrusive_ptr(rhs.is_intrusive_ptr) {
-    if (is_intrusive_ptr && payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
+    if (is_intrusive_ptr &&
+        payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
       c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
     }
   }
   WeakIValue(const IValue& rhs)
-      : tag(rhs.tag),
-        is_intrusive_ptr(rhs.isIntrusivePtrLegacyBehavior()) {
+      : tag(rhs.tag), is_intrusive_ptr(rhs.isIntrusivePtrLegacyBehavior()) {
     if (rhs.isTensor()) {
       payload.as_intrusive_ptr = rhs.unsafeToTensorImpl();
       is_intrusive_ptr = true;
@@ -1327,7 +1339,8 @@ struct TORCH_API WeakIValue final {
     swap(rhs);
   }
   ~WeakIValue() {
-    if (is_intrusive_ptr && payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
+    if (is_intrusive_ptr &&
+        payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
       c10::raw::weak_intrusive_ptr::decref(payload.as_intrusive_ptr);
     }
   }
@@ -1357,9 +1370,11 @@ struct TORCH_API WeakIValue final {
       return IValue(newPayload, tag);
     }
     if (IValue::Tag::Tensor == tag) {
-      auto temp = c10::weak_intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl>::reclaim(
-          static_cast<at::TensorImpl*>(payload.as_intrusive_ptr));
-      c10::intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl> ip(temp.lock());
+      auto temp =
+          c10::weak_intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl>::
+              reclaim(static_cast<at::TensorImpl*>(payload.as_intrusive_ptr));
+      c10::intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl> ip(
+          temp.lock());
       temp.release();
       if (!ip) {
         return IValue();
@@ -1369,8 +1384,8 @@ struct TORCH_API WeakIValue final {
     } else {
       auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target, c10::UndefinedTensorImpl>::reclaim(
           payload.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()
-          ? nullptr
-          : payload.as_intrusive_ptr);
+              ? nullptr
+              : payload.as_intrusive_ptr);
       IValue::Payload pl;
       pl.u.as_intrusive_ptr = temp.lock().release();
       temp.release();
@@ -1386,8 +1401,9 @@ struct TORCH_API WeakIValue final {
     if (!is_intrusive_ptr) {
       return 1;
     }
-    auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target, c10::UndefinedTensorImpl>::reclaim(
-        payload.as_intrusive_ptr);
+    auto temp = c10::weak_intrusive_ptr<
+        c10::intrusive_ptr_target,
+        c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
     size_t result = temp.use_count();
     temp.release();
     return result;
@@ -1397,8 +1413,9 @@ struct TORCH_API WeakIValue final {
     if (!is_intrusive_ptr) {
       return 1;
     }
-    auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target, c10::UndefinedTensorImpl>::reclaim(
-        payload.as_intrusive_ptr);
+    auto temp = c10::weak_intrusive_ptr<
+        c10::intrusive_ptr_target,
+        c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr);
     size_t result = temp.weak_use_count();
     temp.release();
     return result;
@@ -1418,9 +1435,7 @@ struct TORCH_API WeakIValue final {
 // of shared_ptrs to the class type and its owning CU, so that the class type is
 // guaranteed to stay alive as long as we hold this object.
 struct TORCH_API StrongTypePtr {
-  StrongTypePtr(
-      std::shared_ptr<torch::jit::CompilationUnit> cu,
-      TypePtr type);
+  StrongTypePtr(std::shared_ptr<torch::jit::CompilationUnit> cu, TypePtr type);

   std::shared_ptr<torch::jit::CompilationUnit> cu_;
   TypePtr type_;
@@ -1432,9 +1447,7 @@ struct TORCH_API StrongTypePtr {
 // from Object -> CompilationUnit and CompilationUnit -> Graph (which owns the
 // Constant Object)
 struct TORCH_API WeakTypePtr {
-  WeakTypePtr(
-      std::weak_ptr<torch::jit::CompilationUnit> cu,
-      TypePtr type);
+  WeakTypePtr(std::weak_ptr<torch::jit::CompilationUnit> cu, TypePtr type);

   std::weak_ptr<torch::jit::CompilationUnit> cu_;
   TypePtr type_;
@@ -1443,10 +1456,12 @@ struct TORCH_API WeakTypePtr {
 // internal build errors with std::variant :/
 struct WeakOrStrongCompilationUnit {
   explicit WeakOrStrongCompilationUnit(
-      std::shared_ptr<torch::jit::CompilationUnit> shared_cu) : strong_ptr_(std::move(shared_cu)), weak_ptr_(c10::nullopt) {}
+      std::shared_ptr<torch::jit::CompilationUnit> shared_cu)
+      : strong_ptr_(std::move(shared_cu)), weak_ptr_(c10::nullopt) {}

   explicit WeakOrStrongCompilationUnit(
-      std::weak_ptr<torch::jit::CompilationUnit> weak_cu) : strong_ptr_(c10::nullopt), weak_ptr_(std::move(weak_cu)) {}
+      std::weak_ptr<torch::jit::CompilationUnit> weak_cu)
+      : strong_ptr_(c10::nullopt), weak_ptr_(std::move(weak_cu)) {}

   std::shared_ptr<torch::jit::CompilationUnit> getStrongRefOrThrow() const {
     TORCH_INTERNAL_ASSERT(strong_ptr_ != c10::nullopt);
@@ -1474,9 +1489,11 @@ struct WeakOrStrongCompilationUnit {
 // Constant in the graph and a Owning reference otherwise
 struct TORCH_API WeakOrStrongTypePtr {
   explicit WeakOrStrongTypePtr(WeakTypePtr weak)
-      : cu_(WeakOrStrongCompilationUnit(std::move(weak.cu_))), type_(std::move(weak.type_)) {}
+      : cu_(WeakOrStrongCompilationUnit(std::move(weak.cu_))),
+        type_(std::move(weak.type_)) {}
   explicit WeakOrStrongTypePtr(StrongTypePtr strong)
-      : cu_(WeakOrStrongCompilationUnit(std::move(strong.cu_))), type_(std::move(strong.type_)) {}
+      : cu_(WeakOrStrongCompilationUnit(std::move(strong.cu_))),
+        type_(std::move(strong.type_)) {}
   explicit WeakOrStrongTypePtr(WeakOrStrongCompilationUnit cu, TypePtr type)
       : cu_(std::move(cu)), type_(std::move(type)) {}
   WeakTypePtr asWeakTypePtr() const;
@@ -1493,7 +1510,6 @@ struct TORCH_API WeakOrStrongTypePtr {
   }
 };

-
 } // namespace c10

-#include  // IWYU pragma: keep
+#include // IWYU pragma: keep
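
A brief usage note for readers of the reformatted header (editorial addition, not part of the patch): IValue is the tagged union whose copy constructor, destructor, and tag queries are touched above, and the header's own doc comment demonstrates the boxing/unboxing pattern ("torch::Tensor my_tensor = my_ivalue.toTensor();"). Below is a minimal sketch of that round trip, assuming a translation unit built and linked against ATen/libtorch; the main() harness and the sample values are illustrative, not taken from the patch.

#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>

int main() {
  // Box a tensor into the tagged union. The Tensor payload is refcounted
  // via intrusive_ptr, which is what the copy constructor and destructor
  // reformatted in this patch are managing.
  at::Tensor t = at::ones({2, 2});
  c10::IValue iv(t);
  TORCH_CHECK(iv.isTensor());

  // Unbox it again, the pattern shown in the header's doc comment.
  at::Tensor back = iv.toTensor();

  // Small scalar payloads (Int, Double, Bool) are stored inline in the
  // union's 64-bit payload, with no heap allocation.
  c10::IValue i(42);
  c10::IValue d(3.14);
  TORCH_CHECK(i.isInt() && d.isDouble() && i.toInt() == 42);
  return 0;
}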