diff --git a/aten/src/ATen/native/vulkan/ops/VulkanPackedContext.h b/aten/src/ATen/native/vulkan/ops/VulkanPackedContext.h
index 343b02441f0509..b47805fc3b9972 100644
--- a/aten/src/ATen/native/vulkan/ops/VulkanPackedContext.h
+++ b/aten/src/ATen/native/vulkan/ops/VulkanPackedContext.h
@@ -15,12 +15,14 @@ class VulkanPackedContext {
  public:
   VulkanPackedContext() : packed_{c10::AnyType::get()} {}
+  VulkanPackedContext(const VulkanPackedContext&) = default;
+  VulkanPackedContext(VulkanPackedContext&&) = default;
 
   inline const c10::IValue get_val(int64_t i) const {
     return packed_.get(i);
   }
 
-  inline void set_val(int64_t i, c10::IValue val) const {
+  inline void set_val(int64_t i, const c10::IValue& val) const {
     return packed_.set(i, val);
   }
diff --git a/c10/core/impl/DeviceGuardImplInterface.h b/c10/core/impl/DeviceGuardImplInterface.h
index 2cfc8c443dff5c..fbec3d401bb24c 100644
--- a/c10/core/impl/DeviceGuardImplInterface.h
+++ b/c10/core/impl/DeviceGuardImplInterface.h
@@ -62,6 +62,14 @@ namespace impl {
  * those uses will be devirtualized.
  */
 struct C10_API DeviceGuardImplInterface {
+  DeviceGuardImplInterface() = default;
+  DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default;
+  DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) =
+      default;
+  DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default;
+  DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept =
+      default;
+
   /**
    * Return the type of device managed by this guard implementation.
    */
diff --git a/torch/csrc/api/include/torch/imethod.h b/torch/csrc/api/include/torch/imethod.h
index 1ee84b729c24fd..1d3bdd04449de6 100644
--- a/torch/csrc/api/include/torch/imethod.h
+++ b/torch/csrc/api/include/torch/imethod.h
@@ -22,6 +22,11 @@ class TORCH_API IMethod {
   using IValueList = std::vector<c10::IValue>;
   using IValueMap = std::unordered_map<std::string, c10::IValue>;
 
+  IMethod() = default;
+  IMethod(const IMethod&) = default;
+  IMethod& operator=(const IMethod&) = default;
+  IMethod(IMethod&&) noexcept = default;
+  IMethod& operator=(IMethod&&) noexcept = default;
   virtual ~IMethod() = default;
 
   virtual c10::IValue operator()(
diff --git a/torch/csrc/api/include/torch/nn/cloneable.h b/torch/csrc/api/include/torch/nn/cloneable.h
index bd41c8d727108b..aaf30d90974b11 100644
--- a/torch/csrc/api/include/torch/nn/cloneable.h
+++ b/torch/csrc/api/include/torch/nn/cloneable.h
@@ -21,7 +21,7 @@ namespace nn {
 /// because then storing a module would always require templatizing it.
 template <typename Derived>
 // NOLINTNEXTLINE(bugprone-exception-escape)
-class Cloneable : public virtual Module {
+class Cloneable : public Module {
  public:
   using Module::Module;
 
@@ -90,7 +90,7 @@ class Cloneable : public virtual Module {
         clone != nullptr,
         "Attempted to clone submodule, but it is of a "
         "different type than the submodule it was to be cloned into");
-    static_cast<Derived&>(*this) = std::move(*clone);
+    static_cast<Derived&>(*this) = *clone;
   }
 };
diff --git a/torch/csrc/api/include/torch/nn/module.h b/torch/csrc/api/include/torch/nn/module.h
index 20d1024ad41073..de8d243533a787 100644
--- a/torch/csrc/api/include/torch/nn/module.h
+++ b/torch/csrc/api/include/torch/nn/module.h
@@ -81,6 +81,10 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
   /// The name of the submodule is inferred via RTTI (if possible) the first
   /// time `.name()` is invoked.
   Module();
+  Module(const Module&) = default;
+  Module& operator=(const Module&) = default;
+  Module(Module&&) noexcept = default;
+  Module& operator=(Module&&) noexcept = default;
 
   virtual ~Module() = default;
diff --git a/torch/csrc/api/include/torch/nn/modules/batchnorm.h b/torch/csrc/api/include/torch/nn/modules/batchnorm.h
index 943e80bf01b191..3264d90bd6ed7a 100644
--- a/torch/csrc/api/include/torch/nn/modules/batchnorm.h
+++ b/torch/csrc/api/include/torch/nn/modules/batchnorm.h
@@ -137,7 +137,23 @@ class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> {
   }
 
   /// Pretty prints the `BatchNorm{1,2,3}d` module into the given `stream`.
-  void pretty_print(std::ostream& stream) const override;
+  void pretty_print(std::ostream& stream) const override {
+    stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
+           << this->options.num_features() << ", "
+           << "eps=" << this->options.eps() << ", "
+           << "momentum=";
+
+    if (this->options.momentum().has_value()) {
+      stream << this->options.momentum().value();
+    } else {
+      stream << "None";
+    }
+
+    stream << ", "
+           << "affine=" << this->options.affine() << ", "
+           << "track_running_stats=" << this->options.track_running_stats()
+           << ")";
+  }
 };
 
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm1d
diff --git a/torch/csrc/api/include/torch/nn/modules/container/any_value.h b/torch/csrc/api/include/torch/nn/modules/container/any_value.h
index 1f8cc55f59284b..3e6c23ef977ca8 100644
--- a/torch/csrc/api/include/torch/nn/modules/container/any_value.h
+++ b/torch/csrc/api/include/torch/nn/modules/container/any_value.h
@@ -91,6 +91,8 @@ class AnyValue {
   struct Placeholder {
     explicit Placeholder(const std::type_info& type_info_) noexcept
         : type_info(type_info_) {}
+    Placeholder(const Placeholder&) = default;
+    Placeholder(Placeholder&&) = default;
     virtual ~Placeholder() = default;
     virtual std::unique_ptr<Placeholder> clone() const {
       TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`");
diff --git a/torch/csrc/api/include/torch/nn/modules/instancenorm.h b/torch/csrc/api/include/torch/nn/modules/instancenorm.h
index 3b22e6ee011b83..b29ad007de7355 100644
--- a/torch/csrc/api/include/torch/nn/modules/instancenorm.h
+++ b/torch/csrc/api/include/torch/nn/modules/instancenorm.h
@@ -45,7 +45,15 @@ class InstanceNormImpl
   }
 
   /// Pretty prints the `InstanceNorm{1,2,3}d` module into the given `stream`.
-  void pretty_print(std::ostream& stream) const override;
+  void pretty_print(std::ostream& stream) const override {
+    stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d("
+           << this->options.num_features() << ", "
+           << "eps=" << this->options.eps() << ", "
+           << "momentum=" << this->options.momentum() << ", "
+           << "affine=" << this->options.affine() << ", "
+           << "track_running_stats=" << this->options.track_running_stats()
+           << ")";
+  }
 };
 
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm1d
diff --git a/torch/csrc/api/include/torch/nn/options/rnn.h b/torch/csrc/api/include/torch/nn/options/rnn.h
index 479ff82627eddb..133acc500276d2 100644
--- a/torch/csrc/api/include/torch/nn/options/rnn.h
+++ b/torch/csrc/api/include/torch/nn/options/rnn.h
@@ -164,8 +164,6 @@ struct TORCH_API RNNCellOptionsBase {
       int64_t hidden_size,
       bool bias,
       int64_t num_chunks);
-  virtual ~RNNCellOptionsBase() = default;
-
   TORCH_ARG(int64_t, input_size);
   TORCH_ARG(int64_t, hidden_size);
   TORCH_ARG(bool, bias);
diff --git a/torch/csrc/api/include/torch/optim/adagrad.h b/torch/csrc/api/include/torch/optim/adagrad.h
index 21b907ee9eef50..4b2ff3c676b3d7 100644
--- a/torch/csrc/api/include/torch/optim/adagrad.h
+++ b/torch/csrc/api/include/torch/optim/adagrad.h
@@ -34,7 +34,6 @@ struct TORCH_API AdagradOptions
   TORCH_API friend bool operator==(
       const AdagradOptions& lhs,
       const AdagradOptions& rhs);
-  ~AdagradOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -45,12 +44,16 @@ struct TORCH_API AdagradParamState
   TORCH_ARG(int64_t, step) = 0;
 
  public:
+  AdagradParamState() = default;
+  AdagradParamState(const AdagradParamState&) = default;
+  AdagradParamState& operator=(const AdagradParamState&) = default;
+  AdagradParamState(AdagradParamState&&) noexcept = default;
+  AdagradParamState& operator=(AdagradParamState&&) noexcept = default;
   void serialize(torch::serialize::InputArchive& archive) override;
   void serialize(torch::serialize::OutputArchive& archive) const override;
   TORCH_API friend bool operator==(
       const AdagradParamState& lhs,
       const AdagradParamState& rhs);
-  ~AdagradParamState() override = default;
 };
 
 class TORCH_API Adagrad : public Optimizer {
diff --git a/torch/csrc/api/include/torch/optim/adam.h b/torch/csrc/api/include/torch/optim/adam.h
index 1458c7c2b5b159..6e5e02d82c5442 100644
--- a/torch/csrc/api/include/torch/optim/adam.h
+++ b/torch/csrc/api/include/torch/optim/adam.h
@@ -32,7 +32,6 @@ struct TORCH_API AdamOptions : public OptimizerCloneableOptions<AdamOptions> {
   TORCH_API friend bool operator==(
       const AdamOptions& lhs,
       const AdamOptions& rhs);
-  ~AdamOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -50,7 +49,6 @@ struct TORCH_API AdamParamState
   TORCH_API friend bool operator==(
       const AdamParamState& lhs,
       const AdamParamState& rhs);
-  ~AdamParamState() override = default;
 };
 
 class TORCH_API Adam : public Optimizer {
diff --git a/torch/csrc/api/include/torch/optim/adamw.h b/torch/csrc/api/include/torch/optim/adamw.h
index b6b4c25ff1bdc0..a63d7fc32d4554 100644
--- a/torch/csrc/api/include/torch/optim/adamw.h
+++ b/torch/csrc/api/include/torch/optim/adamw.h
@@ -32,7 +32,6 @@ struct TORCH_API AdamWOptions : public OptimizerCloneableOptions<AdamWOptions> {
   TORCH_API friend bool operator==(
       const AdamWOptions& lhs,
       const AdamWOptions& rhs);
-  ~AdamWOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -50,7 +49,6 @@ struct TORCH_API AdamWParamState
   TORCH_API friend bool operator==(
       const AdamWParamState& lhs,
       const AdamWParamState& rhs);
-  ~AdamWParamState() override = default;
 };
 
 class TORCH_API AdamW : public Optimizer {
diff --git a/torch/csrc/api/include/torch/optim/lbfgs.h b/torch/csrc/api/include/torch/optim/lbfgs.h
index 7d7204caf3ee1e..99aa35d36e4b5d 100644
--- a/torch/csrc/api/include/torch/optim/lbfgs.h
+++ b/torch/csrc/api/include/torch/optim/lbfgs.h
@@ -29,7 +29,6 @@ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions<LBFGSOptions> {
   TORCH_API friend bool operator==(
       const LBFGSOptions& lhs,
       const LBFGSOptions& rhs);
-  ~LBFGSOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -54,7 +53,6 @@ struct TORCH_API LBFGSParamState
   TORCH_API friend bool operator==(
       const LBFGSParamState& lhs,
       const LBFGSParamState& rhs);
-  ~LBFGSParamState() override = default;
 };
 
 class TORCH_API LBFGS : public Optimizer {
diff --git a/torch/csrc/api/include/torch/optim/optimizer.h b/torch/csrc/api/include/torch/optim/optimizer.h
index 5982e7186fd6d2..1f448e4fffd61c 100644
--- a/torch/csrc/api/include/torch/optim/optimizer.h
+++ b/torch/csrc/api/include/torch/optim/optimizer.h
@@ -34,6 +34,11 @@ namespace optim {
 
 class TORCH_API OptimizerParamState {
  public:
+  OptimizerParamState() = default;
+  OptimizerParamState(const OptimizerParamState&) = default;
+  OptimizerParamState& operator=(const OptimizerParamState&) = default;
+  OptimizerParamState(OptimizerParamState&&) noexcept = default;
+  OptimizerParamState& operator=(OptimizerParamState&&) noexcept = default;
   virtual std::unique_ptr<OptimizerParamState> clone() const;
   virtual void serialize(torch::serialize::InputArchive& archive);
   virtual void serialize(torch::serialize::OutputArchive& archive) const;
@@ -49,6 +54,11 @@ class OptimizerCloneableParamState : public OptimizerParamState {
 
 class TORCH_API OptimizerOptions {
  public:
+  OptimizerOptions() = default;
+  OptimizerOptions(const OptimizerOptions&) = default;
+  OptimizerOptions& operator=(const OptimizerOptions&) = default;
+  OptimizerOptions(OptimizerOptions&&) noexcept = default;
+  OptimizerOptions& operator=(OptimizerOptions&&) noexcept = default;
   virtual std::unique_ptr<OptimizerOptions> clone() const;
   virtual void serialize(torch::serialize::InputArchive& archive);
   virtual void serialize(torch::serialize::OutputArchive& archive) const;
diff --git a/torch/csrc/api/include/torch/optim/rmsprop.h b/torch/csrc/api/include/torch/optim/rmsprop.h
index 9839968032eca4..69a2e27993d5b7 100644
--- a/torch/csrc/api/include/torch/optim/rmsprop.h
+++ b/torch/csrc/api/include/torch/optim/rmsprop.h
@@ -37,7 +37,6 @@ struct TORCH_API RMSpropOptions
   TORCH_API friend bool operator==(
       const RMSpropOptions& lhs,
       const RMSpropOptions& rhs);
-  ~RMSpropOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -55,7 +54,6 @@ struct TORCH_API RMSpropParamState
   TORCH_API friend bool operator==(
       const RMSpropParamState& lhs,
       const RMSpropParamState& rhs);
-  ~RMSpropParamState() override = default;
 };
 
 class TORCH_API RMSprop : public Optimizer {
diff --git a/torch/csrc/api/include/torch/optim/sgd.h b/torch/csrc/api/include/torch/optim/sgd.h
index 8c752c0c9fade0..85e9aba7ba48f7 100644
--- a/torch/csrc/api/include/torch/optim/sgd.h
+++ b/torch/csrc/api/include/torch/optim/sgd.h
@@ -34,7 +34,6 @@ struct TORCH_API SGDOptions : public OptimizerCloneableOptions<SGDOptions> {
   TORCH_API friend bool operator==(
       const SGDOptions& lhs,
       const SGDOptions& rhs);
-  ~SGDOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -49,7 +48,6 @@ struct TORCH_API SGDParamState
   TORCH_API friend bool operator==(
       const SGDParamState& lhs,
       const SGDParamState& rhs);
-  ~SGDParamState() override = default;
 };
 
 class TORCH_API SGD : public Optimizer {
diff --git a/torch/csrc/api/src/nn/modules/activation.cpp b/torch/csrc/api/src/nn/modules/activation.cpp
index 05214d5402ef2c..56218ad091de5d 100644
--- a/torch/csrc/api/src/nn/modules/activation.cpp
+++ b/torch/csrc/api/src/nn/modules/activation.cpp
@@ -432,7 +432,7 @@ void ThresholdImpl::pretty_print(std::ostream& stream) const {
 
 MultiheadAttentionImpl::MultiheadAttentionImpl(
     const MultiheadAttentionOptions& options_)
-    : Module("torch::nn::MultiheadAttention"), options(options_) {
+    : Cloneable("torch::nn::MultiheadAttention"), options(options_) {
   // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
   reset();
 }
diff --git a/torch/csrc/api/src/nn/modules/batchnorm.cpp b/torch/csrc/api/src/nn/modules/batchnorm.cpp
index 105bd16f9d688e..d8744c32d9cab5 100644
--- a/torch/csrc/api/src/nn/modules/batchnorm.cpp
+++ b/torch/csrc/api/src/nn/modules/batchnorm.cpp
@@ -14,25 +14,6 @@
 namespace torch {
 namespace nn {
 
-template <size_t D, typename Derived>
-void BatchNormImplBase<D, Derived>::pretty_print(std::ostream& stream) const {
-  stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
-         << this->options.num_features() << ", "
-         << "eps=" << this->options.eps() << ", "
-         << "momentum=";
-
-  if (this->options.momentum().has_value()) {
-    stream << this->options.momentum().value();
-  } else {
-    stream << "None";
-  }
-
-  stream << ", "
-         << "affine=" << this->options.affine() << ", "
-         << "track_running_stats=" << this->options.track_running_stats()
-         << ")";
-}
-
 void BatchNorm1dImpl::_check_input_dim(const Tensor& input) {
   TORCH_CHECK(
       input.dim() == 2 || input.dim() == 3,
diff --git a/torch/csrc/api/src/nn/modules/instancenorm.cpp b/torch/csrc/api/src/nn/modules/instancenorm.cpp
index 99ab1d7d670898..8e4201c01a6142 100644
--- a/torch/csrc/api/src/nn/modules/instancenorm.cpp
+++ b/torch/csrc/api/src/nn/modules/instancenorm.cpp
@@ -4,17 +4,6 @@
 namespace torch {
 namespace nn {
 
-template <size_t D, typename Derived>
-void InstanceNormImpl<D, Derived>::pretty_print(std::ostream& stream) const {
-  stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d("
-         << this->options.num_features() << ", "
-         << "eps=" << this->options.eps() << ", "
-         << "momentum=" << this->options.momentum() << ", "
-         << "affine=" << this->options.affine() << ", "
-         << "track_running_stats=" << this->options.track_running_stats()
-         << ")";
-}
-
 void InstanceNorm1dImpl::_check_input_dim(const Tensor& input) {
   if (input.dim() != 3 && input.dim() != 2) {
     TORCH_CHECK(
diff --git a/torch/csrc/distributed/c10d/Store.hpp b/torch/csrc/distributed/c10d/Store.hpp
index 12663eedec1e10..3c0ae960ff7ca7 100644
--- a/torch/csrc/distributed/c10d/Store.hpp
+++ b/torch/csrc/distributed/c10d/Store.hpp
@@ -28,6 +28,9 @@ class TORCH_API Store : public torch::CustomClassHolder {
   explicit Store(const std::chrono::milliseconds& timeout)
       : timeout_(timeout) {}
 
+  Store(const Store&) = default;
+  Store(Store&&) noexcept = default;
+
   ~Store() override = default;
 
   void set(const std::string& key, const std::string& value);
diff --git a/torch/csrc/distributed/rpc/py_rref.h b/torch/csrc/distributed/rpc/py_rref.h
index 7710c6acbc728b..432141a97cf5c0 100644
--- a/torch/csrc/distributed/rpc/py_rref.h
+++ b/torch/csrc/distributed/rpc/py_rref.h
@@ -18,6 +18,7 @@ class PYBIND11_EXPORT PyRRef {
   // for more explanations.
   explicit PyRRef(const py::object& value, const py::object& type_hint);
   explicit PyRRef(c10::intrusive_ptr<RRef> rref);
+  PyRRef(const PyRRef&) = default;
   ~PyRRef();
 
   bool isOwner() const;
diff --git a/torch/csrc/distributed/rpc/rref_proto.cpp b/torch/csrc/distributed/rpc/rref_proto.cpp
index fff094596b5b3e..737a4f07a1433f 100644
--- a/torch/csrc/distributed/rpc/rref_proto.cpp
+++ b/torch/csrc/distributed/rpc/rref_proto.cpp
@@ -142,8 +142,7 @@ std::unique_ptr<RRefUserDelete> RRefUserDelete::fromMessage(
     const Message& message) {
   auto pair =
       ForkMessageBase::fromMessage(message, MessageType::RREF_USER_DELETE);
-  return std::make_unique<RRefUserDelete>(
-      RRefUserDelete(pair.first, pair.second));
+  return std::make_unique<RRefUserDelete>(pair.first, pair.second);
 }
 
 std::unique_ptr<RemoteRet> RemoteRet::fromMessage(const Message& message) {
diff --git a/torch/csrc/distributed/rpc/tensorpipe_agent.cpp b/torch/csrc/distributed/rpc/tensorpipe_agent.cpp
index c0a4c4b0b44f74..a071f5885e807d 100644
--- a/torch/csrc/distributed/rpc/tensorpipe_agent.cpp
+++ b/torch/csrc/distributed/rpc/tensorpipe_agent.cpp
@@ -161,11 +161,9 @@ C10_DEFINE_REGISTRY_WITHOUT_WARNING(
 
 const std::string& TensorPipeAgent::guessAddress() {
   static const std::string uvAddress = []() {
-    tensorpipe::Error error;
-    std::string result;
     char* ifnameEnv = std::getenv(kSocketIfnameEnvVar.c_str());
     if (ifnameEnv != nullptr) {
-      std::tie(error, result) =
+      auto [error, result] =
           tensorpipe::transport::uv::lookupAddrForIface(ifnameEnv);
       if (error) {
         LOG(WARNING) << "Failed to look up the IP address for interface "
@@ -173,15 +171,13 @@ const std::string& TensorPipeAgent::guessAddress() {
                      << kDefaultUvAddress;
         return kDefaultUvAddress;
       }
-    } else {
-      std::tie(error, result) =
-          tensorpipe::transport::uv::lookupAddrForHostname();
-      if (error) {
-        LOG(WARNING) << "Failed to look up the IP address for the hostname ("
-                     << error.what() << "), defaulting to "
-                     << kDefaultUvAddress;
-        return kDefaultUvAddress;
-      }
+      return result;
+    }
+    auto [error, result] = tensorpipe::transport::uv::lookupAddrForHostname();
+    if (error) {
+      LOG(WARNING) << "Failed to look up the IP address for the hostname ("
+                   << error.what() << "), defaulting to " << kDefaultUvAddress;
+      return kDefaultUvAddress;
     }
     return result;
   }();
@@ -1226,8 +1222,8 @@ const std::string& TensorPipeAgent::findWorkerURL(
 
 void TensorPipeAgent::updateGroupMembership(
     const WorkerInfo& workerInfo,
-    const std::vector<c10::Device> devices,
-    const std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
+    const std::vector<c10::Device>& devices,
+    const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
     bool isJoin) {
   std::string name = workerInfo.name_;
   worker_id_t id = workerInfo.id_;
diff --git a/torch/csrc/distributed/rpc/tensorpipe_agent.h b/torch/csrc/distributed/rpc/tensorpipe_agent.h
index f38b970adfbaf5..ef733bcfb18996 100644
--- a/torch/csrc/distributed/rpc/tensorpipe_agent.h
+++ b/torch/csrc/distributed/rpc/tensorpipe_agent.h
@@ -194,8 +194,8 @@ class TORCH_API TensorPipeAgent : public RpcAgent {
   std::vector<WorkerInfo> getWorkerInfos() const override;
   void updateGroupMembership(
       const WorkerInfo& workerInfo,
-      const std::vector<c10::Device> devices,
-      const std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
+      const std::vector<c10::Device>& devices,
+      const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
       bool isJoin);
 
   std::unordered_map<std::string, std::string> getMetrics() override;
diff --git a/torch/csrc/jit/mobile/train/optim/sgd.h b/torch/csrc/jit/mobile/train/optim/sgd.h
index a1e8064f314253..0d82d24ddd8ac5 100644
--- a/torch/csrc/jit/mobile/train/optim/sgd.h
+++ b/torch/csrc/jit/mobile/train/optim/sgd.h
@@ -22,7 +22,6 @@ class SGDParamState {
         static_cast<const SGDParamState&>(*this));
   }
   friend bool operator==(const SGDParamState& lhs, const SGDParamState& rhs);
-  ~SGDParamState() = default;
 };
 
 struct TORCH_API SGDOptions {
@@ -40,7 +39,6 @@ struct TORCH_API SGDOptions {
   TORCH_API friend bool operator==(
       const SGDOptions& lhs,
       const SGDOptions& rhs);
-  ~SGDOptions() = default;
 };
 
 /// Stores parameters in the param_group and stores a pointer to the SGDOptions
diff --git a/torch/csrc/jit/runtime/interpreter.cpp b/torch/csrc/jit/runtime/interpreter.cpp
index 34822748be14cf..e7c4a8f1d9bd4f 100644
--- a/torch/csrc/jit/runtime/interpreter.cpp
+++ b/torch/csrc/jit/runtime/interpreter.cpp
@@ -1100,7 +1100,6 @@ Code::Code(
           remaining_bailout_depth)) {}
 
 Code::Code(CodeImpl* codeImpl) : pImpl(codeImpl) {}
-Code::~Code() = default;
 
 MobileCode::MobileCode(
     const std::shared_ptr<Graph>& graph,
@@ -1117,8 +1116,6 @@ MobileCode::MobileCode(
           emit_promoted_ops,
           remaining_bailout_depth)) {}
 
-MobileCode::~MobileCode() = default;
-
 const std::vector<GraphExecutor*>& Code::grad_executors() {
   return pImpl->grad_executors();
 }
@@ -1172,7 +1169,6 @@ InterpreterState::InterpreterState(const Code& code, TaskLauncher taskLauncher)
     : pImpl(c10::make_intrusive<InterpreterStateImpl>(
           code,
          std::move(taskLauncher))) {}
-InterpreterState::~InterpreterState() = default;
 
 void InterpreterState::run(Stack& stack) {
   static_cast<InterpreterStateImpl*>(pImpl.get())->run(stack);
diff --git a/torch/csrc/jit/runtime/interpreter.h b/torch/csrc/jit/runtime/interpreter.h
index 3531ea62c353f7..75e4ebc370b3c9 100644
--- a/torch/csrc/jit/runtime/interpreter.h
+++ b/torch/csrc/jit/runtime/interpreter.h
@@ -9,11 +9,6 @@
 #include
 #include
 
-C10_CLANG_DIAGNOSTIC_PUSH()
-#if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor")
-C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy-dtor")
-#endif
-
 C10_DECLARE_bool(torch_jit_disable_warning_prints);
 C10_DECLARE_bool(torch_jit_enable_rethrow_caught_exception);
@@ -55,7 +50,6 @@ struct TORCH_API Code {
       const std::shared_ptr<Graph>& graph,
       std::string function_name,
       size_t remaining_bailout_depth = 0);
-  ~Code();
 
   const std::vector<GraphExecutor*>& grad_executors();
   const std::vector<GraphExecutor*>& diff_graph_op_executors();
@@ -89,7 +83,6 @@ struct TORCH_API MobileCode : Code {
       bool support_default_args_before_out = true,
       bool emit_promoted_ops = true,
       size_t remaining_bailout_depth = 0);
-  ~MobileCode();
 };
 
 struct InterpreterState {
@@ -99,7 +92,6 @@ struct InterpreterState {
   TORCH_API void run(Stack& stack);
   TORCH_API c10::intrusive_ptr<Future> runAsync(Stack& stack);
   c10::intrusive_ptr<Future> getFuture();
-  TORCH_API ~InterpreterState();
 
  private:
   InterpreterState(c10::intrusive_ptr<c10::ivalue::Object> pImpl);
@@ -127,18 +119,19 @@ struct Suspend : public std::exception {
 // through (and only through) the forward pass manually, other
 // thread local settings are propagated with ThreadLocalState
 struct InterpreterContinuation {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   InterpreterContinuation(
-      const InterpreterState& state_,
+      InterpreterState state_,
       Stack stack_,
       int64_t dist_autograd_context_id = 0,
       c10::optional<at::ThreadLocalState> tls_state = c10::nullopt)
-      : state(state_),
+      : state(std::move(state_)),
         stack(std::move(stack_)),
-        tls_state_(std::move(tls_state)) {
+        tls_state_(std::move(tls_state))
 #ifdef USE_DISTRIBUTED
-    dist_autograd_context_id_ = dist_autograd_context_id;
+        ,
+        dist_autograd_context_id_(dist_autograd_context_id)
 #endif
+  {
   }
 
   void operator()();
@@ -163,5 +156,3 @@ TORCH_API std::vector<StackEntry> currentCallstack();
 TORCH_API std::vector<std::string> currentModuleHierarchy();
 
 } // namespace torch::jit
-
-C10_CLANG_DIAGNOSTIC_POP()
diff --git a/torch/csrc/jit/serialization/python_print.cpp b/torch/csrc/jit/serialization/python_print.cpp
index d6e60f06494d76..cac31c6ce58684 100644
--- a/torch/csrc/jit/serialization/python_print.cpp
+++ b/torch/csrc/jit/serialization/python_print.cpp
@@ -1677,8 +1677,6 @@ uint64_t PythonPrint::minVersion() const {
   return pImpl->min_version_;
 }
 
-PythonPrint::~PythonPrint() = default;
-
 static std::vector<IValue> traverseIValueAndGetObjects(IValue ivalue) {
   std::vector<IValue> result;
   std::vector<IValue> stack;
diff --git a/torch/csrc/jit/serialization/python_print.h b/torch/csrc/jit/serialization/python_print.h
index dd9f3d4e06507a..ede364ec02f81e 100644
--- a/torch/csrc/jit/serialization/python_print.h
+++ b/torch/csrc/jit/serialization/python_print.h
@@ -42,8 +42,6 @@ struct TORCH_API PythonPrint {
   const SourceRangeRecords& ranges() const;
   uint64_t minVersion() const;
 
-  ~PythonPrint();
-
  private:
   std::shared_ptr<PythonPrintImpl> pImpl;
 };
diff --git a/torch/csrc/jit/tensorexpr/reduction.h b/torch/csrc/jit/tensorexpr/reduction.h
index ac802a2ae6910f..845d28bbe55df9 100644
--- a/torch/csrc/jit/tensorexpr/reduction.h
+++ b/torch/csrc/jit/tensorexpr/reduction.h
@@ -29,7 +29,6 @@ class TORCH_API Reducer {
   template <typename RI>
   Reducer(ExprHandle init, RI interaction)
       : init_(init.node()), interaction_(std::move(interaction)) {}
-  virtual ~Reducer() = default;
 
   ExprPtr initializer() const {
     return init_;
diff --git a/torch/csrc/lazy/core/tensor.h b/torch/csrc/lazy/core/tensor.h
index 2506b096c4c6c4..3a15c91c03452d 100644
--- a/torch/csrc/lazy/core/tensor.h
+++ b/torch/csrc/lazy/core/tensor.h
@@ -67,6 +67,8 @@ class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
   // used to rely on a LazyTensor obj with a null Data can now rely on a null
   // LazyTensorPtr instead.
   LazyTensor() = delete;
+  LazyTensor(const LazyTensor&) = default;
+  LazyTensor(LazyTensor&&) noexcept = default;
 
   ~LazyTensor() override = default;
diff --git a/torch/csrc/profiler/orchestration/observer.h b/torch/csrc/profiler/orchestration/observer.h
index 5d42f9234c381f..86f047e65396a4 100644
--- a/torch/csrc/profiler/orchestration/observer.h
+++ b/torch/csrc/profiler/orchestration/observer.h
@@ -49,7 +49,6 @@ struct TORCH_API ExperimentalConfig {
       std::vector<std::string> performance_events = {},
       bool enable_cuda_sync_events = false,
       bool adjust_timestamps = false);
-  ~ExperimentalConfig() = default;
   explicit operator bool() const;
 
   std::vector<std::string> profiler_metrics;
@@ -88,7 +87,6 @@ struct TORCH_API ProfilerConfig {
       bool with_flops = false,
       bool with_modules = false,
       ExperimentalConfig experimental_config = ExperimentalConfig());
-  ~ProfilerConfig() = default;
 
   bool disabled() const;
   bool global() const;
diff --git a/torch/csrc/utils/invalid_arguments.cpp b/torch/csrc/utils/invalid_arguments.cpp
index cd85931c3c7013..acef176d175109 100644
--- a/torch/csrc/utils/invalid_arguments.cpp
+++ b/torch/csrc/utils/invalid_arguments.cpp
@@ -6,7 +6,9 @@
 
 #include <algorithm>
 #include <memory>
+#include <string_view>
 #include <unordered_map>
+#include <unordered_set>
 
 namespace torch {
@@ -17,12 +19,17 @@ std::string py_typename(PyObject* object) {
 }
 
 struct Type {
+  Type() = default;
+  Type(const Type&) = default;
+  Type& operator=(const Type&) = default;
+  Type(Type&&) noexcept = default;
+  Type& operator=(Type&&) noexcept = default;
   virtual bool is_matching(PyObject* object) = 0;
   virtual ~Type() = default;
 };
 
 struct SimpleType : public Type {
-  SimpleType(std::string& name) : name(name){};
+  SimpleType(std::string_view name) : name(name){};
 
   bool is_matching(PyObject* object) override {
     return py_typename(object) == name;
@@ -36,11 +43,10 @@ struct MultiType : public Type {
       : types(accepted_types){};
 
   bool is_matching(PyObject* object) override {
-    auto it = std::find(types.begin(), types.end(), py_typename(object));
-    return it != types.end();
+    return types.find(py_typename(object)) != types.end();
   }
 
-  std::vector<std::string> types;
+  std::unordered_set<std::string> types;
 };
 
 struct NullableType : public Type {
@@ -93,8 +99,8 @@ struct SequenceType : public Type {
 };
 
 struct Argument {
-  Argument(std::string name, std::unique_ptr<Type> type)
-      : name(std::move(name)), type(std::move(type)){};
+  Argument(std::string_view name, std::unique_ptr<Type> type)
+      : name(name), type(std::move(type)){};
 
   std::string name;
   std::unique_ptr<Type> type;
@@ -118,13 +124,13 @@ struct Option {
   bool has_out;
 };
 
-std::vector<std::string> _splitString(
-    const std::string& s,
-    const std::string& delim) {
-  std::vector<std::string> tokens;
+std::vector<std::string_view> _splitString(
+    std::string_view s,
+    std::string_view delim) {
+  std::vector<std::string_view> tokens;
   size_t start = 0;
   size_t end = 0;
-  while ((end = s.find(delim, start)) != std::string::npos) {
+  while ((end = s.find(delim, start)) != std::string_view::npos) {
     tokens.push_back(s.substr(start, end - start));
     start = end + delim.length();
   }
@@ -132,7 +138,7 @@ std::vector<std::string> _splitString(
   return tokens;
 }
 
-std::unique_ptr<Type> _buildType(std::string type_name, bool is_nullable) {
+std::unique_ptr<Type> _buildType(std::string_view type_name, bool is_nullable) {
   std::unique_ptr<Type> result;
   if (type_name == "float") {
     result = std::make_unique<MultiType>(MultiType{"float", "int", "long"});
@@ -140,15 +146,17 @@ std::unique_ptr<Type> _buildType(std::string type_name, bool is_nullable) {
     result = std::make_unique<MultiType>(MultiType{"int", "long"});
   } else if (type_name.find("tuple[") == 0) {
     auto type_list = type_name.substr(6);
-    type_list.pop_back();
+    type_list.remove_suffix(1);
+    auto sub_string_views = _splitString(type_list, ",");
     std::vector<std::unique_ptr<Type>> types;
-    for (auto& type : _splitString(type_list, ","))
+    types.reserve(sub_string_views.size());
+    for (auto& type : sub_string_views)
       types.emplace_back(_buildType(type, false));
    result = std::make_unique<TupleType>(std::move(types));
   } else if (type_name.find("sequence[") == 0) {
     auto subtype = type_name.substr(9);
-    subtype.pop_back();
+    subtype.remove_suffix(1);
     result = std::make_unique<SequenceType>(_buildType(subtype, false));
   } else {
     result = std::make_unique<SimpleType>(type_name);
   }
@@ -194,7 +202,7 @@ std::pair<Option, std::string> _parseOption(
     if (arg[type_start_idx] == '[') {
       is_nullable = true;
       type_start_idx++;
-      arg.erase(arg.length() - std::string(" or None]").length());
+      arg.remove_suffix(std::string(" or None]").length());
     }
 
     auto type_end_idx = arg.find_last_of(' ');
@@ -203,17 +211,15 @@
     // "type ... name" => "type ... name"
    //       ^              ^
     auto dots_idx = arg.find("...");
-    if (dots_idx != std::string::npos)
+    if (dots_idx != std::string_view::npos)
       type_end_idx -= 4;
-    std::string type_name =
-        arg.substr(type_start_idx, type_end_idx - type_start_idx);
-    std::string name = arg.substr(name_start_idx);
-
+    auto type_name = arg.substr(type_start_idx, type_end_idx - type_start_idx);
+    auto name = arg.substr(name_start_idx);
     arguments.emplace_back(name, _buildType(type_name, is_nullable));
   }
 
-  bool is_variadic = option_str.find("...") != std::string::npos;
+  bool is_variadic = option_str.find("...") != std::string_view::npos;
 
   return std::pair<Option, std::string>(
       Option(std::move(arguments), is_variadic, has_out),
      std::move(printable_option));