[3/N] Add -Wdeprecated and related fixes (pytorch#109698)
This PR follows pytorch#108626. Hopefully we can enable the -Wdeprecated warning itself in the next PR.

Pull Request resolved: pytorch#109698
Approved by: https://github.com/Skylion007, https://github.com/ezyang
cyyever authored and pytorchmergebot committed Oct 3, 2023
1 parent 836ba64 commit c31fcda
Showing 33 changed files with 119 additions and 118 deletions.
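
For context on what the warning catches: since C++11, a class with a user-declared destructor still gets implicitly-generated copy operations, but that implicit generation is deprecated, and -Wdeprecated surfaces it (the exact sub-flag varies by compiler, e.g. Clang's -Wdeprecated-copy-with-user-provided-dtor). The recurring fix in this commit is to declare the special members explicitly. A minimal sketch, not taken from the diff:

    struct Before {
      virtual ~Before() = default;
      // User-declared destructor: the implicitly-generated copy constructor
      // and copy assignment operator are deprecated, so copying a Before can
      // trigger -Wdeprecated diagnostics.
    };

    struct After {
      After() = default;
      After(const After&) = default;                 // rule of five: declare
      After& operator=(const After&) = default;      // every special member
      After(After&&) noexcept = default;             // explicitly so nothing
      After& operator=(After&&) noexcept = default;  // deprecated is implicit
      virtual ~After() = default;
    };

    int main() {
      After a;
      After b = a;  // copy constructor is explicitly defaulted: no warning
      b = After{};  // move assignment likewise
      return 0;
    }
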
4 changes: 3 additions & 1 deletion aten/src/ATen/native/vulkan/ops/VulkanPackedContext.h
@@ -15,12 +15,14 @@ class VulkanPackedContext {

  public:
   VulkanPackedContext() : packed_{c10::AnyType::get()} {}
+  VulkanPackedContext(const VulkanPackedContext&) = default;
+  VulkanPackedContext(VulkanPackedContext&&) = default;

   inline const c10::IValue get_val(int64_t i) const {
     return packed_.get(i);
   }

-  inline void set_val(int64_t i, c10::IValue val) const {
+  inline void set_val(int64_t i, const c10::IValue& val) const {
     return packed_.set(i, val);
   }

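A note on the set_val signature change: c10::IValue can hold a tensor, so taking it by value pays for a copy on every call, while a const reference binds to the caller's object directly. The same idea with plain standard types, as a runnable sketch:

    #include <iostream>
    #include <string>

    // By value: the argument is copied (or moved) on every call.
    void by_value(std::string s) { std::cout << s.size() << '\n'; }

    // By const reference: binds to the caller's object; no copy is made.
    void by_cref(const std::string& s) { std::cout << s.size() << '\n'; }

    int main() {
      const std::string big(1 << 20, 'x');  // expensive to copy (~1 MiB)
      by_value(big);  // copies the whole buffer
      by_cref(big);   // copies nothing
    }
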
8 changes: 8 additions & 0 deletions c10/core/impl/DeviceGuardImplInterface.h
@@ -62,6 +62,14 @@ namespace impl {
  * those uses will be devirtualized.
  */
 struct C10_API DeviceGuardImplInterface {
+  DeviceGuardImplInterface() = default;
+  DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default;
+  DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) =
+      default;
+  DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default;
+  DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept =
+      default;
+
   /**
    * Return the type of device managed by this guard implementation.
    */

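The moves above are defaulted and marked noexcept. That qualifier has a practical effect beyond documentation: standard containers only move elements during reallocation when the move constructor cannot throw, falling back to copies otherwise. A self-contained sketch (the Guard type is hypothetical):

    #include <type_traits>
    #include <vector>

    struct Guard {
      Guard() = default;
      Guard(const Guard&) = default;
      Guard& operator=(const Guard&) = default;
      Guard(Guard&&) noexcept = default;             // noexcept lets vector
      Guard& operator=(Guard&&) noexcept = default;  // move on reallocation
      virtual ~Guard() = default;
    };

    static_assert(std::is_nothrow_move_constructible_v<Guard>,
                  "vector<Guard> will move, not copy, when it grows");

    int main() {
      std::vector<Guard> v(4);
      v.resize(64);  // elements are moved thanks to the noexcept move ctor
    }
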
5 changes: 5 additions & 0 deletions torch/csrc/api/include/torch/imethod.h
@@ -22,6 +22,11 @@ class TORCH_API IMethod {
   using IValueList = std::vector<c10::IValue>;
   using IValueMap = std::unordered_map<std::string, at::IValue>;

+  IMethod() = default;
+  IMethod(const IMethod&) = default;
+  IMethod& operator=(const IMethod&) = default;
+  IMethod(IMethod&&) noexcept = default;
+  IMethod& operator=(IMethod&&) noexcept = default;
   virtual ~IMethod() = default;

   virtual c10::IValue operator()(

4 changes: 2 additions & 2 deletions torch/csrc/api/include/torch/nn/cloneable.h
@@ -21,7 +21,7 @@ namespace nn {
 /// because then storing a module would always require templatizing it.
 template <typename Derived>
 // NOLINTNEXTLINE(bugprone-exception-escape)
-class Cloneable : public virtual Module {
+class Cloneable : public Module {
  public:
   using Module::Module;

@@ -90,7 +90,7 @@ class Cloneable : public virtual Module {
         clone != nullptr,
         "Attempted to clone submodule, but it is of a "
         "different type than the submodule it was to be cloned into");
-    static_cast<Derived&>(*this) = std::move(*clone);
+    static_cast<Derived&>(*this) = *clone;
   }
 };

4 changes: 4 additions & 0 deletions torch/csrc/api/include/torch/nn/module.h
@@ -81,6 +81,10 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
   /// The name of the submodule is inferred via RTTI (if possible) the first
   /// time `.name()` is invoked.
   Module();
+  Module(const Module&) = default;
+  Module& operator=(const Module&) = default;
+  Module(Module&&) noexcept = default;
+  Module& operator=(Module&&) noexcept = default;

   virtual ~Module() = default;

18 changes: 17 additions & 1 deletion torch/csrc/api/include/torch/nn/modules/batchnorm.h
@@ -137,7 +137,23 @@ class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> {
   }

   /// Pretty prints the `BatchNorm{1,2,3}d` module into the given `stream`.
-  void pretty_print(std::ostream& stream) const override;
+  void pretty_print(std::ostream& stream) const override {
+    stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
+           << this->options.num_features() << ", "
+           << "eps=" << this->options.eps() << ", "
+           << "momentum=";
+
+    if (this->options.momentum().has_value()) {
+      stream << this->options.momentum().value();
+    } else {
+      stream << "None";
+    }
+
+    stream << ", "
+           << "affine=" << this->options.affine() << ", "
+           << "track_running_stats=" << this->options.track_running_stats()
+           << ")";
+  }
 };

 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BatchNorm1d

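This hunk (and the instancenorm one below) inlines pretty_print into the header, and the matching .cpp definitions are deleted further down. The likely motivation is the usual rule for class templates: a member defined only inside one .cpp is invisible to other translation units that instantiate the template, unless that .cpp adds explicit instantiations. A minimal sketch with a hypothetical Printer template:

    #include <iostream>

    // printer.h -- the definition lives in the header, so every translation
    // unit that uses Printer<D> can instantiate pretty_print for its own D.
    template <int D>
    struct Printer {
      void pretty_print(std::ostream& stream) const {
        stream << "Printer" << D << "d\n";
      }
    };

    // If pretty_print were defined only in printer.cpp, any other .cpp doing
    // Printer<2>{}.pretty_print(std::cout) would fail to link unless
    // printer.cpp explicitly instantiated it: template struct Printer<2>;

    int main() {
      Printer<2>{}.pretty_print(std::cout);
    }
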
2 changes: 2 additions & 0 deletions torch/csrc/api/include/torch/nn/modules/container/any_value.h
@@ -91,6 +91,8 @@ class AnyValue {
   struct Placeholder {
     explicit Placeholder(const std::type_info& type_info_) noexcept
         : type_info(type_info_) {}
+    Placeholder(const Placeholder&) = default;
+    Placeholder(Placeholder&&) = default;
     virtual ~Placeholder() = default;
     virtual std::unique_ptr<Placeholder> clone() const {
       TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`");

10 changes: 9 additions & 1 deletion torch/csrc/api/include/torch/nn/modules/instancenorm.h
@@ -45,7 +45,15 @@ class InstanceNormImpl
   }

   /// Pretty prints the `InstanceNorm{1,2,3}d` module into the given `stream`.
-  void pretty_print(std::ostream& stream) const override;
+  void pretty_print(std::ostream& stream) const override {
+    stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d("
+           << this->options.num_features() << ", "
+           << "eps=" << this->options.eps() << ", "
+           << "momentum=" << this->options.momentum() << ", "
+           << "affine=" << this->options.affine() << ", "
+           << "track_running_stats=" << this->options.track_running_stats()
+           << ")";
+  }
 };

 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ InstanceNorm1d

2 changes: 0 additions & 2 deletions torch/csrc/api/include/torch/nn/options/rnn.h
@@ -164,8 +164,6 @@ struct TORCH_API RNNCellOptionsBase {
       int64_t hidden_size,
       bool bias,
       int64_t num_chunks);
-  virtual ~RNNCellOptionsBase() = default;
-
   TORCH_ARG(int64_t, input_size);
   TORCH_ARG(int64_t, hidden_size);
   TORCH_ARG(bool, bias);

7 changes: 5 additions & 2 deletions torch/csrc/api/include/torch/optim/adagrad.h
@@ -34,7 +34,6 @@ struct TORCH_API AdagradOptions
   TORCH_API friend bool operator==(
       const AdagradOptions& lhs,
       const AdagradOptions& rhs);
-  ~AdagradOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -45,12 +44,16 @@ struct TORCH_API AdagradParamState
   TORCH_ARG(int64_t, step) = 0;

  public:
+  AdagradParamState() = default;
+  AdagradParamState(const AdagradParamState&) = default;
+  AdagradParamState& operator=(const AdagradParamState&) = default;
+  AdagradParamState(AdagradParamState&&) noexcept = default;
+  AdagradParamState& operator=(AdagradParamState&&) noexcept = default;
   void serialize(torch::serialize::InputArchive& archive) override;
   void serialize(torch::serialize::OutputArchive& archive) const override;
   TORCH_API friend bool operator==(
       const AdagradParamState& lhs,
       const AdagradParamState& rhs);
-  ~AdagradParamState() override = default;
 };

 class TORCH_API Adagrad : public Optimizer {

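The deleted ~AdagradOptions() override = default; (and its twins in the adam/adamw/lbfgs/rmsprop/sgd headers below) shows the other half of the cleanup: even a defaulted destructor counts as user-declared and deprecates the implicit copy operations, and it is redundant when the base class already provides a virtual destructor. A sketch of that division of labor, with hypothetical names:

    #include <memory>

    struct OptionsBase {  // base: rule of five, as optimizer.h does above
      OptionsBase() = default;
      OptionsBase(const OptionsBase&) = default;
      OptionsBase& operator=(const OptionsBase&) = default;
      OptionsBase(OptionsBase&&) noexcept = default;
      OptionsBase& operator=(OptionsBase&&) noexcept = default;
      virtual ~OptionsBase() = default;
    };

    struct MyOptions : OptionsBase {  // derived: rule of zero
      double lr = 1e-2;
      // No "~MyOptions() override = default;" -- re-declaring it would count
      // as user-declared and deprecate the implicit copy operations.
    };

    int main() {
      MyOptions a;
      MyOptions b = a;  // implicit copy, nothing deprecated involved
      std::unique_ptr<OptionsBase> p = std::make_unique<MyOptions>();
      (void)b;
    }  // deletion through the base pointer is safe: the dtor is virtual
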
2 changes: 0 additions & 2 deletions torch/csrc/api/include/torch/optim/adam.h
@@ -32,7 +32,6 @@ struct TORCH_API AdamOptions : public OptimizerCloneableOptions<AdamOptions> {
   TORCH_API friend bool operator==(
       const AdamOptions& lhs,
       const AdamOptions& rhs);
-  ~AdamOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -50,7 +49,6 @@ struct TORCH_API AdamParamState
   TORCH_API friend bool operator==(
       const AdamParamState& lhs,
       const AdamParamState& rhs);
-  ~AdamParamState() override = default;
 };

 class TORCH_API Adam : public Optimizer {

2 changes: 0 additions & 2 deletions torch/csrc/api/include/torch/optim/adamw.h
@@ -32,7 +32,6 @@ struct TORCH_API AdamWOptions : public OptimizerCloneableOptions<AdamWOptions> {
   TORCH_API friend bool operator==(
       const AdamWOptions& lhs,
       const AdamWOptions& rhs);
-  ~AdamWOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -50,7 +49,6 @@ struct TORCH_API AdamWParamState
   TORCH_API friend bool operator==(
       const AdamWParamState& lhs,
       const AdamWParamState& rhs);
-  ~AdamWParamState() override = default;
 };

 class TORCH_API AdamW : public Optimizer {

2 changes: 0 additions & 2 deletions torch/csrc/api/include/torch/optim/lbfgs.h
@@ -29,7 +29,6 @@ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions<LBFGSOptions> {
   TORCH_API friend bool operator==(
       const LBFGSOptions& lhs,
       const LBFGSOptions& rhs);
-  ~LBFGSOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -54,7 +53,6 @@ struct TORCH_API LBFGSParamState
   TORCH_API friend bool operator==(
       const LBFGSParamState& lhs,
       const LBFGSParamState& rhs);
-  ~LBFGSParamState() override = default;
 };

 class TORCH_API LBFGS : public Optimizer {

10 changes: 10 additions & 0 deletions torch/csrc/api/include/torch/optim/optimizer.h
@@ -34,6 +34,11 @@ namespace optim {

 class TORCH_API OptimizerParamState {
  public:
+  OptimizerParamState() = default;
+  OptimizerParamState(const OptimizerParamState&) = default;
+  OptimizerParamState& operator=(const OptimizerParamState&) = default;
+  OptimizerParamState(OptimizerParamState&&) noexcept = default;
+  OptimizerParamState& operator=(OptimizerParamState&&) noexcept = default;
   virtual std::unique_ptr<OptimizerParamState> clone() const;
   virtual void serialize(torch::serialize::InputArchive& archive);
   virtual void serialize(torch::serialize::OutputArchive& archive) const;
@@ -49,6 +54,11 @@ class OptimizerCloneableParamState : public OptimizerParamState {

 class TORCH_API OptimizerOptions {
  public:
+  OptimizerOptions() = default;
+  OptimizerOptions(const OptimizerOptions&) = default;
+  OptimizerOptions& operator=(const OptimizerOptions&) = default;
+  OptimizerOptions(OptimizerOptions&&) noexcept = default;
+  OptimizerOptions& operator=(OptimizerOptions&&) noexcept = default;
   virtual std::unique_ptr<OptimizerOptions> clone() const;
   virtual void serialize(torch::serialize::InputArchive& archive);
   virtual void serialize(torch::serialize::OutputArchive& archive) const;

2 changes: 0 additions & 2 deletions torch/csrc/api/include/torch/optim/rmsprop.h
@@ -37,7 +37,6 @@ struct TORCH_API RMSpropOptions
   TORCH_API friend bool operator==(
       const RMSpropOptions& lhs,
       const RMSpropOptions& rhs);
-  ~RMSpropOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -55,7 +54,6 @@
   TORCH_API friend bool operator==(
       const RMSpropParamState& lhs,
       const RMSpropParamState& rhs);
-  ~RMSpropParamState() override = default;
 };

 class TORCH_API RMSprop : public Optimizer {

2 changes: 0 additions & 2 deletions torch/csrc/api/include/torch/optim/sgd.h
@@ -34,7 +34,6 @@ struct TORCH_API SGDOptions : public OptimizerCloneableOptions<SGDOptions> {
   TORCH_API friend bool operator==(
       const SGDOptions& lhs,
       const SGDOptions& rhs);
-  ~SGDOptions() override = default;
   double get_lr() const override;
   void set_lr(const double lr) override;
 };
@@ -49,7 +48,6 @@ struct TORCH_API SGDParamState
   TORCH_API friend bool operator==(
       const SGDParamState& lhs,
       const SGDParamState& rhs);
-  ~SGDParamState() override = default;
 };

 class TORCH_API SGD : public Optimizer {

2 changes: 1 addition & 1 deletion torch/csrc/api/src/nn/modules/activation.cpp
@@ -432,7 +432,7 @@ void ThresholdImpl::pretty_print(std::ostream& stream) const {

 MultiheadAttentionImpl::MultiheadAttentionImpl(
     const MultiheadAttentionOptions& options_)
-    : Module("torch::nn::MultiheadAttention"), options(options_) {
+    : Cloneable("torch::nn::MultiheadAttention"), options(options_) {
   // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
   reset();
 }

19 changes: 0 additions & 19 deletions torch/csrc/api/src/nn/modules/batchnorm.cpp
@@ -14,25 +14,6 @@
 namespace torch {
 namespace nn {

-template <size_t D, typename Derived>
-void BatchNormImplBase<D, Derived>::pretty_print(std::ostream& stream) const {
-  stream << std::boolalpha << "torch::nn::BatchNorm" << D << "d("
-         << this->options.num_features() << ", "
-         << "eps=" << this->options.eps() << ", "
-         << "momentum=";
-
-  if (this->options.momentum().has_value()) {
-    stream << this->options.momentum().value();
-  } else {
-    stream << "None";
-  }
-
-  stream << ", "
-         << "affine=" << this->options.affine() << ", "
-         << "track_running_stats=" << this->options.track_running_stats()
-         << ")";
-}
-
 void BatchNorm1dImpl::_check_input_dim(const Tensor& input) {
   TORCH_CHECK(
       input.dim() == 2 || input.dim() == 3,

11 changes: 0 additions & 11 deletions torch/csrc/api/src/nn/modules/instancenorm.cpp
@@ -4,17 +4,6 @@
 namespace torch {
 namespace nn {

-template <size_t D, typename Derived>
-void InstanceNormImpl<D, Derived>::pretty_print(std::ostream& stream) const {
-  stream << std::boolalpha << "torch::nn::InstanceNorm" << D << "d("
-         << this->options.num_features() << ", "
-         << "eps=" << this->options.eps() << ", "
-         << "momentum=" << this->options.momentum() << ", "
-         << "affine=" << this->options.affine() << ", "
-         << "track_running_stats=" << this->options.track_running_stats()
-         << ")";
-}
-
 void InstanceNorm1dImpl::_check_input_dim(const Tensor& input) {
   if (input.dim() != 3 && input.dim() != 2) {
     TORCH_CHECK(

3 changes: 3 additions & 0 deletions torch/csrc/distributed/c10d/Store.hpp
@@ -28,6 +28,9 @@ class TORCH_API Store : public torch::CustomClassHolder {
   explicit Store(const std::chrono::milliseconds& timeout)
       : timeout_(timeout) {}

+  Store(const Store&) = default;
+  Store(Store&&) noexcept = default;
+
   ~Store() override = default;

   void set(const std::string& key, const std::string& value);

1 change: 1 addition & 0 deletions torch/csrc/distributed/rpc/py_rref.h
@@ -18,6 +18,7 @@ class PYBIND11_EXPORT PyRRef {
   // for more explanations.
   explicit PyRRef(const py::object& value, const py::object& type_hint);
   explicit PyRRef(c10::intrusive_ptr<RRef> rref);
+  PyRRef(const PyRRef&) = default;
   ~PyRRef();

   bool isOwner() const;

3 changes: 1 addition & 2 deletions torch/csrc/distributed/rpc/rref_proto.cpp
@@ -142,8 +142,7 @@ std::unique_ptr<RRefUserDelete> RRefUserDelete::fromMessage(
     const Message& message) {
   auto pair =
       ForkMessageBase::fromMessage(message, MessageType::RREF_USER_DELETE);
-  return std::make_unique<RRefUserDelete>(
-      RRefUserDelete(pair.first, pair.second));
+  return std::make_unique<RRefUserDelete>(pair.first, pair.second);
 }

 std::unique_ptr<RemoteRet> RemoteRet::fromMessage(const Message& message) {

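On the rref_proto.cpp change: std::make_unique forwards its arguments straight to the constructor, so the object is built in place; wrapping the arguments in a temporary RRefUserDelete(...) first forces an extra move (or copy), exactly the kind of implicitly-generated operation the warning polices. A generic sketch with a hypothetical Widget:

    #include <memory>
    #include <string>

    struct Widget {
      Widget(int id, std::string name) : id_(id), name_(std::move(name)) {}
      int id_;
      std::string name_;
    };

    std::unique_ptr<Widget> make_widget() {
      // Constructs the Widget in place; no temporary, no extra move/copy.
      return std::make_unique<Widget>(42, "answer");
      // Equivalent but wasteful, and it requires a usable move/copy ctor:
      //   return std::make_unique<Widget>(Widget(42, "answer"));
    }

    int main() {
      auto w = make_widget();
      return w->id_ == 42 ? 0 : 1;
    }
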
24 changes: 10 additions & 14 deletions torch/csrc/distributed/rpc/tensorpipe_agent.cpp
@@ -161,27 +161,23 @@ C10_DEFINE_REGISTRY_WITHOUT_WARNING(

 const std::string& TensorPipeAgent::guessAddress() {
   static const std::string uvAddress = []() {
-    tensorpipe::Error error;
-    std::string result;
     char* ifnameEnv = std::getenv(kSocketIfnameEnvVar.c_str());
     if (ifnameEnv != nullptr) {
-      std::tie(error, result) =
+      auto [error, result] =
           tensorpipe::transport::uv::lookupAddrForIface(ifnameEnv);
       if (error) {
         LOG(WARNING) << "Failed to look up the IP address for interface "
                      << ifnameEnv << " (" << error.what() << "), defaulting to "
                      << kDefaultUvAddress;
         return kDefaultUvAddress;
       }
-    } else {
-      std::tie(error, result) =
-          tensorpipe::transport::uv::lookupAddrForHostname();
-      if (error) {
-        LOG(WARNING) << "Failed to look up the IP address for the hostname ("
-                     << error.what() << "), defaulting to "
-                     << kDefaultUvAddress;
-        return kDefaultUvAddress;
-      }
+      return result;
     }
+    auto [error, result] = tensorpipe::transport::uv::lookupAddrForHostname();
+    if (error) {
+      LOG(WARNING) << "Failed to look up the IP address for the hostname ("
+                   << error.what() << "), defaulting to " << kDefaultUvAddress;
+      return kDefaultUvAddress;
+    }
     return result;
   }();
@@ -1226,8 +1222,8 @@ const std::string& TensorPipeAgent::findWorkerURL(

 void TensorPipeAgent::updateGroupMembership(
     const WorkerInfo& workerInfo,
-    const std::vector<c10::Device> devices,
-    const std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
+    const std::vector<c10::Device>& devices,
+    const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
     bool isJoin) {
   std::string name = workerInfo.name_;
   worker_id_t id = workerInfo.id_;

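Two idioms in the tensorpipe_agent.cpp hunk are worth spelling out. The std::tie pattern needs pre-declared, assignable variables, whereas a C++17 structured binding declares and initializes both names in one step and keeps them scoped to the branch that uses them; and updateGroupMembership now takes its container parameters by const reference, avoiding a copy of each vector and map per call. A runnable sketch of the first idiom (lookup_addr is hypothetical):

    #include <string>
    #include <tuple>

    std::tuple<int, std::string> lookup_addr() { return {0, "127.0.0.1"}; }

    int main() {
      // Before: outputs must pre-exist (and be default-constructible).
      int err_code;
      std::string addr;
      std::tie(err_code, addr) = lookup_addr();

      // After (C++17): declares and initializes both names in one step,
      // scoped to exactly where they are needed.
      auto [err, result] = lookup_addr();
      return (err == err_code && result == addr) ? 0 : 1;
    }
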
(Diffs for the remaining changed files did not load and are not shown.)
