Skip to content

Commit

Permalink
[codemod] c10::optional -> std::optional (pytorch#126135)
Browse files Browse the repository at this point in the history
Generated by running the following from PyTorch root:
```
find . -regex ".*\.\(cpp\|h\|cu\|hpp\|cc\|cxx\)$" | grep -v "build/" | xargs -n 50 -P 4 perl -pi -e 's/c10::optional/std::optional/'
```

`c10::optional` is just an alias for `std::optional`. This removes usages of that alias in preparation for eliminating it entirely.

Pull Request resolved: pytorch#126135
Approved by: https://github.com/Skylion007, https://github.com/malfet, https://github.com/albanD, https://github.com/aaronenyeshi
  • Loading branch information
r-barnes authored and pytorchmergebot committed May 14, 2024
1 parent b55f57b commit ed32787
Show file tree
Hide file tree
Showing 907 changed files with 5,659 additions and 5,659 deletions.
24 changes: 12 additions & 12 deletions aten/src/ATen/CPUGeneratorImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,8 @@ inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
CPUGeneratorImpl::CPUGeneratorImpl(uint64_t seed_in)
: c10::GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(c10::DispatchKey::CPU)},
engine_{seed_in},
next_float_normal_sample_{c10::optional<float>()},
next_double_normal_sample_{c10::optional<double>()} { }
next_float_normal_sample_{std::optional<float>()},
next_double_normal_sample_{std::optional<double>()} { }

/**
* Manually seeds the engine with the seed input
Expand Down Expand Up @@ -151,16 +151,16 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
detail::check_rng_state(new_state);

at::mt19937 engine;
auto float_normal_sample = c10::optional<float>();
auto double_normal_sample = c10::optional<double>();
auto float_normal_sample = std::optional<float>();
auto double_normal_sample = std::optional<double>();

// Construct the state of at::CPUGeneratorImpl based on input byte tensor size.
CPUGeneratorImplStateLegacy* legacy_pod{nullptr};
auto new_state_size = new_state.numel();
if (new_state_size == size_legacy) {
legacy_pod = (CPUGeneratorImplStateLegacy*)new_state.data();
// Note that in CPUGeneratorImplStateLegacy, we didn't have float version
// of normal sample and hence we leave the c10::optional<float> as is
// of normal sample and hence we leave the std::optional<float> as is

// Update next_double_normal_sample.
// Note that CPUGeneratorImplStateLegacy stores two uniform values (normal_x, normal_y)
Expand All @@ -171,22 +171,22 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
auto r = legacy_pod->normal_rho;
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
// we return the sin version of the normal sample when in caching mode
double_normal_sample = c10::optional<double>(r * ::sin(theta));
double_normal_sample = std::optional<double>(r * ::sin(theta));
}
} else if (new_state_size == size_current) {
auto rng_state = (CPUGeneratorImplState*)new_state.data();
legacy_pod = &rng_state->legacy_pod;
// update next_float_normal_sample
if (rng_state->is_next_float_normal_sample_valid) {
float_normal_sample = c10::optional<float>(rng_state->next_float_normal_sample);
float_normal_sample = std::optional<float>(rng_state->next_float_normal_sample);
}

// Update next_double_normal_sample.
// Note that in getRNGState, we now return the actual normal sample in normal_y
// and if it's valid in normal_is_valid. The redundant normal_x and normal_rho
// are squashed to 0.0.
if (legacy_pod->normal_is_valid) {
double_normal_sample = c10::optional<double>(legacy_pod->normal_y);
double_normal_sample = std::optional<double>(legacy_pod->normal_y);
}
} else {
AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
Expand Down Expand Up @@ -283,14 +283,14 @@ uint64_t CPUGeneratorImpl::random64() {
/**
* Get the cached normal random in float
*/
c10::optional<float> CPUGeneratorImpl::next_float_normal_sample() {
std::optional<float> CPUGeneratorImpl::next_float_normal_sample() {
return next_float_normal_sample_;
}

/**
* Get the cached normal random in double
*/
c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
std::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
return next_double_normal_sample_;
}

Expand All @@ -299,7 +299,7 @@ c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
*
* See Note [Acquire lock when using random generators]
*/
void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn) {
void CPUGeneratorImpl::set_next_float_normal_sample(std::optional<float> randn) {
next_float_normal_sample_ = randn;
}

Expand All @@ -308,7 +308,7 @@ void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn)
*
* See Note [Acquire lock when using random generators]
*/
void CPUGeneratorImpl::set_next_double_normal_sample(c10::optional<double> randn) {
void CPUGeneratorImpl::set_next_double_normal_sample(std::optional<double> randn) {
next_double_normal_sample_ = randn;
}

Expand Down
12 changes: 6 additions & 6 deletions aten/src/ATen/CPUGeneratorImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,18 +24,18 @@ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
static c10::DeviceType device_type();
uint32_t random();
uint64_t random64();
c10::optional<float> next_float_normal_sample();
c10::optional<double> next_double_normal_sample();
void set_next_float_normal_sample(c10::optional<float> randn);
void set_next_double_normal_sample(c10::optional<double> randn);
std::optional<float> next_float_normal_sample();
std::optional<double> next_double_normal_sample();
void set_next_float_normal_sample(std::optional<float> randn);
void set_next_double_normal_sample(std::optional<double> randn);
at::mt19937 engine();
void set_engine(at::mt19937 engine);

private:
CPUGeneratorImpl* clone_impl() const override;
at::mt19937 engine_;
c10::optional<float> next_float_normal_sample_;
c10::optional<double> next_double_normal_sample_;
std::optional<float> next_float_normal_sample_;
std::optional<double> next_double_normal_sample_;
};

namespace detail {
Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/Context.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ class TORCH_API Context {
}
}
const AcceleratorHooksInterface& getAcceleratorHooksInterface(
c10::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
std::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
c10::DeviceType device_type = opt_device_type.has_value()
? opt_device_type.value()
: at::getAccelerator(true).value();
Expand Down Expand Up @@ -395,7 +395,7 @@ class TORCH_API Context {
bool release_original_weights = false;
#endif
bool display_vmap_fallback_warnings_ = false;
c10::optional<at::QEngine> quantized_engine = c10::nullopt;
std::optional<at::QEngine> quantized_engine = c10::nullopt;
bool enable_sparse_tensor_invariant_checks = false;
bool allow_fp16_reduction_cpu = false;

Expand Down
6 changes: 3 additions & 3 deletions aten/src/ATen/DeviceGuard.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,22 +15,22 @@ namespace at {
// OptionalDeviceGuard guard(device_of(tensor));

/// Return the Device of a Tensor, if the Tensor is defined.
inline c10::optional<Device> device_of(const Tensor& t) {
inline std::optional<Device> device_of(const Tensor& t) {
if (t.defined()) {
return c10::make_optional(t.device());
} else {
return c10::nullopt;
}
}

inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
inline std::optional<Device> device_of(const c10::optional<Tensor>& t) {
return t.has_value() ? device_of(t.value()) : c10::nullopt;
}

/// Return the Device of a TensorList, if the list is non-empty and
/// the first Tensor is defined. (This function implicitly assumes
/// that all tensors in the list have the same device.)
inline c10::optional<Device> device_of(ITensorListRef t) {
inline std::optional<Device> device_of(ITensorListRef t) {
if (!t.empty()) {
return device_of(t.front());
} else {
Expand Down
64 changes: 32 additions & 32 deletions aten/src/ATen/EmptyTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ TensorBase _empty_generic(
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
at::detail::check_size_nonnegative(size);
at::detail::raise_warning_for_complex_half(scalar_type);
caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type);
Expand Down Expand Up @@ -197,7 +197,7 @@ TensorBase empty_generic(
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
}

Expand All @@ -206,7 +206,7 @@ TensorBase empty_generic_symint(
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
}

Expand Down Expand Up @@ -252,19 +252,19 @@ TensorBase empty_strided_symint_generic(
}

TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
auto allocator = GetCPUAllocatorMaybePinned(pin_memory);
constexpr c10::DispatchKeySet cpu_ks(c10::DispatchKey::CPU);
return empty_generic(size, allocator, cpu_ks, dtype, memory_format_opt);
}

TensorBase empty_cpu(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

Expand Down Expand Up @@ -295,10 +295,10 @@ TensorBase empty_strided_cpu(IntArrayRef size, IntArrayRef stride,
TensorBase empty_strided_cpu(
IntArrayRef size,
IntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

Expand Down Expand Up @@ -342,7 +342,7 @@ static MetaAllocator g_meta_alloc;
REGISTER_ALLOCATOR(kMeta, &g_meta_alloc);

TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
auto *allocator = GetAllocator(kMeta);
constexpr c10::DispatchKeySet meta_dks(c10::DispatchKey::Meta);
return at::detail::empty_generic(
Expand All @@ -351,11 +351,11 @@ TensorBase empty_meta(IntArrayRef size, ScalarType dtype,

TensorBase empty_meta(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt
) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
// NB: because there is no SparseMeta (yet), non-strided layout is
Expand All @@ -371,11 +371,11 @@ TensorBase empty_meta(

TensorBase empty_symint_meta(
SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt
) {
auto *allocator = GetAllocator(kMeta);
constexpr c10::DispatchKeySet ks(c10::DispatchKey::Meta);
Expand Down Expand Up @@ -405,10 +405,10 @@ TensorBase empty_strided_meta(IntArrayRef size, IntArrayRef stride,
TensorBase empty_strided_meta(
IntArrayRef size,
IntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

Expand Down Expand Up @@ -440,10 +440,10 @@ TensorBase empty_strided_symint_meta(SymIntArrayRef size, SymIntArrayRef stride,
TensorBase empty_strided_symint_meta(
SymIntArrayRef size,
SymIntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

Expand Down
Loading

0 comments on commit ed32787

Please sign in to comment.