From ed327876f574c6a014321be419a3ae9808a765a2 Mon Sep 17 00:00:00 2001
From: Richard Barnes
Date: Tue, 14 May 2024 19:35:49 +0000
Subject: [PATCH] [codemod] `c10::optional` -> `std::optional` (#126135)

Generated by running the following from PyTorch root:
```
find . -regex ".*\.\(cpp\|h\|cu\|hpp\|cc\|cxx\)$" | grep -v "build/" | xargs -n 50 -P 4 perl -pi -e 's/c10::optional/std::optional/'
```
`c10::optional` is just an alias for `std::optional`. This removes usages of that alias in preparation for eliminating it entirely.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/126135
Approved by: https://github.com/Skylion007, https://github.com/malfet, https://github.com/albanD, https://github.com/aaronenyeshi
---
 aten/src/ATen/CPUGeneratorImpl.cpp | 24 +- aten/src/ATen/CPUGeneratorImpl.h | 12 +- aten/src/ATen/Context.h | 4 +- aten/src/ATen/DeviceGuard.h | 6 +- aten/src/ATen/EmptyTensor.cpp | 64 +- aten/src/ATen/EmptyTensor.h | 62 +- aten/src/ATen/FunctionalInverses.cpp | 4 +- aten/src/ATen/FunctionalTensorWrapper.cpp | 8 +- aten/src/ATen/FunctionalTensorWrapper.h | 24 +- aten/src/ATen/FunctionalizeFallbackKernel.cpp | 16 +- aten/src/ATen/InferSize.h | 2 +- aten/src/ATen/LegacyBatchingRegistrations.cpp | 20 +- aten/src/ATen/NamedTensorUtils.cpp | 2 +- aten/src/ATen/NamedTensorUtils.h | 2 +- aten/src/ATen/NestedTensorImpl.cpp | 2 +- aten/src/ATen/NestedTensorImpl.h | 6 +- aten/src/ATen/SavedTensorHooks.cpp | 2 +- aten/src/ATen/SavedTensorHooks.h | 4 +- aten/src/ATen/ScalarOps.cpp | 2 +- aten/src/ATen/ScalarOps.h | 4 +- aten/src/ATen/TensorIndexing.h | 26 +- aten/src/ATen/TensorIterator.h | 8 +- aten/src/ATen/TensorSubclassLikeUtils.h | 2 +- aten/src/ATen/TensorUtils.cpp | 8 +- aten/src/ATen/TensorUtils.h | 6 +- aten/src/ATen/VmapModeRegistrations.cpp | 2 +- aten/src/ATen/ZeroTensorFallback.cpp | 2 +- aten/src/ATen/autocast_mode.cpp | 2 +- aten/src/ATen/autocast_mode.h | 20 +- aten/src/ATen/core/CachingHostAllocator.h | 6 +- aten/src/ATen/core/CheckMemoryFormat.h | 4 +- .../ATen/core/DeprecatedTypeProperties.cpp | 2 +- aten/src/ATen/core/DeprecatedTypeProperties.h | 4 +- aten/src/ATen/core/Dimname.h | 2 +- aten/src/ATen/core/DistributionsHelper.h | 2 +- aten/src/ATen/core/Generator.h | 4 +- .../ATen/core/GeneratorForPrivateuseone.cpp | 4 +- .../src/ATen/core/GeneratorForPrivateuseone.h | 2 +- aten/src/ATen/core/List.h | 6 +- aten/src/ATen/core/List_inl.h | 4 +- aten/src/ATen/core/List_test.cpp | 10 +- aten/src/ATen/core/NamedTensor.h | 6 +- aten/src/ATen/core/NestedIntSymNodeImpl.cpp | 4 +- aten/src/ATen/core/NestedIntSymNodeImpl.h | 4 +- aten/src/ATen/core/PythonFallbackKernel.cpp | 2 +- aten/src/ATen/core/Tensor.cpp | 6 +- aten/src/ATen/core/TensorBase.h | 8 +- aten/src/ATen/core/TorchDispatchUtils.cpp | 2 +- aten/src/ATen/core/TorchDispatchUtils.h | 2 +- aten/src/ATen/core/VariableHooksInterface.h | 4 +- aten/src/ATen/core/boxing/KernelFunction.h | 6 +- .../ATen/core/boxing/KernelFunction_impl.h | 2 +- .../ATen/core/boxing/KernelFunction_test.cpp | 2 +- .../impl/kernel_function_legacy_test.cpp | 22 +- .../core/boxing/impl/kernel_function_test.cpp | 22 +- .../boxing/impl/kernel_lambda_legacy_test.cpp | 32 +- .../core/boxing/impl/kernel_lambda_test.cpp | 20 +- .../impl/make_boxed_from_unboxed_functor.h | 4 +- .../make_boxed_from_unboxed_functor_test.cpp | 24 +- aten/src/ATen/core/builtin_function.h | 2 +- aten/src/ATen/core/class_type.cpp | 10 +- aten/src/ATen/core/class_type.h | 12 +- .../ATen/core/dispatch/DispatchKeyExtractor.h | 10 +- aten/src/ATen/core/dispatch/Dispatcher.cpp | 
18 +- aten/src/ATen/core/dispatch/Dispatcher.h | 14 +- aten/src/ATen/core/dispatch/OperatorEntry.cpp | 10 +- aten/src/ATen/core/dispatch/OperatorEntry.h | 14 +- aten/src/ATen/core/dynamic_type.h | 6 +- aten/src/ATen/core/function.h | 2 +- aten/src/ATen/core/function_schema.cpp | 20 +- aten/src/ATen/core/function_schema.h | 40 +- aten/src/ATen/core/function_schema_inl.h | 4 +- aten/src/ATen/core/ivalue.cpp | 10 +- aten/src/ATen/core/ivalue.h | 26 +- aten/src/ATen/core/ivalue_inl.h | 30 +- aten/src/ATen/core/jit_type.h | 150 ++-- aten/src/ATen/core/jit_type_base.h | 8 +- aten/src/ATen/core/library.cpp | 6 +- .../core/op_registration/infer_schema.cpp | 2 +- .../ATen/core/op_registration/infer_schema.h | 2 +- .../core/op_registration/op_registration.cpp | 6 +- .../core/op_registration/op_registration.h | 8 +- .../op_registration/op_registration_test.cpp | 94 +-- aten/src/ATen/core/operator_name.h | 2 +- aten/src/ATen/core/tensor_type.cpp | 38 +- aten/src/ATen/core/type.cpp | 20 +- aten/src/ATen/core/union_type.cpp | 12 +- aten/src/ATen/cuda/CachingHostAllocator.cpp | 2 +- aten/src/ATen/cuda/EmptyTensor.cpp | 24 +- aten/src/ATen/cuda/EmptyTensor.h | 24 +- aten/src/ATen/cuda/PinnedMemoryAllocator.cpp | 4 +- aten/src/ATen/cudnn/AutocastRNN.cpp | 6 +- .../ATen/functorch/BatchRulesBinaryOps.cpp | 6 +- .../ATen/functorch/BatchRulesConvolution.cpp | 2 +- aten/src/ATen/functorch/BatchRulesFactory.cpp | 58 +- .../functorch/BatchRulesLinearAlgebra.cpp | 38 +- aten/src/ATen/functorch/BatchRulesLoss.cpp | 2 +- aten/src/ATen/functorch/BatchRulesNorm.cpp | 108 +-- .../ATen/functorch/BatchRulesRandomness.cpp | 6 +- .../ATen/functorch/BatchRulesReduceOps.cpp | 8 +- .../ATen/functorch/BatchRulesScatterOps.cpp | 10 +- .../src/ATen/functorch/BatchRulesUnaryOps.cpp | 2 +- aten/src/ATen/functorch/BatchRulesViews.cpp | 8 +- aten/src/ATen/functorch/DynamicLayer.cpp | 2 +- aten/src/ATen/functorch/DynamicLayer.h | 4 +- .../functorch/LegacyBatchingRegistrations.cpp | 4 +- aten/src/ATen/functorch/PlumbingHelper.cpp | 6 +- aten/src/ATen/functorch/PlumbingHelper.h | 6 +- .../ATen/functorch/PyTorchOperatorHacks.cpp | 6 +- aten/src/ATen/miopen/AutocastRNN.cpp | 4 +- aten/src/ATen/mps/EmptyTensor.cpp | 12 +- aten/src/ATen/mps/EmptyTensor.h | 12 +- aten/src/ATen/mps/MPSGuardImpl.h | 10 +- aten/src/ATen/native/Activation.cpp | 12 +- aten/src/ATen/native/AveragePool2d.cpp | 8 +- aten/src/ATen/native/AveragePool3d.cpp | 12 +- aten/src/ATen/native/BatchLinearAlgebra.cpp | 24 +- aten/src/ATen/native/BatchLinearAlgebra.h | 2 +- .../ATen/native/BatchLinearAlgebraKernel.cpp | 2 +- aten/src/ATen/native/BinaryOps.cpp | 22 +- aten/src/ATen/native/Bucketization.cpp | 16 +- aten/src/ATen/native/BucketizationUtils.h | 2 +- aten/src/ATen/native/CPUFallback.cpp | 4 +- aten/src/ATen/native/ComparisonUtils.cpp | 2 +- aten/src/ATen/native/Constraints.cpp | 24 +- aten/src/ATen/native/ConvUtils.h | 6 +- aten/src/ATen/native/Convolution.cpp | 42 +- aten/src/ATen/native/ConvolutionMM2d.cpp | 8 +- aten/src/ATen/native/ConvolutionMM3d.cpp | 8 +- aten/src/ATen/native/Correlation.cpp | 4 +- aten/src/ATen/native/Cross.cpp | 6 +- aten/src/ATen/native/Distance.cpp | 6 +- aten/src/ATen/native/DistributionTemplates.h | 34 +- aten/src/ATen/native/Distributions.cpp | 102 +-- aten/src/ATen/native/Dropout.cpp | 2 +- aten/src/ATen/native/Embedding.cpp | 2 +- aten/src/ATen/native/EmbeddingBag.cpp | 32 +- aten/src/ATen/native/EmbeddingBag.h | 16 +- aten/src/ATen/native/ForeachUtils.h | 4 +- aten/src/ATen/native/FusedAdam.cpp | 16 +- 
aten/src/ATen/native/FusedSGD.cpp | 8 +- aten/src/ATen/native/Histogram.cpp | 54 +- aten/src/ATen/native/Histogram.h | 4 +- aten/src/ATen/native/IndexingUtils.h | 10 +- aten/src/ATen/native/Linear.cpp | 6 +- aten/src/ATen/native/LinearAlgebra.cpp | 6 +- aten/src/ATen/native/Loss.cpp | 10 +- aten/src/ATen/native/LossMulti.h | 2 +- aten/src/ATen/native/LossMultiMargin.cpp | 10 +- aten/src/ATen/native/LossNLL.cpp | 10 +- aten/src/ATen/native/LossNLL2d.cpp | 14 +- aten/src/ATen/native/MathBitsFallback.h | 2 +- aten/src/ATen/native/Memory.cpp | 4 +- aten/src/ATen/native/MetaTensor.cpp | 26 +- aten/src/ATen/native/NNPACK.cpp | 4 +- .../native/NaiveConvolutionTranspose3d.cpp | 4 +- .../ATen/native/NaiveDilatedConvolution.cpp | 4 +- aten/src/ATen/native/NamedTensor.cpp | 4 +- aten/src/ATen/native/NonSymbolicBC.h | 10 +- aten/src/ATen/native/Normalization.cpp | 40 +- aten/src/ATen/native/PadNd.cpp | 4 +- aten/src/ATen/native/Pool.h | 8 +- aten/src/ATen/native/RNN.cpp | 24 +- aten/src/ATen/native/ReduceOps.cpp | 134 ++-- aten/src/ATen/native/ReduceOps.h | 2 +- aten/src/ATen/native/ReduceOpsUtils.h | 8 +- aten/src/ATen/native/Repeat.cpp | 10 +- aten/src/ATen/native/Repeat.h | 2 +- aten/src/ATen/native/Resize.cpp | 8 +- aten/src/ATen/native/ResizeCommon.h | 2 +- aten/src/ATen/native/ScatterGatherChecks.h | 4 +- aten/src/ATen/native/SegmentReduce.cpp | 28 +- aten/src/ATen/native/SegmentReduce.h | 8 +- aten/src/ATen/native/SoftMax.cpp | 18 +- aten/src/ATen/native/Sorting.cpp | 4 +- aten/src/ATen/native/SpectralOps.cpp | 144 ++-- aten/src/ATen/native/SummaryOps.cpp | 2 +- .../ATen/native/TensorAdvancedIndexing.cpp | 32 +- aten/src/ATen/native/TensorAdvancedIndexing.h | 6 +- .../ATen/native/TensorAdvancedIndexingUtils.h | 4 +- aten/src/ATen/native/TensorCompare.cpp | 14 +- aten/src/ATen/native/TensorConversions.cpp | 106 +-- aten/src/ATen/native/TensorConversions.h | 12 +- aten/src/ATen/native/TensorFactories.cpp | 640 +++++++++--------- aten/src/ATen/native/TensorFactories.h | 2 +- aten/src/ATen/native/TensorShape.cpp | 18 +- .../src/ATen/native/TensorTransformations.cpp | 2 +- aten/src/ATen/native/TestOps.cpp | 2 +- aten/src/ATen/native/UnaryOps.cpp | 28 +- aten/src/ATen/native/UnaryOps.h | 30 +- aten/src/ATen/native/Unique.cpp | 2 +- aten/src/ATen/native/UpSample.cpp | 2 +- aten/src/ATen/native/UpSample.h | 16 +- aten/src/ATen/native/UpSampleBicubic2d.cpp | 40 +- aten/src/ATen/native/UpSampleBilinear2d.cpp | 32 +- aten/src/ATen/native/UpSampleLinear1d.cpp | 10 +- aten/src/ATen/native/UpSampleNearest1d.cpp | 20 +- aten/src/ATen/native/UpSampleNearest2d.cpp | 32 +- aten/src/ATen/native/UpSampleNearest3d.cpp | 52 +- aten/src/ATen/native/UpSampleTrilinear3d.cpp | 26 +- aten/src/ATen/native/VariableMethodStubs.cpp | 2 +- .../ao_sparse/quantized/cpu/fbgemm_utils.h | 8 +- .../ao_sparse/quantized/cpu/packed_params.h | 8 +- .../quantized/cpu/qlinear_prepack.cpp | 10 +- .../ao_sparse/quantized/cpu/qnnpack_utils.h | 8 +- aten/src/ATen/native/cpu/AvgPoolKernel.cpp | 28 +- .../ATen/native/cpu/DistributionKernels.cpp | 30 +- .../ATen/native/cpu/DistributionTemplates.h | 22 +- .../ATen/native/cpu/FlashAttentionKernel.cpp | 16 +- aten/src/ATen/native/cpu/HistogramKernel.cpp | 18 +- aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp | 6 +- .../src/ATen/native/cpu/MultinomialKernel.cpp | 6 +- aten/src/ATen/native/cpu/ReduceUtils.h | 4 +- aten/src/ATen/native/cpu/UnaryOpsKernel.cpp | 6 +- aten/src/ATen/native/cpu/UpSampleKernel.cpp | 82 +-- .../ATen/native/cpu/UpSampleMoreKernel.cpp | 38 +- 
aten/src/ATen/native/cuda/AveragePool2d.cu | 4 +- aten/src/ATen/native/cuda/AveragePool3d.cu | 4 +- aten/src/ATen/native/cuda/Blas.cpp | 20 +- aten/src/ATen/native/cuda/Bucketization.cu | 16 +- aten/src/ATen/native/cuda/ConvolutionMM2d.cu | 4 +- aten/src/ATen/native/cuda/DepthwiseConv2d.cu | 4 +- aten/src/ATen/native/cuda/DepthwiseConv3d.cu | 2 +- .../ATen/native/cuda/DistributionBernoulli.cu | 4 +- .../native/cuda/DistributionCauchyKernel.cu | 2 +- .../cuda/DistributionExponentialKernel.cu | 2 +- .../cuda/DistributionGeometricKernel.cu | 2 +- .../cuda/DistributionLogNormalKernel.cu | 2 +- .../ATen/native/cuda/DistributionNormal.cu | 2 +- .../native/cuda/DistributionRandomKernel.cu | 6 +- .../ATen/native/cuda/DistributionTemplates.h | 20 +- .../ATen/native/cuda/DistributionUniform.cu | 2 +- aten/src/ATen/native/cuda/Distributions.cpp | 8 +- aten/src/ATen/native/cuda/Dropout.cu | 4 +- aten/src/ATen/native/cuda/EmbeddingBag.cu | 6 +- aten/src/ATen/native/cuda/FusedAdamKernel.cu | 8 +- aten/src/ATen/native/cuda/FusedAdamWKernel.cu | 8 +- aten/src/ATen/native/cuda/FusedSgdKernel.cu | 16 +- aten/src/ATen/native/cuda/IndexKernel.cpp | 2 +- aten/src/ATen/native/cuda/Indexing.cu | 6 +- .../ATen/native/cuda/LinearAlgebraStubs.cpp | 2 +- aten/src/ATen/native/cuda/Loss.cu | 8 +- .../src/ATen/native/cuda/MixedDtypesLinear.cu | 4 +- aten/src/ATen/native/cuda/MultiMarginLoss.cu | 10 +- .../src/ATen/native/cuda/MultinomialKernel.cu | 2 +- aten/src/ATen/native/cuda/NLLLoss2d.cu | 12 +- .../cuda/NaiveConvolutionTranspose3d.cu | 4 +- .../native/cuda/NaiveDilatedConvolution.cu | 4 +- aten/src/ATen/native/cuda/Normalization.cu | 44 +- aten/src/ATen/native/cuda/RNN.cu | 6 +- aten/src/ATen/native/cuda/Randperm.cu | 2 +- aten/src/ATen/native/cuda/Repeat.cu | 2 +- aten/src/ATen/native/cuda/Resize.cpp | 2 +- aten/src/ATen/native/cuda/RreluWithNoise.cu | 8 +- aten/src/ATen/native/cuda/SegmentReduce.cu | 12 +- aten/src/ATen/native/cuda/SoftMax.cu | 4 +- .../cuda/SparseBinaryOpIntersectionKernel.cu | 2 +- aten/src/ATen/native/cuda/SpectralOps.cpp | 2 +- aten/src/ATen/native/cuda/SummaryOps.cu | 2 +- aten/src/ATen/native/cuda/TensorFactories.cu | 20 +- aten/src/ATen/native/cuda/UnaryOpsKernel.cu | 6 +- aten/src/ATen/native/cuda/Unique.cu | 2 +- .../src/ATen/native/cuda/UpSampleBicubic2d.cu | 16 +- .../ATen/native/cuda/UpSampleBilinear2d.cu | 40 +- aten/src/ATen/native/cuda/UpSampleLinear1d.cu | 8 +- .../src/ATen/native/cuda/UpSampleNearest1d.cu | 12 +- .../src/ATen/native/cuda/UpSampleNearest2d.cu | 24 +- .../src/ATen/native/cuda/UpSampleNearest3d.cu | 36 +- .../ATen/native/cuda/UpSampleTrilinear3d.cu | 24 +- .../native/cuda/fused_adam_amsgrad_impl.cu | 8 +- aten/src/ATen/native/cuda/fused_adam_impl.cu | 8 +- .../native/cuda/fused_adamw_amsgrad_impl.cu | 8 +- aten/src/ATen/native/cuda/fused_adamw_impl.cu | 8 +- aten/src/ATen/native/cuda/jit_utils.cpp | 4 +- .../src/ATen/native/cuda/layer_norm_kernel.cu | 8 +- .../native/cuda/linalg/BatchLinearAlgebra.cpp | 2 +- .../cuda/linalg/BatchLinearAlgebraLib.cpp | 2 +- .../cuda/linalg/BatchLinearAlgebraLib.h | 2 +- aten/src/ATen/native/cudnn/BatchNorm.cpp | 28 +- .../ATen/native/cudnn/ConvPlaceholders.cpp | 6 +- aten/src/ATen/native/cudnn/ConvShared.cpp | 6 +- aten/src/ATen/native/cudnn/RNN.cpp | 54 +- aten/src/ATen/native/group_norm.cpp | 14 +- aten/src/ATen/native/layer_norm.cpp | 14 +- aten/src/ATen/native/layer_norm.h | 4 +- aten/src/ATen/native/metal/MetalNeuronType.h | 4 +- .../ATen/native/metal/MetalPrepackOpContext.h | 48 +- .../native/metal/MetalPrepackOpRegister.cpp | 
24 +- .../ATen/native/metal/ops/MetalConvolution.h | 2 +- .../ATen/native/miopen/BatchNorm_miopen.cpp | 6 +- aten/src/ATen/native/miopen/Conv_miopen.cpp | 22 +- aten/src/ATen/native/miopen/RNN_miopen.cpp | 20 +- aten/src/ATen/native/mkldnn/Common.h | 4 +- aten/src/ATen/native/mkldnn/Conv.cpp | 62 +- aten/src/ATen/native/mkldnn/ConvPrepack.cpp | 10 +- aten/src/ATen/native/mkldnn/ConvPrepack.h | 4 +- aten/src/ATen/native/mkldnn/Linear.cpp | 20 +- aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp | 2 +- aten/src/ATen/native/mkldnn/MKLDNNCommon.h | 2 +- .../ATen/native/mkldnn/MKLDNNConversions.cpp | 10 +- aten/src/ATen/native/mkldnn/Normalization.cpp | 28 +- aten/src/ATen/native/mkldnn/OpContext.cpp | 2 +- aten/src/ATen/native/mkldnn/OpContext.h | 8 +- aten/src/ATen/native/mkldnn/Pooling.cpp | 32 +- aten/src/ATen/native/mkldnn/RNN.cpp | 12 +- .../ATen/native/mkldnn/TensorFactories.cpp | 4 +- aten/src/ATen/native/mkldnn/TensorShape.cpp | 4 +- aten/src/ATen/native/mkldnn/Utils.cpp | 20 +- aten/src/ATen/native/mkldnn/Utils.h | 4 +- aten/src/ATen/native/mkldnn/xpu/Conv.cpp | 2 +- aten/src/ATen/native/mps/TensorFactory.cpp | 34 +- .../native/nested/NestedTensorBackward.cpp | 4 +- .../native/nested/NestedTensorFactories.cpp | 32 +- .../ATen/native/nested/NestedTensorMath.cpp | 22 +- .../ATen/native/nested/NestedTensorMatmul.cpp | 2 +- .../NestedTensorTransformerFunctions.cpp | 8 +- .../nested/NestedTensorTransformerFunctions.h | 4 +- .../native/nested/NestedTensorUnaryOps.cpp | 2 +- .../ATen/native/nested/NestedTensorUtils.cpp | 2 +- .../ATen/native/nested/NestedTensorUtils.h | 8 +- .../cuda/NestedTensorTransformerFunctions.cpp | 8 +- aten/src/ATen/native/quantized/PackedParams.h | 8 +- aten/src/ATen/native/quantized/QTensor.cpp | 4 +- .../quantized/TensorAdvancedIndexing.cpp | 8 +- .../ATen/native/quantized/TensorCompare.cpp | 2 +- .../ATen/native/quantized/TensorFactories.cpp | 68 +- .../native/quantized/cpu/AveragePool2d.cpp | 8 +- .../native/quantized/cpu/AveragePool3d.cpp | 4 +- .../quantized/cpu/EmbeddingPackedParams.h | 12 +- .../native/quantized/cpu/LinearUnpackImpl.cpp | 16 +- .../native/quantized/cpu/Normalization.cpp | 18 +- .../ATen/native/quantized/cpu/OnednnUtils.h | 46 +- .../ATen/native/quantized/cpu/QnnpackUtils.h | 20 +- .../ATen/native/quantized/cpu/QuantizedOps.h | 12 +- .../ATen/native/quantized/cpu/ReduceOps.cpp | 16 +- .../native/quantized/cpu/TensorOperators.cpp | 2 +- .../ATen/native/quantized/cpu/TensorShape.cpp | 4 +- .../quantized/cpu/UpSampleBilinear2d.cpp | 10 +- .../quantized/cpu/UpSampleNearest2d.cpp | 24 +- .../quantized/cpu/UpSampleNearest3d.cpp | 30 +- .../native/quantized/cpu/conv_serialization.h | 20 +- .../native/quantized/cpu/fbgemm_utils.cpp | 4 +- .../ATen/native/quantized/cpu/fbgemm_utils.h | 42 +- .../cpu/kernels/QuantizedOpKernels.cpp | 14 +- aten/src/ATen/native/quantized/cpu/qconv.cpp | 38 +- .../native/quantized/cpu/qconv_prepack.cpp | 30 +- .../quantized/cpu/qconv_unpack_impl.cpp | 24 +- .../native/quantized/cpu/qembeddingbag.cpp | 84 +-- .../ATen/native/quantized/cpu/qembeddingbag.h | 12 +- .../src/ATen/native/quantized/cpu/qlinear.cpp | 42 +- .../native/quantized/cpu/qlinear_dynamic.cpp | 2 +- .../native/quantized/cpu/qlinear_prepack.cpp | 26 +- .../native/quantized/cpu/qnormalization.cpp | 12 +- .../ATen/native/quantized/cpu/qsoftmax.cpp | 4 +- .../native/quantized/cuda/EmbeddingBag.cu | 22 +- .../ATen/native/quantized/cudnn/BinaryOps.cpp | 2 +- aten/src/ATen/native/quantized/cudnn/Conv.cpp | 12 +- .../native/quantized/cudnn/ConvPrepack.cpp | 12 +- 
.../native/quantized/cudnn/ConvUnpackImpl.cpp | 6 +- .../ATen/native/quantized/cudnn/Linear.cpp | 12 +- .../native/quantized/cudnn/LinearPrepack.cpp | 4 +- .../quantized/cudnn/LinearUnpackImpl.cpp | 4 +- aten/src/ATen/native/quantized/cudnn/utils.h | 18 +- .../ATen/native/quantized/qconv_unpack.cpp | 12 +- .../ATen/native/quantized/qlinear_unpack.cpp | 8 +- aten/src/ATen/native/sparse/SoftMax.cpp | 4 +- .../sparse/SparseBinaryOpIntersectionCommon.h | 12 +- .../SparseBinaryOpIntersectionKernel.cpp | 2 +- .../ATen/native/sparse/SparseCsrTensor.cpp | 92 +-- .../native/sparse/SparseCsrTensorMath.cpp | 8 +- .../ATen/native/sparse/SparseFactories.cpp | 2 +- aten/src/ATen/native/sparse/SparseStubs.h | 4 +- aten/src/ATen/native/sparse/SparseTensor.cpp | 100 +-- .../ATen/native/sparse/SparseTensorMath.cpp | 8 +- .../src/ATen/native/sparse/SparseUnaryOps.cpp | 12 +- .../native/sparse/cuda/SparseCsrTensorMath.cu | 6 +- .../sparse/cuda/SparseSemiStructuredLinear.cu | 6 +- .../sparse/cuda/SparseSemiStructuredOps.cu | 10 +- .../sparse/cuda/SparseSemiStructuredTile.cu | 2 +- .../sparse/cuda/SparseSemiSturcturedApply.cu | 2 +- .../ATen/native/sparse/cuda/cuSPARSELtOps.cpp | 30 +- .../ATen/native/transformers/attention.cpp | 42 +- aten/src/ATen/native/transformers/attention.h | 14 +- .../native/transformers/cuda/attention.cu | 44 +- .../transformers/cuda/attention_backward.cu | 36 +- .../cuda/flash_attn/flash_api.cpp | 50 +- .../transformers/cuda/flash_attn/flash_api.h | 30 +- .../ATen/native/transformers/sdp_utils_cpp.h | 4 +- .../ATen/native/transformers/transformer.cpp | 6 +- aten/src/ATen/native/utils/Factory.cpp | 2 +- aten/src/ATen/native/utils/Factory.h | 2 +- aten/src/ATen/native/vulkan/ops/Batchnorm.cpp | 24 +- aten/src/ATen/native/vulkan/ops/Batchnorm.h | 16 +- aten/src/ATen/native/vulkan/ops/BinaryOp.cpp | 58 +- aten/src/ATen/native/vulkan/ops/Clamp.cpp | 16 +- aten/src/ATen/native/vulkan/ops/Clone.cpp | 2 +- aten/src/ATen/native/vulkan/ops/Common.h | 8 +- .../ATen/native/vulkan/ops/Convolution.cpp | 70 +- aten/src/ATen/native/vulkan/ops/Convolution.h | 54 +- aten/src/ATen/native/vulkan/ops/Factory.cpp | 16 +- aten/src/ATen/native/vulkan/ops/Factory.h | 8 +- aten/src/ATen/native/vulkan/ops/Layernorm.cpp | 16 +- aten/src/ATen/native/vulkan/ops/Layernorm.h | 8 +- aten/src/ATen/native/vulkan/ops/Mm.cpp | 16 +- aten/src/ATen/native/vulkan/ops/Mm.h | 4 +- .../native/vulkan/ops/NativeLayerNorm.cpp | 8 +- aten/src/ATen/native/vulkan/ops/Pool.cpp | 2 +- .../native/vulkan/ops/QuantizedFunctions.h | 6 +- aten/src/ATen/native/vulkan/ops/Random.cpp | 24 +- aten/src/ATen/native/vulkan/ops/Slice.cpp | 4 +- aten/src/ATen/native/vulkan/ops/Sum.cpp | 2 +- aten/src/ATen/native/vulkan/ops/Upsample.cpp | 8 +- aten/src/ATen/native/vulkan/ops/Zero.cpp | 8 +- aten/src/ATen/native/vulkan/ops/cumsum.cpp | 2 +- aten/src/ATen/native/xnnpack/Convolution.cpp | 14 +- aten/src/ATen/native/xnnpack/Convolution.h | 14 +- aten/src/ATen/native/xnnpack/Linear.cpp | 10 +- aten/src/ATen/native/xnnpack/Linear.h | 8 +- aten/src/ATen/native/xnnpack/OpContext.cpp | 18 +- aten/src/ATen/native/xnnpack/OpContext.h | 72 +- aten/src/ATen/ops/from_blob.h | 14 +- aten/src/ATen/record_function.cpp | 24 +- aten/src/ATen/record_function.h | 6 +- .../ATen/templates/RegisterBackendSelect.cpp | 4 +- .../templates/RegisterFunctionalization.cpp | 2 +- aten/src/ATen/templates/TensorBody.h | 8 +- aten/src/ATen/test/cpu_rng_test.cpp | 48 +- aten/src/ATen/test/cuda_distributions_test.cu | 2 +- aten/src/ATen/test/cuda_optional_test.cu | 4 +- 
aten/src/ATen/test/cuda_stream_test.cpp | 2 +- aten/src/ATen/test/extension_backend_test.cpp | 12 +- aten/src/ATen/test/operator_name_test.cpp | 4 +- aten/src/ATen/test/rng_test.h | 22 +- aten/src/ATen/test/type_test.cpp | 6 +- aten/src/ATen/test/vulkan_api_test.cpp | 10 +- .../ATen/test/vulkan_quantized_api_test.cpp | 2 +- aten/src/ATen/xpu/CachingHostAllocator.cpp | 2 +- binaries/compare_models_torch.cc | 2 +- binaries/speed_benchmark_torch.cc | 2 +- c10/core/ConstantSymNodeImpl.h | 4 +- c10/core/StorageImpl.cpp | 2 +- c10/core/StorageImpl.h | 2 +- c10/core/SymBool.h | 4 +- c10/core/SymInt.h | 2 +- c10/core/SymIntArrayRef.h | 2 +- c10/core/SymNodeImpl.h | 10 +- c10/core/SymbolicShapeMeta.cpp | 2 +- c10/core/TensorImpl.cpp | 6 +- c10/core/TensorImpl.h | 32 +- c10/core/TensorOptions.h | 70 +- c10/core/impl/PyObjectSlot.h | 4 +- c10/core/impl/TorchDispatchModeTLS.cpp | 4 +- c10/core/impl/TorchDispatchModeTLS.h | 6 +- c10/cuda/CUDACachingAllocator.cpp | 2 +- c10/cuda/CUDAFunctions.cpp | 2 +- c10/cuda/CUDAFunctions.h | 2 +- c10/cuda/impl/CUDAGuardImpl.h | 2 +- c10/test/util/optional_test.cpp | 18 +- c10/util/ArrayRef.h | 2 +- c10/util/Backtrace.cpp | 2 +- c10/util/OptionalArrayRef.h | 6 +- c10/xpu/test/impl/XPUStreamTest.cpp | 4 +- caffe2/core/context.h | 12 +- caffe2/core/export_c10_op_to_caffe2.h | 2 +- caffe2/core/export_caffe2_op_to_c10.h | 2 +- caffe2/core/operator.cc | 2 +- caffe2/core/operator.h | 4 +- functorch/csrc/dim/arena.h | 2 +- functorch/csrc/dim/dim.cpp | 2 +- test/cpp/api/autograd.cpp | 6 +- test/cpp/api/memory.cpp | 4 +- test/cpp/c10d/ProcessGroupNCCLTest.cpp | 2 +- test/cpp/jit/test_argument_spec.cpp | 2 +- .../jit/test_custom_class_registrations.cpp | 6 +- test/cpp/jit/test_exception.cpp | 4 +- test/cpp/jit/test_ir.cpp | 12 +- test/cpp/jit/test_jit_type.cpp | 2 +- test/cpp/jit/test_misc.cpp | 2 +- test/cpp/jit/test_shape_analysis.cpp | 16 +- test/cpp/lazy/test_lazy_ops.cpp | 12 +- test/cpp/lazy/test_misc.cpp | 6 +- test/cpp/tensorexpr/test_external_calls.cpp | 20 +- test/cpp/tensorexpr/test_kernel.cpp | 6 +- test/cpp/tensorexpr/test_quantization.cpp | 4 +- test/cpp_extensions/extension.cpp | 2 +- test/cpp_extensions/maia_extension.cpp | 6 +- .../open_registration_extension.cpp | 34 +- test/cpp_extensions/rng_extension.cpp | 6 +- test/custom_operator/op.cpp | 4 +- test/custom_operator/test_custom_ops.cpp | 4 +- .../cpp/extension_device.cpp | 14 +- tools/autograd/templates/Functions.h | 8 +- tools/autograd/templates/VariableType.h | 2 +- tools/autograd/templates/ViewFuncs.h | 2 +- .../templates/python_variable_methods.cpp | 10 +- torch/csrc/Module.cpp | 16 +- torch/csrc/PyInterpreter.cpp | 4 +- torch/csrc/Storage.cpp | 8 +- torch/csrc/Stream.cpp | 2 +- .../csrc/api/include/torch/expanding_array.h | 14 +- torch/csrc/api/include/torch/fft.h | 48 +- torch/csrc/api/include/torch/linalg.h | 56 +- torch/csrc/api/include/torch/nested.h | 4 +- .../include/torch/nn/functional/activation.h | 6 +- .../include/torch/nn/functional/batchnorm.h | 2 +- .../include/torch/nn/functional/embedding.h | 8 +- .../api/include/torch/nn/functional/loss.h | 4 +- .../torch/nn/functional/normalization.h | 2 +- .../api/include/torch/nn/functional/padding.h | 2 +- .../api/include/torch/nn/functional/pooling.h | 32 +- .../include/torch/nn/functional/upsampling.h | 16 +- .../api/include/torch/nn/functional/vision.h | 2 +- .../csrc/api/include/torch/nn/modules/conv.h | 14 +- .../api/include/torch/nn/modules/pooling.h | 12 +- .../csrc/api/include/torch/nn/modules/utils.h | 2 +- 
.../api/include/torch/nn/options/activation.h | 6 +- .../api/include/torch/nn/options/batchnorm.h | 4 +- .../api/include/torch/nn/options/embedding.h | 24 +- .../csrc/api/include/torch/nn/options/loss.h | 4 +- .../include/torch/nn/options/normalization.h | 2 +- .../api/include/torch/nn/options/pooling.h | 8 +- .../api/include/torch/nn/options/upsampling.h | 14 +- .../api/include/torch/nn/options/vision.h | 2 +- .../api/include/torch/nn/utils/clip_grad.h | 2 +- .../torch/nn/utils/convert_parameters.h | 8 +- torch/csrc/api/include/torch/nn/utils/rnn.h | 2 +- torch/csrc/api/include/torch/optim/lbfgs.h | 8 +- .../include/torch/serialize/input-archive.h | 8 +- torch/csrc/api/include/torch/special.h | 4 +- torch/csrc/api/include/torch/types.h | 2 +- torch/csrc/api/src/nn/modules/conv.cpp | 10 +- torch/csrc/api/src/nn/modules/pooling.cpp | 6 +- torch/csrc/api/src/optim/lbfgs.cpp | 4 +- .../csrc/api/src/serialize/input-archive.cpp | 8 +- torch/csrc/autograd/FunctionsManual.cpp | 100 +-- torch/csrc/autograd/FunctionsManual.h | 82 +-- torch/csrc/autograd/TraceTypeManual.cpp | 4 +- torch/csrc/autograd/VariableTypeManual.cpp | 8 +- torch/csrc/autograd/VariableTypeUtils.h | 16 +- torch/csrc/autograd/autograd.cpp | 4 +- torch/csrc/autograd/autograd.h | 4 +- .../autograd_not_implemented_fallback.cpp | 4 +- torch/csrc/autograd/custom_function.cpp | 6 +- torch/csrc/autograd/custom_function.h | 24 +- torch/csrc/autograd/function.h | 4 +- torch/csrc/autograd/functions/comm.cpp | 4 +- torch/csrc/autograd/functions/comm.h | 8 +- torch/csrc/autograd/functions/utils.h | 6 +- torch/csrc/autograd/graph_task.h | 2 +- torch/csrc/autograd/init.cpp | 4 +- torch/csrc/autograd/input_buffer.cpp | 8 +- torch/csrc/autograd/input_buffer.h | 4 +- torch/csrc/autograd/profiler_kineto.cpp | 2 +- torch/csrc/autograd/profiler_legacy.cpp | 4 +- torch/csrc/autograd/profiler_legacy.h | 10 +- torch/csrc/autograd/profiler_python.cpp | 8 +- torch/csrc/autograd/python_function.cpp | 8 +- torch/csrc/autograd/python_variable.cpp | 8 +- .../autograd/python_variable_indexing.cpp | 8 +- torch/csrc/autograd/record_function_ops.cpp | 6 +- torch/csrc/autograd/record_function_ops.h | 2 +- torch/csrc/autograd/saved_variable.cpp | 2 +- torch/csrc/autograd/saved_variable.h | 2 +- .../csrc/autograd/utils/python_arg_parsing.h | 6 +- torch/csrc/autograd/variable.cpp | 10 +- torch/csrc/autograd/variable.h | 16 +- torch/csrc/cuda/Graph.cpp | 2 +- torch/csrc/cuda/Module.cpp | 4 +- torch/csrc/cuda/comm.cpp | 10 +- torch/csrc/cuda/comm.h | 8 +- torch/csrc/cuda/memory_snapshot.cpp | 4 +- torch/csrc/cuda/memory_snapshot.h | 4 +- torch/csrc/cuda/nccl.h | 2 +- torch/csrc/cuda/python_comm.cpp | 12 +- torch/csrc/cuda/python_nccl.cpp | 6 +- torch/csrc/cuda/utils.cpp | 4 +- torch/csrc/distributed/c10d/Backend.hpp | 6 +- torch/csrc/distributed/c10d/NCCLUtils.cpp | 2 +- torch/csrc/distributed/c10d/NCCLUtils.hpp | 8 +- torch/csrc/distributed/c10d/Ops.cpp | 4 +- torch/csrc/distributed/c10d/ProcessGroup.hpp | 10 +- .../distributed/c10d/ProcessGroupGloo.cpp | 12 +- .../distributed/c10d/ProcessGroupGloo.hpp | 4 +- .../csrc/distributed/c10d/ProcessGroupMPI.cpp | 32 +- .../csrc/distributed/c10d/ProcessGroupMPI.hpp | 6 +- .../distributed/c10d/ProcessGroupNCCL.cpp | 18 +- .../distributed/c10d/ProcessGroupNCCL.hpp | 14 +- .../csrc/distributed/c10d/ProcessGroupUCC.hpp | 2 +- torch/csrc/distributed/c10d/Store.hpp | 2 +- torch/csrc/distributed/c10d/TCPStore.cpp | 4 +- torch/csrc/distributed/c10d/TCPStore.hpp | 8 +- torch/csrc/distributed/c10d/TraceUtils.h | 14 +- 
torch/csrc/distributed/c10d/Types.hpp | 4 +- torch/csrc/distributed/c10d/Work.cpp | 2 +- torch/csrc/distributed/c10d/Work.hpp | 2 +- torch/csrc/distributed/c10d/comm.hpp | 6 +- torch/csrc/distributed/c10d/init.cpp | 20 +- .../csrc/distributed/c10d/intra_node_comm.cpp | 2 +- .../csrc/distributed/c10d/intra_node_comm.cu | 2 +- .../csrc/distributed/c10d/intra_node_comm.hpp | 4 +- torch/csrc/distributed/c10d/logger.cpp | 2 +- torch/csrc/distributed/c10d/reducer.cpp | 6 +- torch/csrc/distributed/c10d/reducer.hpp | 10 +- torch/csrc/distributed/c10d/reducer_cuda.cpp | 2 +- torch/csrc/distributed/c10d/reducer_timer.hpp | 4 +- torch/csrc/distributed/c10d/sequence_num.hpp | 2 +- .../rpc/profiler/remote_profiler_manager.cpp | 2 +- .../rpc/profiler/remote_profiler_manager.h | 2 +- torch/csrc/distributed/rpc/py_rref.h | 4 +- torch/csrc/distributed/rpc/rpc_agent.h | 2 +- torch/csrc/distributed/rpc/rref_impl.cpp | 2 +- torch/csrc/distributed/rpc/rref_impl.h | 2 +- torch/csrc/distributed/rpc/script_call.h | 4 +- .../csrc/distributed/rpc/tensorpipe_agent.cpp | 2 +- .../csrc/distributed/rpc/tensorpipe_cuda.cpp | 2 +- .../csrc/distributed/rpc/tensorpipe_utils.cpp | 4 +- torch/csrc/distributed/rpc/tensorpipe_utils.h | 2 +- torch/csrc/dynamo/compiled_autograd.h | 12 +- .../csrc/dynamo/python_compiled_autograd.cpp | 4 +- torch/csrc/functorch/init.cpp | 8 +- .../inductor/aoti_eager/kernel_holder.cpp | 4 +- .../csrc/inductor/aoti_torch/shim_common.cpp | 2 +- torch/csrc/inductor/aoti_torch/utils.h | 18 +- torch/csrc/jit/api/compilation_unit.h | 14 +- torch/csrc/jit/api/function_impl.h | 8 +- torch/csrc/jit/api/module.cpp | 12 +- torch/csrc/jit/api/module.h | 10 +- torch/csrc/jit/api/object.cpp | 2 +- torch/csrc/jit/api/object.h | 8 +- torch/csrc/jit/backends/backend_debug_info.h | 4 +- .../xnnpack/xnnpack_graph_builder.cpp | 2 +- torch/csrc/jit/codegen/fuser/codegen.cpp | 2 +- torch/csrc/jit/codegen/fuser/codegen.h | 2 +- torch/csrc/jit/codegen/fuser/compiler.cpp | 2 +- .../jit/codegen/fuser/cpu/fused_kernel.cpp | 4 +- torch/csrc/jit/codegen/fuser/executor.cpp | 6 +- torch/csrc/jit/codegen/fuser/kernel_spec.h | 2 +- .../jit/codegen/onednn/defer_size_check.cpp | 2 +- torch/csrc/jit/codegen/onednn/graph_fuser.h | 2 +- .../csrc/jit/codegen/onednn/graph_helper.cpp | 2 +- .../jit/codegen/onednn/graph_rewriter.cpp | 2 +- .../jit/codegen/onednn/prepare_binary.cpp | 4 +- torch/csrc/jit/cuda/cuda.h | 4 +- .../jit/frontend/concrete_module_type.cpp | 10 +- .../csrc/jit/frontend/concrete_module_type.h | 10 +- .../jit/frontend/function_schema_parser.cpp | 10 +- torch/csrc/jit/frontend/ir_emitter.cpp | 50 +- .../csrc/jit/frontend/parse_string_literal.h | 2 +- torch/csrc/jit/frontend/parser.cpp | 2 +- torch/csrc/jit/frontend/schema_matching.cpp | 16 +- torch/csrc/jit/frontend/schema_matching.h | 8 +- .../csrc/jit/frontend/schema_type_parser.cpp | 22 +- torch/csrc/jit/frontend/schema_type_parser.h | 12 +- .../csrc/jit/frontend/script_type_parser.cpp | 8 +- torch/csrc/jit/frontend/script_type_parser.h | 4 +- torch/csrc/jit/frontend/source_range.cpp | 2 +- torch/csrc/jit/frontend/source_range.h | 16 +- torch/csrc/jit/frontend/sugared_value.cpp | 8 +- torch/csrc/jit/frontend/sugared_value.h | 30 +- torch/csrc/jit/frontend/tracer.cpp | 38 +- torch/csrc/jit/frontend/tracer.h | 34 +- torch/csrc/jit/ir/alias_analysis.cpp | 12 +- torch/csrc/jit/ir/alias_analysis.h | 8 +- torch/csrc/jit/ir/constants.cpp | 12 +- torch/csrc/jit/ir/constants.h | 14 +- torch/csrc/jit/ir/ir.cpp | 18 +- torch/csrc/jit/ir/ir.h | 32 +- 
torch/csrc/jit/ir/irparser.cpp | 2 +- torch/csrc/jit/ir/named_value.h | 4 +- torch/csrc/jit/ir/scope.cpp | 16 +- torch/csrc/jit/ir/scope.h | 20 +- .../compatibility/model_compatibility.cpp | 2 +- .../compatibility/runtime_compatibility.cpp | 2 +- .../compatibility/runtime_compatibility.h | 2 +- torch/csrc/jit/mobile/flatbuffer_loader.cpp | 14 +- torch/csrc/jit/mobile/flatbuffer_loader.h | 14 +- torch/csrc/jit/mobile/frame.h | 4 +- torch/csrc/jit/mobile/function.cpp | 10 +- torch/csrc/jit/mobile/function.h | 6 +- torch/csrc/jit/mobile/import.cpp | 40 +- torch/csrc/jit/mobile/import.h | 16 +- torch/csrc/jit/mobile/import_data.cpp | 16 +- torch/csrc/jit/mobile/import_data.h | 4 +- .../model_tracer/OperatorCallTracer.cpp | 2 +- torch/csrc/jit/mobile/module.cpp | 6 +- torch/csrc/jit/mobile/module.h | 2 +- torch/csrc/jit/mobile/nnc/aot_compiler.cpp | 6 +- torch/csrc/jit/mobile/nnc/context.h | 4 +- torch/csrc/jit/mobile/parse_operators.cpp | 2 +- torch/csrc/jit/mobile/promoted_prim_ops.cpp | 6 +- .../jit/mobile/register_ops_common_utils.h | 4 +- torch/csrc/jit/mobile/upgrader_mobile.h | 2 +- torch/csrc/jit/operator_upgraders/utils.cpp | 4 +- torch/csrc/jit/operator_upgraders/utils.h | 4 +- torch/csrc/jit/passes/autocast.cpp | 6 +- torch/csrc/jit/passes/canonicalize.cpp | 8 +- torch/csrc/jit/passes/canonicalize.h | 2 +- .../passes/canonicalize_graph_fuser_ops.cpp | 2 +- .../csrc/jit/passes/constant_propagation.cpp | 2 +- torch/csrc/jit/passes/constant_propagation.h | 2 +- .../jit/passes/create_autodiff_subgraphs.cpp | 10 +- torch/csrc/jit/passes/decompose_ops.cpp | 2 +- .../csrc/jit/passes/device_type_analysis.cpp | 6 +- torch/csrc/jit/passes/dtype_analysis.cpp | 2 +- torch/csrc/jit/passes/fold_conv_bn.cpp | 2 +- torch/csrc/jit/passes/freeze_module.cpp | 4 +- .../csrc/jit/passes/frozen_ops_to_mkldnn.cpp | 2 +- torch/csrc/jit/passes/graph_fuser.cpp | 2 +- .../csrc/jit/passes/graph_rewrite_helper.cpp | 2 +- torch/csrc/jit/passes/graph_rewrite_helper.h | 2 +- .../jit/passes/hoist_conv_packed_params.cpp | 2 +- .../jit/passes/integer_value_refinement.cpp | 2 +- torch/csrc/jit/passes/loop_unrolling.cpp | 4 +- torch/csrc/jit/passes/onnx/constant_fold.cpp | 62 +- torch/csrc/jit/passes/onnx/constant_fold.h | 2 +- torch/csrc/jit/passes/onnx/constant_map.cpp | 16 +- torch/csrc/jit/passes/onnx/constant_map.h | 16 +- .../jit/passes/onnx/function_extraction.cpp | 20 +- .../jit/passes/onnx/function_substitution.cpp | 2 +- torch/csrc/jit/passes/onnx/helper.cpp | 4 +- torch/csrc/jit/passes/onnx/helper.h | 2 +- .../pattern_encapsulation.cpp | 2 +- .../pattern_encapsulation.h | 2 +- torch/csrc/jit/passes/onnx/peephole.cpp | 4 +- .../jit/passes/onnx/scalar_type_analysis.cpp | 12 +- .../jit/passes/onnx/shape_type_inference.cpp | 22 +- .../passes/onnx/unpack_quantized_weights.cpp | 18 +- torch/csrc/jit/passes/peephole.cpp | 2 +- .../csrc/jit/passes/peephole_dict_idioms.cpp | 6 +- .../csrc/jit/passes/peephole_list_idioms.cpp | 4 +- torch/csrc/jit/passes/peephole_non_tensor.cpp | 2 +- torch/csrc/jit/passes/quantization/helper.cpp | 12 +- torch/csrc/jit/passes/quantization/helper.h | 12 +- .../passes/quantization/insert_observers.cpp | 28 +- .../passes/quantization/insert_observers.h | 2 +- .../quantization/insert_quant_dequant.cpp | 16 +- torch/csrc/jit/passes/remove_mutation.cpp | 2 +- torch/csrc/jit/passes/remove_mutation.h | 6 +- .../passes/replacement_of_old_operators.cpp | 2 +- torch/csrc/jit/passes/shape_analysis.cpp | 10 +- .../jit/passes/symbolic_shape_analysis.cpp | 18 +- 
.../csrc/jit/passes/symbolic_shape_analysis.h | 4 +- .../csrc/jit/passes/symbolic_shape_cache.cpp | 2 +- torch/csrc/jit/passes/symbolic_shape_cache.h | 4 +- .../passes/symbolic_shape_runtime_fusion.cpp | 2 +- torch/csrc/jit/passes/tensorexpr_fuser.cpp | 2 +- ...ate_differentiable_graph_requires_grad.cpp | 4 +- ...pdate_differentiable_graph_requires_grad.h | 2 +- .../passes/utils/check_alias_annotation.cpp | 2 +- torch/csrc/jit/passes/utils/memory_dag.h | 4 +- .../csrc/jit/passes/utils/subgraph_utils.cpp | 12 +- torch/csrc/jit/python/init.cpp | 24 +- torch/csrc/jit/python/module_python.h | 4 +- torch/csrc/jit/python/pybind_utils.cpp | 6 +- torch/csrc/jit/python/pybind_utils.h | 18 +- torch/csrc/jit/python/python_ir.cpp | 4 +- torch/csrc/jit/python/python_ir.h | 2 +- torch/csrc/jit/python/python_ivalue.h | 2 +- torch/csrc/jit/python/python_list.h | 2 +- .../csrc/jit/python/python_sugared_value.cpp | 6 +- torch/csrc/jit/python/python_sugared_value.h | 8 +- torch/csrc/jit/python/python_tracer.cpp | 2 +- torch/csrc/jit/python/python_tree_views.cpp | 6 +- torch/csrc/jit/python/script_init.cpp | 24 +- torch/csrc/jit/runtime/argument_spec.h | 16 +- torch/csrc/jit/runtime/autodiff.cpp | 4 +- torch/csrc/jit/runtime/custom_operator.h | 4 +- .../jit/runtime/decomposition_registry.cpp | 4 +- .../csrc/jit/runtime/decomposition_registry.h | 4 +- torch/csrc/jit/runtime/graph_executor.cpp | 4 +- torch/csrc/jit/runtime/graph_executor.h | 2 +- torch/csrc/jit/runtime/graph_executor_impl.h | 2 +- torch/csrc/jit/runtime/interpreter.cpp | 6 +- torch/csrc/jit/runtime/interpreter.h | 4 +- .../csrc/jit/runtime/interpreter/code_impl.h | 4 +- torch/csrc/jit/runtime/interpreter/frame.h | 2 +- torch/csrc/jit/runtime/jit_exception.cpp | 4 +- torch/csrc/jit/runtime/jit_exception.h | 12 +- torch/csrc/jit/runtime/operator.h | 14 +- .../runtime/profiling_graph_executor_impl.cpp | 6 +- .../runtime/profiling_graph_executor_impl.h | 12 +- torch/csrc/jit/runtime/register_ops_utils.cpp | 2 +- torch/csrc/jit/runtime/register_ops_utils.h | 2 +- torch/csrc/jit/runtime/register_prim_ops.cpp | 34 +- .../jit/runtime/register_prim_ops_fulljit.cpp | 4 +- .../csrc/jit/runtime/register_special_ops.cpp | 4 +- .../runtime/simple_graph_executor_impl.cpp | 2 +- .../jit/runtime/simple_graph_executor_impl.h | 4 +- torch/csrc/jit/runtime/static/fusion.cpp | 2 +- torch/csrc/jit/runtime/static/impl.cpp | 6 +- torch/csrc/jit/runtime/static/impl.h | 8 +- torch/csrc/jit/runtime/static/ops.cpp | 26 +- torch/csrc/jit/runtime/static/ops.h | 2 +- torch/csrc/jit/runtime/symbolic_script.cpp | 2 +- torch/csrc/jit/runtime/symbolic_script.h | 2 +- .../jit/runtime/symbolic_shape_registry.cpp | 4 +- .../jit/runtime/symbolic_shape_registry.h | 4 +- .../callstack_debug_info_serialization.cpp | 4 +- .../callstack_debug_info_serialization.h | 4 +- torch/csrc/jit/serialization/export.cpp | 6 +- .../jit/serialization/export_bytecode.cpp | 2 +- .../csrc/jit/serialization/export_module.cpp | 6 +- .../serialization/flatbuffer_serializer.cpp | 4 +- torch/csrc/jit/serialization/import.cpp | 44 +- torch/csrc/jit/serialization/import.h | 36 +- .../csrc/jit/serialization/import_legacy.cpp | 6 +- torch/csrc/jit/serialization/import_legacy.h | 2 +- torch/csrc/jit/serialization/import_read.cpp | 6 +- torch/csrc/jit/serialization/import_read.h | 6 +- .../csrc/jit/serialization/import_source.cpp | 4 +- torch/csrc/jit/serialization/import_source.h | 4 +- torch/csrc/jit/serialization/pickler.cpp | 2 +- torch/csrc/jit/serialization/pickler.h | 6 +- 
torch/csrc/jit/serialization/python_print.cpp | 2 +- .../source_range_serialization.cpp | 6 +- .../source_range_serialization.h | 2 +- .../source_range_serialization_impl.h | 2 +- torch/csrc/jit/serialization/unpickler.cpp | 4 +- torch/csrc/jit/serialization/unpickler.h | 4 +- torch/csrc/jit/tensorexpr/codegen.cpp | 2 +- torch/csrc/jit/tensorexpr/codegen.h | 8 +- torch/csrc/jit/tensorexpr/cuda_codegen.cpp | 8 +- torch/csrc/jit/tensorexpr/cuda_codegen.h | 8 +- torch/csrc/jit/tensorexpr/eval.cpp | 2 +- torch/csrc/jit/tensorexpr/eval.h | 2 +- torch/csrc/jit/tensorexpr/expr.cpp | 12 +- torch/csrc/jit/tensorexpr/expr.h | 12 +- .../jit/tensorexpr/external_functions.cpp | 32 +- .../csrc/jit/tensorexpr/external_functions.h | 4 +- torch/csrc/jit/tensorexpr/graph_opt.cpp | 18 +- torch/csrc/jit/tensorexpr/graph_opt.h | 2 +- torch/csrc/jit/tensorexpr/ir.h | 4 +- torch/csrc/jit/tensorexpr/ir_simplifier.cpp | 2 +- torch/csrc/jit/tensorexpr/kernel.cpp | 16 +- torch/csrc/jit/tensorexpr/kernel.h | 10 +- torch/csrc/jit/tensorexpr/llvm_codegen.cpp | 38 +- torch/csrc/jit/tensorexpr/llvm_codegen.h | 26 +- torch/csrc/jit/tensorexpr/llvm_jit.cpp | 32 +- torch/csrc/jit/tensorexpr/llvm_jit.h | 6 +- torch/csrc/jit/tensorexpr/lowerings.cpp | 170 ++--- torch/csrc/jit/tensorexpr/lowerings.h | 2 +- .../csrc/jit/tensorexpr/operators/conv2d.cpp | 10 +- torch/csrc/jit/tensorexpr/operators/conv2d.h | 10 +- .../csrc/jit/tensorexpr/operators/matmul.cpp | 4 +- torch/csrc/jit/tensorexpr/operators/matmul.h | 4 +- torch/csrc/jit/tensorexpr/operators/misc.cpp | 18 +- torch/csrc/jit/tensorexpr/operators/misc.h | 18 +- torch/csrc/jit/tensorexpr/operators/norm.cpp | 2 +- torch/csrc/jit/tensorexpr/operators/norm.h | 2 +- .../jit/tensorexpr/operators/pointwise.cpp | 18 +- .../csrc/jit/tensorexpr/operators/pointwise.h | 18 +- .../jit/tensorexpr/operators/quantization.cpp | 38 +- .../jit/tensorexpr/operators/quantization.h | 40 +- .../jit/tensorexpr/operators/reduction.cpp | 8 +- .../csrc/jit/tensorexpr/operators/reduction.h | 8 +- torch/csrc/jit/tensorexpr/tensor.cpp | 14 +- torch/csrc/jit/tensorexpr/tensor.h | 20 +- torch/csrc/jit/tensorexpr/tensorexpr_init.cpp | 6 +- torch/csrc/jit/testing/file_check.cpp | 10 +- torch/csrc/lazy/backend/backend_device.cpp | 12 +- torch/csrc/lazy/backend/backend_device.h | 14 +- torch/csrc/lazy/backend/backend_interface.h | 2 +- torch/csrc/lazy/core/hash.h | 6 +- torch/csrc/lazy/core/ir_builder.h | 4 +- torch/csrc/lazy/core/ir_dump_util.cpp | 4 +- torch/csrc/lazy/core/lazy_graph_executor.cpp | 4 +- torch/csrc/lazy/core/shape.cpp | 6 +- torch/csrc/lazy/core/shape.h | 8 +- torch/csrc/lazy/core/tensor.cpp | 8 +- torch/csrc/lazy/core/tensor.h | 6 +- torch/csrc/lazy/core/tensor_impl.h | 2 +- torch/csrc/lazy/core/tensor_util.h | 2 +- torch/csrc/lazy/core/unique.h | 2 +- torch/csrc/lazy/core/util.h | 4 +- torch/csrc/lazy/python/python_util.cpp | 2 +- torch/csrc/lazy/python/python_util.h | 2 +- torch/csrc/lazy/ts_backend/ir_builder.h | 2 +- torch/csrc/lazy/ts_backend/ops/to_copy.h | 30 +- .../csrc/lazy/ts_backend/ts_backend_impl.cpp | 2 +- torch/csrc/lazy/ts_backend/ts_backend_impl.h | 2 +- .../lazy/ts_backend/ts_eager_fallback.cpp | 18 +- .../lazy/ts_backend/ts_native_functions.cpp | 52 +- torch/csrc/profiler/collection.h | 24 +- torch/csrc/profiler/data_flow.cpp | 6 +- torch/csrc/profiler/unwind/unwind.cpp | 4 +- torch/csrc/profiler/unwind/unwind.h | 2 +- torch/csrc/profiler/util.cpp | 4 +- torch/csrc/profiler/util.h | 2 +- torch/csrc/tensor/python_tensor.cpp | 4 +- torch/csrc/utils.h | 2 +- 
torch/csrc/utils/device_lazy_init.h | 2 +- torch/csrc/utils/out_types.cpp | 6 +- torch/csrc/utils/out_types.h | 6 +- torch/csrc/utils/python_arg_parser.cpp | 6 +- torch/csrc/utils/python_arg_parser.h | 68 +- torch/csrc/utils/python_dispatch.cpp | 2 +- torch/csrc/utils/python_raii.h | 4 +- torch/csrc/utils/python_symnode.h | 2 +- torch/csrc/utils/schema_info.cpp | 16 +- torch/csrc/utils/schema_info.h | 2 +- torch/csrc/utils/tensor_new.cpp | 38 +- torch/csrc/utils/tensor_new.h | 8 +- torch/csrc/utils/tensor_numpy.cpp | 2 +- torch/csrc/utils/torch_dispatch_mode.h | 2 +- torch/csrc/utils/variadic.h | 2 +- torch/custom_class_detail.h | 2 +- torch/library.h | 16 +- 907 files changed, 5659 insertions(+), 5659 deletions(-) diff --git a/aten/src/ATen/CPUGeneratorImpl.cpp b/aten/src/ATen/CPUGeneratorImpl.cpp index 2d086ebbe71fe..156a2b663c033 100644 --- a/aten/src/ATen/CPUGeneratorImpl.cpp +++ b/aten/src/ATen/CPUGeneratorImpl.cpp @@ -81,8 +81,8 @@ inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) { CPUGeneratorImpl::CPUGeneratorImpl(uint64_t seed_in) : c10::GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(c10::DispatchKey::CPU)}, engine_{seed_in}, - next_float_normal_sample_{c10::optional()}, - next_double_normal_sample_{c10::optional()} { } + next_float_normal_sample_{std::optional()}, + next_double_normal_sample_{std::optional()} { } /** * Manually seeds the engine with the seed input @@ -151,8 +151,8 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) { detail::check_rng_state(new_state); at::mt19937 engine; - auto float_normal_sample = c10::optional(); - auto double_normal_sample = c10::optional(); + auto float_normal_sample = std::optional(); + auto double_normal_sample = std::optional(); // Construct the state of at::CPUGeneratorImpl based on input byte tensor size. CPUGeneratorImplStateLegacy* legacy_pod{nullptr}; @@ -160,7 +160,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) { if (new_state_size == size_legacy) { legacy_pod = (CPUGeneratorImplStateLegacy*)new_state.data(); // Note that in CPUGeneratorImplStateLegacy, we didn't have float version - // of normal sample and hence we leave the c10::optional as is + // of normal sample and hence we leave the std::optional as is // Update next_double_normal_sample. // Note that CPUGeneratorImplStateLegacy stores two uniform values (normal_x, normal_y) @@ -171,14 +171,14 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) { auto r = legacy_pod->normal_rho; auto theta = 2.0 * c10::pi * legacy_pod->normal_x; // we return the sin version of the normal sample when in caching mode - double_normal_sample = c10::optional(r * ::sin(theta)); + double_normal_sample = std::optional(r * ::sin(theta)); } } else if (new_state_size == size_current) { auto rng_state = (CPUGeneratorImplState*)new_state.data(); legacy_pod = &rng_state->legacy_pod; // update next_float_normal_sample if (rng_state->is_next_float_normal_sample_valid) { - float_normal_sample = c10::optional(rng_state->next_float_normal_sample); + float_normal_sample = std::optional(rng_state->next_float_normal_sample); } // Update next_double_normal_sample. @@ -186,7 +186,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) { // and if it's valid in normal_is_valid. The redundant normal_x and normal_rho // are squashed to 0.0. 
if (legacy_pod->normal_is_valid) { - double_normal_sample = c10::optional(legacy_pod->normal_y); + double_normal_sample = std::optional(legacy_pod->normal_y); } } else { AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy, @@ -283,14 +283,14 @@ uint64_t CPUGeneratorImpl::random64() { /** * Get the cached normal random in float */ -c10::optional CPUGeneratorImpl::next_float_normal_sample() { +std::optional CPUGeneratorImpl::next_float_normal_sample() { return next_float_normal_sample_; } /** * Get the cached normal random in double */ -c10::optional CPUGeneratorImpl::next_double_normal_sample() { +std::optional CPUGeneratorImpl::next_double_normal_sample() { return next_double_normal_sample_; } @@ -299,7 +299,7 @@ c10::optional CPUGeneratorImpl::next_double_normal_sample() { * * See Note [Acquire lock when using random generators] */ -void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional randn) { +void CPUGeneratorImpl::set_next_float_normal_sample(std::optional randn) { next_float_normal_sample_ = randn; } @@ -308,7 +308,7 @@ void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional randn) * * See Note [Acquire lock when using random generators] */ -void CPUGeneratorImpl::set_next_double_normal_sample(c10::optional randn) { +void CPUGeneratorImpl::set_next_double_normal_sample(std::optional randn) { next_double_normal_sample_ = randn; } diff --git a/aten/src/ATen/CPUGeneratorImpl.h b/aten/src/ATen/CPUGeneratorImpl.h index f74c42f44fda5..34dd33a475b91 100644 --- a/aten/src/ATen/CPUGeneratorImpl.h +++ b/aten/src/ATen/CPUGeneratorImpl.h @@ -24,18 +24,18 @@ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl { static c10::DeviceType device_type(); uint32_t random(); uint64_t random64(); - c10::optional next_float_normal_sample(); - c10::optional next_double_normal_sample(); - void set_next_float_normal_sample(c10::optional randn); - void set_next_double_normal_sample(c10::optional randn); + std::optional next_float_normal_sample(); + std::optional next_double_normal_sample(); + void set_next_float_normal_sample(std::optional randn); + void set_next_double_normal_sample(std::optional randn); at::mt19937 engine(); void set_engine(at::mt19937 engine); private: CPUGeneratorImpl* clone_impl() const override; at::mt19937 engine_; - c10::optional next_float_normal_sample_; - c10::optional next_double_normal_sample_; + std::optional next_float_normal_sample_; + std::optional next_double_normal_sample_; }; namespace detail { diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h index b50f0479e2fab..a922bcd5922fc 100644 --- a/aten/src/ATen/Context.h +++ b/aten/src/ATen/Context.h @@ -59,7 +59,7 @@ class TORCH_API Context { } } const AcceleratorHooksInterface& getAcceleratorHooksInterface( - c10::optional opt_device_type = c10::nullopt) { + std::optional opt_device_type = c10::nullopt) { c10::DeviceType device_type = opt_device_type.has_value() ? 
opt_device_type.value() : at::getAccelerator(true).value(); @@ -395,7 +395,7 @@ class TORCH_API Context { bool release_original_weights = false; #endif bool display_vmap_fallback_warnings_ = false; - c10::optional quantized_engine = c10::nullopt; + std::optional quantized_engine = c10::nullopt; bool enable_sparse_tensor_invariant_checks = false; bool allow_fp16_reduction_cpu = false; diff --git a/aten/src/ATen/DeviceGuard.h b/aten/src/ATen/DeviceGuard.h index adc7f3efdbb6a..6c2f57e16c8ce 100644 --- a/aten/src/ATen/DeviceGuard.h +++ b/aten/src/ATen/DeviceGuard.h @@ -15,7 +15,7 @@ namespace at { // OptionalDeviceGuard guard(device_of(tensor)); /// Return the Device of a Tensor, if the Tensor is defined. -inline c10::optional device_of(const Tensor& t) { +inline std::optional device_of(const Tensor& t) { if (t.defined()) { return c10::make_optional(t.device()); } else { @@ -23,14 +23,14 @@ inline c10::optional device_of(const Tensor& t) { } } -inline c10::optional device_of(const c10::optional& t) { +inline std::optional device_of(const c10::optional& t) { return t.has_value() ? device_of(t.value()) : c10::nullopt; } /// Return the Device of a TensorList, if the list is non-empty and /// the first Tensor is defined. (This function implicitly assumes /// that all tensors in the list have the same device.) -inline c10::optional device_of(ITensorListRef t) { +inline std::optional device_of(ITensorListRef t) { if (!t.empty()) { return device_of(t.front()); } else { diff --git a/aten/src/ATen/EmptyTensor.cpp b/aten/src/ATen/EmptyTensor.cpp index 0b35fc67b53ac..1eb5c070b547c 100644 --- a/aten/src/ATen/EmptyTensor.cpp +++ b/aten/src/ATen/EmptyTensor.cpp @@ -163,7 +163,7 @@ TensorBase _empty_generic( c10::Allocator* allocator, c10::DispatchKeySet ks, ScalarType scalar_type, - c10::optional memory_format_opt) { + std::optional memory_format_opt) { at::detail::check_size_nonnegative(size); at::detail::raise_warning_for_complex_half(scalar_type); caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type); @@ -197,7 +197,7 @@ TensorBase empty_generic( c10::Allocator* allocator, c10::DispatchKeySet ks, ScalarType scalar_type, - c10::optional memory_format_opt) { + std::optional memory_format_opt) { return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt); } @@ -206,7 +206,7 @@ TensorBase empty_generic_symint( c10::Allocator* allocator, c10::DispatchKeySet ks, ScalarType scalar_type, - c10::optional memory_format_opt) { + std::optional memory_format_opt) { return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt); } @@ -252,7 +252,7 @@ TensorBase empty_strided_symint_generic( } TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory, - c10::optional memory_format_opt) { + std::optional memory_format_opt) { auto allocator = GetCPUAllocatorMaybePinned(pin_memory); constexpr c10::DispatchKeySet cpu_ks(c10::DispatchKey::CPU); return empty_generic(size, allocator, cpu_ks, dtype, memory_format_opt); @@ -260,11 +260,11 @@ TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory, TensorBase empty_cpu( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU); 
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); @@ -295,10 +295,10 @@ TensorBase empty_strided_cpu(IntArrayRef size, IntArrayRef stride, TensorBase empty_strided_cpu( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); @@ -342,7 +342,7 @@ static MetaAllocator g_meta_alloc; REGISTER_ALLOCATOR(kMeta, &g_meta_alloc); TensorBase empty_meta(IntArrayRef size, ScalarType dtype, - c10::optional memory_format_opt) { + std::optional memory_format_opt) { auto *allocator = GetAllocator(kMeta); constexpr c10::DispatchKeySet meta_dks(c10::DispatchKey::Meta); return at::detail::empty_generic( @@ -351,11 +351,11 @@ TensorBase empty_meta(IntArrayRef size, ScalarType dtype, TensorBase empty_meta( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt ) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta); // NB: because there is no SparseMeta (yet), non-strided layout is @@ -371,11 +371,11 @@ TensorBase empty_meta( TensorBase empty_symint_meta( SymIntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt ) { auto *allocator = GetAllocator(kMeta); constexpr c10::DispatchKeySet ks(c10::DispatchKey::Meta); @@ -405,10 +405,10 @@ TensorBase empty_strided_meta(IntArrayRef size, IntArrayRef stride, TensorBase empty_strided_meta( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); @@ -440,10 +440,10 @@ TensorBase empty_strided_symint_meta(SymIntArrayRef size, SymIntArrayRef stride, TensorBase empty_strided_symint_meta( SymIntArrayRef size, SymIntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); diff --git a/aten/src/ATen/EmptyTensor.h b/aten/src/ATen/EmptyTensor.h index f6e2e53bc99f5..e0e304ea8e8f6 100644 --- a/aten/src/ATen/EmptyTensor.h +++ b/aten/src/ATen/EmptyTensor.h @@ -49,14 +49,14 @@ TORCH_API TensorBase empty_generic( c10::Allocator* allocator, c10::DispatchKeySet ks, 
ScalarType scalar_type, - c10::optional memory_format_opt); + std::optional memory_format_opt); TORCH_API TensorBase empty_generic_symint( SymIntArrayRef size, c10::Allocator* allocator, c10::DispatchKeySet ks, ScalarType scalar_type, - c10::optional memory_format_opt); + std::optional memory_format_opt); TORCH_API TensorBase empty_strided_generic( IntArrayRef size, @@ -76,15 +76,15 @@ TORCH_API TensorBase empty_cpu( IntArrayRef size, ScalarType dtype, bool pin_memory = false, - c10::optional memory_format_opt = c10::nullopt); + std::optional memory_format_opt = c10::nullopt); TORCH_API TensorBase empty_cpu( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt); TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options); @@ -97,10 +97,10 @@ TORCH_API TensorBase empty_strided_cpu( TORCH_API TensorBase empty_strided_cpu( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt); TORCH_API TensorBase empty_strided_cpu( IntArrayRef size, @@ -110,23 +110,23 @@ TORCH_API TensorBase empty_strided_cpu( TORCH_API TensorBase empty_meta( IntArrayRef size, ScalarType dtype, - c10::optional memory_format_opt = c10::nullopt); + std::optional memory_format_opt = c10::nullopt); TORCH_API TensorBase empty_meta( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt); TORCH_API TensorBase empty_symint_meta( SymIntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt); TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options); @@ -136,10 +136,10 @@ empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype); TORCH_API TensorBase empty_strided_meta( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt); TORCH_API TensorBase empty_strided_meta( IntArrayRef size, @@ -154,10 +154,10 @@ TORCH_API TensorBase empty_strided_symint_meta( TORCH_API TensorBase empty_strided_symint_meta( SymIntArrayRef size, SymIntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt); TORCH_API TensorBase empty_strided_symint_meta( SymIntArrayRef size, diff --git a/aten/src/ATen/FunctionalInverses.cpp b/aten/src/ATen/FunctionalInverses.cpp index ebc24085a74a8..c70c8bd842f9e 100644 --- 
a/aten/src/ATen/FunctionalInverses.cpp +++ b/aten/src/ATen/FunctionalInverses.cpp @@ -145,7 +145,7 @@ Tensor FunctionalInverses::_neg_view_inverse(const Tensor& base, const Tensor& m } } -Tensor FunctionalInverses::as_strided_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef stride, c10::optional storage_offset) { +Tensor FunctionalInverses::as_strided_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef stride, std::optional storage_offset) { if (inverse_return_mode == InverseReturnMode::AlwaysView) { // NB: assumes mutated_view is a narrowed view of base. // We should NOT do this for functionalization @@ -220,7 +220,7 @@ Tensor FunctionalInverses::lift_fresh_inverse(const Tensor& base, const Tensor& return mutated_view; } -Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) { +Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, std::optional start, c10::optional end, c10::SymInt step) { if (inverse_return_mode == InverseReturnMode::AlwaysView) { // NB: assumes mutated_view is a narrowed view of base. // We should NOT do this for functionalization diff --git a/aten/src/ATen/FunctionalTensorWrapper.cpp b/aten/src/ATen/FunctionalTensorWrapper.cpp index c9ef28dbf56e4..73edec07e2623 100644 --- a/aten/src/ATen/FunctionalTensorWrapper.cpp +++ b/aten/src/ATen/FunctionalTensorWrapper.cpp @@ -526,7 +526,7 @@ Tensor to_functional_tensor(const Tensor& tensor) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isFunctionalTensor(tensor)); return at::detail::make_tensor(tensor); } -c10::optional to_functional_tensor(const c10::optional& tensor) { +std::optional to_functional_tensor(const c10::optional& tensor) { if (tensor.has_value()) { return c10::make_optional(to_functional_tensor(*tensor)); } @@ -564,7 +564,7 @@ Tensor from_functional_tensor(const Tensor& tensor, bool assert_functional) { return tensor; } } -c10::optional from_functional_tensor(const c10::optional& t, bool assert_functional) { +std::optional from_functional_tensor(const c10::optional& t, bool assert_functional) { if (t.has_value()) { return c10::make_optional(from_functional_tensor(*t, assert_functional)); } @@ -610,7 +610,7 @@ void sync(const Tensor& t) { auto functional_impl = at::functionalization::impl::unsafeGetFunctionalWrapper(t); functional_impl->sync_(); } -void sync(const c10::optional& t) { +void sync(const std::optional& t) { if (t.has_value()) { sync(*t); } @@ -692,7 +692,7 @@ bool isFunctionalTensor(const at::Tensor& tensor) { return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Functionalize); } -bool isFunctionalTensor(const c10::optional& t) { +bool isFunctionalTensor(const std::optional& t) { if (t.has_value()) { return isFunctionalTensor(*t); } else { diff --git a/aten/src/ATen/FunctionalTensorWrapper.h b/aten/src/ATen/FunctionalTensorWrapper.h index 95d6afe5f0be0..6ef890b772c1c 100644 --- a/aten/src/ATen/FunctionalTensorWrapper.h +++ b/aten/src/ATen/FunctionalTensorWrapper.h @@ -286,32 +286,32 @@ TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper( } TORCH_API bool isFunctionalTensor(const at::Tensor& tensor); -TORCH_API bool isFunctionalTensor(const c10::optional& t); 
+TORCH_API bool isFunctionalTensor(const std::optional& t); TORCH_API bool isFunctionalTensor( - const c10::List>& t_list); + const c10::List>& t_list); TORCH_API bool isFunctionalTensor(ITensorListRef list); TORCH_API Tensor to_functional_tensor(const Tensor& tensor); -TORCH_API c10::optional to_functional_tensor( - const c10::optional& tensor); -TORCH_API c10::List> to_functional_tensor( - const c10::List>& t_list); +TORCH_API std::optional to_functional_tensor( + const std::optional& tensor); +TORCH_API c10::List> to_functional_tensor( + const c10::List>& t_list); TORCH_API std::vector to_functional_tensor(ITensorListRef t_list); TORCH_API void freeze_functional_tensor(const Tensor& tensor); TORCH_API Tensor from_functional_tensor(const Tensor& tensor, bool assert_functional = true); -TORCH_API c10::optional from_functional_tensor( - const c10::optional& t, +TORCH_API std::optional from_functional_tensor( + const std::optional& t, bool assert_functional = true); -TORCH_API c10::List> from_functional_tensor( - const c10::List>& t_list); +TORCH_API c10::List> from_functional_tensor( + const c10::List>& t_list); TORCH_API std::vector from_functional_tensor(ITensorListRef t_list); TORCH_API void sync(const at::Tensor& t); -TORCH_API void sync(const c10::optional& t); -TORCH_API void sync(const c10::List>& t_list); +TORCH_API void sync(const std::optional& t); +TORCH_API void sync(const c10::List>& t_list); TORCH_API void sync(ITensorListRef t_list); TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other); diff --git a/aten/src/ATen/FunctionalizeFallbackKernel.cpp b/aten/src/ATen/FunctionalizeFallbackKernel.cpp index 8b26c875fc02c..1ffc268b7f79b 100644 --- a/aten/src/ATen/FunctionalizeFallbackKernel.cpp +++ b/aten/src/ATen/FunctionalizeFallbackKernel.cpp @@ -125,7 +125,7 @@ namespace { // - when we resize to a larger size, it acts as a mutation // - when we resize to a smaller size, it acts as a view // See Note [resize_ in Functionalization] for more dtails -static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format) { +static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, std::optional memory_format) { // First unwrap the tensor arguments at::Tensor self_; if (at::functionalization::impl::isFunctionalTensor(self)) { @@ -216,7 +216,7 @@ static at::Tensor lift_fresh_functionalize_copy(const at::Tensor & self) { // in the local include TLS. As a result, when we redispatch here, // we will end up hitting PreDispatch stack first. So, we should // directly redispatch to the functionalize key manually. - static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "").typed)>(); + static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "").typed)>(); return op.redispatch(c10::DispatchKeySet({c10::DispatchKey::Functionalize}), self, c10::nullopt); } @@ -225,7 +225,7 @@ static at::Tensor lift_fresh_functionalize_copy(const at::Tensor & self) { return at::functionalization::impl::to_functional_tensor(out); } -static bool device_opted_into_functionalization(c10::Device self_device, c10::optional tgt_device) { +static bool device_opted_into_functionalization(c10::Device self_device, std::optional tgt_device) { // If the target device is empty, then the output tensor should be on the same device as the input auto real_tgt_device = tgt_device.has_value() ? 
tgt_device.value() : self_device; return real_tgt_device.type() == c10::DeviceType::XLA || real_tgt_device.type() == c10::DeviceType::Lazy; @@ -235,12 +235,12 @@ static bool device_opted_into_functionalization(c10::Device self_device, c10::op // We should probably get rid of this though. static at::Tensor _to_copy_functionalize( const at::Tensor & self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, bool non_blocking, - c10::optional memory_format) { + std::optional memory_format) { at::Tensor self_; if (at::functionalization::impl::isFunctionalTensor(self)) { // sync any pending updates diff --git a/aten/src/ATen/InferSize.h b/aten/src/ATen/InferSize.h index caa8ec42003c9..411cf12d51341 100644 --- a/aten/src/ATen/InferSize.h +++ b/aten/src/ATen/InferSize.h @@ -23,7 +23,7 @@ inline void infer_size_impl( ResultVec& res) { NumelType newsize = 1; // N.B. this is an index, not a sym dim! - auto infer_dim = c10::optional(); + auto infer_dim = std::optional(); for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) { if (shape[dim] == -1) { if (infer_dim) { diff --git a/aten/src/ATen/LegacyBatchingRegistrations.cpp b/aten/src/ATen/LegacyBatchingRegistrations.cpp index bae40e3c8e51f..e0f7fce43f9e4 100644 --- a/aten/src/ATen/LegacyBatchingRegistrations.cpp +++ b/aten/src/ATen/LegacyBatchingRegistrations.cpp @@ -380,8 +380,8 @@ Tensor select_backward_batching_rule(const Tensor& grad, IntArrayRef input_sizes Tensor slice_batching_rule( const Tensor& self, int64_t dim, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, int64_t step) { auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self); auto dim_physical = self_physical.getPhysicalDim(dim); @@ -996,10 +996,10 @@ Tensor new_zeros_batching_rule( Tensor new_empty_batching_rule( const Tensor& self, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { auto physical_view = MultiBatchVmapTransform::logicalToPhysical(self); auto physical_size = physical_view.getPhysicalShape(size); auto result = physical_view.tensor().new_empty(physical_size, TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory)); @@ -1209,10 +1209,10 @@ TORCH_LIBRARY_IMPL(aten, Batched, m) { BINARY_POINTWISE(mul); BINARY_POINTWISE(div); { - using Binop = Tensor (*)(const Tensor&, const Tensor&, c10::optional); - using Unop = Tensor (*)(const Tensor&, const Scalar&, c10::optional); - m.impl("div.Tensor_mode", binary_pointwise_batching_rule>); - m.impl("div.Scalar_mode", unwrap_and_call>); + using Binop = Tensor (*)(const Tensor&, const Tensor&, std::optional); + using Unop = Tensor (*)(const Tensor&, const Scalar&, std::optional); + m.impl("div.Tensor_mode", binary_pointwise_batching_rule>); + m.impl("div.Scalar_mode", unwrap_and_call>); } // at::pow has three out-of-place overloads diff --git a/aten/src/ATen/NamedTensorUtils.cpp b/aten/src/ATen/NamedTensorUtils.cpp index a76156c03402d..3e66ef7f74dea 100644 --- a/aten/src/ATen/NamedTensorUtils.cpp +++ b/aten/src/ATen/NamedTensorUtils.cpp @@ -128,7 +128,7 @@ static void assert_names_equal(DimnameList a, DimnameList b) { } const Tensor& propagate_names_if_present_and_nonempty(const Tensor& result, - c10::optional maybe_names, + 
std::optional maybe_names, bool validate_names) { auto maybe_name_list = maybe_names.value_or(at::ArrayRef{}); propagate_names_if_nonempty(result.unsafeGetTensorImpl(), maybe_name_list, validate_names); diff --git a/aten/src/ATen/NamedTensorUtils.h b/aten/src/ATen/NamedTensorUtils.h index c1443b7eaa01b..47dcd6dd76851 100644 --- a/aten/src/ATen/NamedTensorUtils.h +++ b/aten/src/ATen/NamedTensorUtils.h @@ -81,7 +81,7 @@ namespace namedinference { const Tensor& propagate_names_if_present_and_nonempty( const Tensor& result, - c10::optional maybe_names, + std::optional maybe_names, bool validate_names = false); // Propagates `names` to `result` if `names` is not empty. // `names` can be empty; see [NOTE] Writing name inference rules diff --git a/aten/src/ATen/NestedTensorImpl.cpp b/aten/src/ATen/NestedTensorImpl.cpp index 2f73b7b304ee3..534e4e71e657f 100644 --- a/aten/src/ATen/NestedTensorImpl.cpp +++ b/aten/src/ATen/NestedTensorImpl.cpp @@ -236,7 +236,7 @@ NestedTensorImpl::NestedTensorImpl( set_custom_sizes_strides(c10::TensorImpl::SizesStridesPolicy::CustomSizes); } -c10::optional NestedTensorImpl::opt_size(int64_t d) const { +std::optional NestedTensorImpl::opt_size(int64_t d) const { if (C10_UNLIKELY(!opt_sizes_.has_value())) { // Cache the metadata to avoid recomputing it each time. opt_sizes_ = c10::make_optional(construct_opt_sizes(nested_sizes_)); diff --git a/aten/src/ATen/NestedTensorImpl.h b/aten/src/ATen/NestedTensorImpl.h index 0bd3d98e73c5c..697969edbbd44 100644 --- a/aten/src/ATen/NestedTensorImpl.h +++ b/aten/src/ATen/NestedTensorImpl.h @@ -61,10 +61,10 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl { // Returns nullopt if the ith dimension is irregular. The ith dimension // of a NestedTensor is regular if the unbound tensors match in // size at the (i-1)th dimension. - c10::optional opt_size(int64_t d) const; + std::optional opt_size(int64_t d) const; int64_t size(int64_t d) const { - c10::optional optional_size = this->opt_size(d); + std::optional optional_size = this->opt_size(d); TORCH_CHECK( optional_size.has_value(), "Given dimension ", @@ -171,7 +171,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl { // Optional to allow it to be computed lazily from nested. 
// TODO: maybe we can remove this metadata since // we can compute it from `nested_sizes_` - mutable c10::optional> opt_sizes_; + mutable std::optional> opt_sizes_; template c10::intrusive_ptr shallow_copy_and_detach_core( diff --git a/aten/src/ATen/SavedTensorHooks.cpp b/aten/src/ATen/SavedTensorHooks.cpp index c1c963409f40e..f2fb0642eb34c 100644 --- a/aten/src/ATen/SavedTensorHooks.cpp +++ b/aten/src/ATen/SavedTensorHooks.cpp @@ -35,7 +35,7 @@ void SavedTensorDefaultHooks::enable() { tls.disabled_error_message = c10::nullopt; } -const c10::optional& SavedTensorDefaultHooks::get_disabled_error_message() { +const std::optional& SavedTensorDefaultHooks::get_disabled_error_message() { return tls.disabled_error_message; } diff --git a/aten/src/ATen/SavedTensorHooks.h b/aten/src/ATen/SavedTensorHooks.h index af821cb908c6a..6ad46a8334c3f 100644 --- a/aten/src/ATen/SavedTensorHooks.h +++ b/aten/src/ATen/SavedTensorHooks.h @@ -21,7 +21,7 @@ struct TORCH_API SavedTensorDefaultHooksTLS { // disabled_error_message is nullopt IFF Saved Tensor hooks is enabled // We did this for efficiency (so we didn't have to keep a separate bool // around) - c10::optional disabled_error_message; + std::optional disabled_error_message; }; } // namespace impl @@ -46,7 +46,7 @@ struct TORCH_API SavedTensorDefaultHooks { static void disable(const std::string& error_message); static void enable(); static bool is_enabled(); - static const c10::optional& get_disabled_error_message(); + static const std::optional& get_disabled_error_message(); }; } // namespace at diff --git a/aten/src/ATen/ScalarOps.cpp b/aten/src/ATen/ScalarOps.cpp index 13a1754fa53a1..f931af0ad445e 100644 --- a/aten/src/ATen/ScalarOps.cpp +++ b/aten/src/ATen/ScalarOps.cpp @@ -23,7 +23,7 @@ Tensor& scalar_fill(Tensor& self, const Scalar& value) { return self; } -Tensor scalar_tensor_static(const Scalar& s, c10::optional dtype_opt, c10::optional device_opt) { +Tensor scalar_tensor_static(const Scalar& s, std::optional dtype_opt, c10::optional device_opt) { at::tracer::impl::NoTracerDispatchMode tracer_guard; at::AutoDispatchBelowAutograd mode; Tensor result = at::detail::empty_cpu( diff --git a/aten/src/ATen/ScalarOps.h b/aten/src/ATen/ScalarOps.h index 943ac161d4c18..ed591955dd876 100644 --- a/aten/src/ATen/ScalarOps.h +++ b/aten/src/ATen/ScalarOps.h @@ -18,8 +18,8 @@ namespace at::detail { Tensor& scalar_fill(Tensor& self, const Scalar& value); TORCH_API Tensor scalar_tensor_static( const Scalar& s, - c10::optional dtype_opt, - c10::optional device_opt); + std::optional dtype_opt, + std::optional device_opt); } // namespace at::detail // This is in the c10 namespace because we use ADL to find the functions in it. 
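The SavedTensorHooks hunks above only swap the optional type, but their comment states the invariant that makes the rename mechanical: `disabled_error_message` is nullopt iff the hooks are enabled, so the enabled/disabled flag lives entirely in whether the optional is engaged. Below is a minimal standalone sketch of that pattern against plain `std::optional`; the `HooksState` type and its members are hypothetical illustrations, not PyTorch code.

```cpp
// Illustrative sketch only (hypothetical names, not from the patch):
// encode an on/off state in the engaged-ness of a std::optional,
// mirroring the "nullopt IFF enabled" comment in SavedTensorHooks.h.
#include <iostream>
#include <optional>
#include <string>
#include <utility>

struct HooksState {
  // nullopt      <=> hooks are enabled (no separate bool is kept around)
  // has_value()  <=> hooks are disabled, and the string says why
  std::optional<std::string> disabled_error_message;

  void disable(std::string reason) { disabled_error_message = std::move(reason); }
  void enable() { disabled_error_message = std::nullopt; }
  bool is_enabled() const { return !disabled_error_message.has_value(); }
};

int main() {
  HooksState tls;
  std::cout << std::boolalpha << tls.is_enabled() << '\n';  // true
  tls.disable("saved tensor hooks are not supported here");
  std::cout << tls.is_enabled() << '\n';                    // false
  std::cout << *tls.disabled_error_message << '\n';         // prints the reason
  tls.enable();
  std::cout << tls.is_enabled() << '\n';                    // true
}
```

Because `std::optional` tracks its own engagement, `enable()` only has to reset the field to nullopt, and no separate boolean needs to be kept in sync with the message, which is the efficiency point the original comment makes.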
diff --git a/aten/src/ATen/TensorIndexing.h b/aten/src/ATen/TensorIndexing.h index eb29b4d5ad739..b2ef33ffc058d 100644 --- a/aten/src/ATen/TensorIndexing.h +++ b/aten/src/ATen/TensorIndexing.h @@ -39,9 +39,9 @@ TORCH_API extern const EllipsisIndexType Ellipsis; struct TORCH_API Slice final { public: Slice( - c10::optional start_index = c10::nullopt, - c10::optional stop_index = c10::nullopt, - c10::optional step_index = c10::nullopt) { + std::optional start_index = c10::nullopt, + std::optional stop_index = c10::nullopt, + std::optional step_index = c10::nullopt) { if (!step_index.has_value()) { step_ = c10::SymInt(1); } else { @@ -205,7 +205,7 @@ static inline Tensor applySlice( c10::SymInt step, bool disable_slice_optimization, const at::Device& self_device, - const c10::optional& self_sizes) { + const std::optional& self_sizes) { // TODO: implement negative step TORCH_CHECK_VALUE(step > 0, "step must be greater than zero"); @@ -233,7 +233,7 @@ static inline Tensor applySelect( SymInt index, int64_t real_dim, const at::Device& /*self_device*/, - const c10::optional& self_sizes) { + const std::optional& self_sizes) { // See NOTE [nested tensor size for indexing] if (self_sizes.has_value()) { auto maybe_index = index.maybe_as_int(); @@ -431,7 +431,7 @@ static inline Tensor handleDimInMultiDimIndexing( std::vector& outIndices, bool disable_slice_optimization, const at::Device& original_tensor_device, - const c10::optional& prev_dim_result_sizes) { + const std::optional& prev_dim_result_sizes) { if (index.is_integer()) { return impl::applySelect( prev_dim_result, @@ -515,7 +515,7 @@ static inline Tensor applySlicing( std::vector& outIndices, bool disable_slice_optimization, const at::Device& self_device, - const c10::optional& self_sizes) { + const std::optional& self_sizes) { int64_t dim = 0; int64_t specified_dims = impl::count_specified_dimensions(indices); @@ -531,9 +531,9 @@ static inline Tensor applySlicing( for (const auto i : c10::irange(indices.size())) { auto& obj = indices[i]; // See NOTE [nested tensor size for indexing] - c10::optional result_sizes = result.is_nested() - ? c10::optional(c10::nullopt) - : c10::optional(result.sym_sizes()); + std::optional result_sizes = result.is_nested() + ? std::optional(c10::nullopt) + : std::optional(result.sym_sizes()); result = handleDimInMultiDimIndexing( /*prev_dim_result=*/result, /*original_tensor=*/self, @@ -607,9 +607,9 @@ static inline Tensor get_item( // nested tensor does not have a size (yet) so for now we represent its size // as null may need to be changed after we reach a better solution for nested // tensor size - c10::optional self_sizes = self.is_nested() - ? c10::optional(c10::nullopt) - : c10::optional(self.sym_sizes()); + std::optional self_sizes = self.is_nested() + ? std::optional(c10::nullopt) + : std::optional(self.sym_sizes()); // handle simple types: integers, slices, none, ellipsis, bool if (indices.size() == 1) { diff --git a/aten/src/ATen/TensorIterator.h b/aten/src/ATen/TensorIterator.h index a241244a5744c..fb61ca65146a3 100644 --- a/aten/src/ATen/TensorIterator.h +++ b/aten/src/ATen/TensorIterator.h @@ -147,7 +147,7 @@ struct TORCH_API OperandInfo { /// promotion target_dtype value can become different from tensor's dtype /// also, during type promotion target_dtype and device can be set for an /// undefined tensor so that tensor can be properly constructed later. 
- c10::optional device = c10::nullopt; + std::optional device = c10::nullopt; ScalarType target_dtype = ScalarType::Undefined; // Caches dtype of the tensor, because scalar_type is an expensive operation // If dtype of the tensor is changed (e.g. as a result of type promotion or in @@ -971,9 +971,9 @@ class TORCH_API TensorIteratorConfig final { int num_outputs_ = 0; int num_inputs_ = 0; - c10::optional static_shape_ = c10::nullopt; - c10::optional static_dtype_ = c10::nullopt; - c10::optional static_device_ = c10::nullopt; + std::optional static_shape_ = c10::nullopt; + std::optional static_dtype_ = c10::nullopt; + std::optional static_device_ = c10::nullopt; bool check_mem_overlap_ = true; bool allow_cpu_scalars_ = false; bool is_reduction_ = false; diff --git a/aten/src/ATen/TensorSubclassLikeUtils.h b/aten/src/ATen/TensorSubclassLikeUtils.h index a9a0b4ecdcf8b..10c26dfe35eca 100644 --- a/aten/src/ATen/TensorSubclassLikeUtils.h +++ b/aten/src/ATen/TensorSubclassLikeUtils.h @@ -61,7 +61,7 @@ inline bool areAnyTensorSubclassLike(TensorList tensors) { } inline bool areAnyOptionalTensorSubclassLike( - const c10::List>& tensors) { + const c10::List>& tensors) { if (c10::impl::dispatch_mode_enabled()) return true; return std::any_of( diff --git a/aten/src/ATen/TensorUtils.cpp b/aten/src/ATen/TensorUtils.cpp index e425a0a8ed130..14e81d6504179 100644 --- a/aten/src/ATen/TensorUtils.cpp +++ b/aten/src/ATen/TensorUtils.cpp @@ -327,7 +327,7 @@ std::vector defaultStrides(IntArrayRef sizes) { // see overloads of computeStride() below. // template -inline c10::optional computeStride_impl( +inline std::optional computeStride_impl( const NewShapeVec& oldshape, const NewShapeVec& oldstride, const NewShapeVec& newshape, @@ -395,7 +395,7 @@ inline c10::optional computeStride_impl( return newstride; } -c10::optional> computeStride( +std::optional> computeStride( IntArrayRef oldshape, IntArrayRef oldstride, IntArrayRef newshape) { @@ -403,7 +403,7 @@ c10::optional> computeStride( return computeStride_impl, IntArrayRef, int64_t>(oldshape, oldstride, newshape, toResult); } -c10::optional computeStride( +std::optional computeStride( c10::SymIntArrayRef oldshape, c10::SymIntArrayRef oldstride, c10::SymIntArrayRef newshape) { @@ -411,7 +411,7 @@ c10::optional computeStride( return computeStride_impl(oldshape, oldstride, newshape, toResult); } -c10::optional computeStride( +std::optional computeStride( IntArrayRef oldshape, IntArrayRef oldstride, const DimVector& newshape) { diff --git a/aten/src/ATen/TensorUtils.h b/aten/src/ATen/TensorUtils.h index 4615ab50606ee..4a81dc280e242 100644 --- a/aten/src/ATen/TensorUtils.h +++ b/aten/src/ATen/TensorUtils.h @@ -171,17 +171,17 @@ TORCH_API void check_dim_size( namespace detail { TORCH_API std::vector defaultStrides(IntArrayRef sizes); -TORCH_API c10::optional> computeStride( +TORCH_API std::optional> computeStride( IntArrayRef oldshape, IntArrayRef oldstride, IntArrayRef newshape); -TORCH_API c10::optional computeStride( +TORCH_API std::optional computeStride( c10::SymIntArrayRef oldshape, c10::SymIntArrayRef oldstride, c10::SymIntArrayRef newshape); -TORCH_API c10::optional computeStride( +TORCH_API std::optional computeStride( IntArrayRef oldshape, IntArrayRef oldstride, const DimVector& newshape); diff --git a/aten/src/ATen/VmapModeRegistrations.cpp b/aten/src/ATen/VmapModeRegistrations.cpp index ab4556c8c4155..3b6198778a353 100644 --- a/aten/src/ATen/VmapModeRegistrations.cpp +++ b/aten/src/ATen/VmapModeRegistrations.cpp @@ -39,7 +39,7 @@ TORCH_LIBRARY_IMPL(aten, 
VmapMode, m) { // CppFunction::makeNamedNotSupported() to avoid listing out the types of everything. // However, registering e.g. CppFunction::makeNamedNotSupported() as an implementation // only works for operators that support boxing. -#define TENSOROPTIONS c10::optional, c10::optional, c10::optional, c10::optional +#define TENSOROPTIONS std::optional, c10::optional, c10::optional, c10::optional // random operations (out-of-place) m.impl("bernoulli", unsupportedRandomOp>); diff --git a/aten/src/ATen/ZeroTensorFallback.cpp b/aten/src/ATen/ZeroTensorFallback.cpp index bc012f8cde909..329216cf3789f 100644 --- a/aten/src/ATen/ZeroTensorFallback.cpp +++ b/aten/src/ATen/ZeroTensorFallback.cpp @@ -16,7 +16,7 @@ namespace at { const auto num_arguments = arguments.size(); const auto stack_start = stack->size() - num_arguments; - c10::optional is_write; + std::optional is_write; for (const auto i : c10::irange(num_arguments)) { const auto& alias_info = arguments[i].alias_info(); if (alias_info != nullptr) { diff --git a/aten/src/ATen/autocast_mode.cpp b/aten/src/ATen/autocast_mode.cpp index c233f17b44580..2d01bdeca500b 100644 --- a/aten/src/ATen/autocast_mode.cpp +++ b/aten/src/ATen/autocast_mode.cpp @@ -144,7 +144,7 @@ Tensor cached_cast(at::ScalarType to_type, const Tensor& arg, DeviceType device_ Banned functions *******************************/ -static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const c10::optional&, int64_t) { +static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const std::optional&, int64_t) { AT_ERROR("torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n" "Many models use a sigmoid layer right before the binary cross entropy layer.\n" "In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n" diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h index 58bd390097206..c36030db5b048 100644 --- a/aten/src/ATen/autocast_mode.h +++ b/aten/src/ATen/autocast_mode.h @@ -297,9 +297,9 @@ TORCH_API Tensor cached_cast( c10::DeviceType device_type = c10::DeviceType::CUDA); // Overload to process optional -inline c10::optional cached_cast( +inline std::optional cached_cast( at::ScalarType to_type, - const c10::optional& arg, + const std::optional& arg, c10::DeviceType device_type = c10::DeviceType::CUDA) { if (arg.has_value()) { return cached_cast(to_type, *arg, device_type); @@ -353,9 +353,9 @@ Otherwise, set it to the autocast type. ********************************************************/ // Overload to catch dtype flags -c10::optional inline set_opt_dtype( +std::optional inline set_opt_dtype( at::ScalarType to_type, - const c10::optional& dtype) { + const std::optional& dtype) { return dtype.has_value() ? dtype : to_type; } @@ -392,7 +392,7 @@ enum class CastPolicy : uint8_t { fp32, // Cast all inputs to at::kFloat before running the op. fp32_set_opt_dtype, // Treats functions (like softmax) that // 1. we'd like to run in fp32 and - // 2. have a c10::optional arg that controls + // 2. have a std::optional arg that controls // the output type. // fp32_set_opt_dtype wrappers' policy is: if the output // type is already set, don't touch it, otherwise, set @@ -865,24 +865,24 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. 
_(ADD_NS(norm), \ "norm.Scalar", \ Tensor(const Tensor&, const Scalar&), \ - Tensor(const Tensor&, const c10::optional&, ScalarType), \ + Tensor(const Tensor&, const std::optional&, ScalarType), \ fp32_append_dtype) \ _(ADD_NS(norm), \ "norm.ScalarOpt_dim", \ - Tensor(const Tensor&, const c10::optional&, IntArrayRef, bool), \ + Tensor(const Tensor&, const std::optional&, IntArrayRef, bool), \ Tensor( \ const Tensor&, \ - const c10::optional&, \ + const std::optional&, \ IntArrayRef, \ bool, \ ScalarType), \ fp32_append_dtype) \ _(ADD_NS(norm), \ "norm.names_ScalarOpt_dim", \ - Tensor(const Tensor&, const c10::optional&, DimnameList, bool), \ + Tensor(const Tensor&, const std::optional&, DimnameList, bool), \ Tensor( \ const Tensor&, \ - const c10::optional&, \ + const std::optional&, \ DimnameList, \ bool, \ ScalarType), \ diff --git a/aten/src/ATen/core/CachingHostAllocator.h b/aten/src/ATen/core/CachingHostAllocator.h index d04cb1c6b8a70..449f8d743157b 100644 --- a/aten/src/ATen/core/CachingHostAllocator.h +++ b/aten/src/ATen/core/CachingHostAllocator.h @@ -152,7 +152,7 @@ struct CachingHostAllocatorImpl { // do not need to look up the ctx in blocks_. auto* block = reinterpret_cast(ctx); - c10::optional> events; + std::optional> events; { std::lock_guard g(block->mutex_); block->allocated_ = false; @@ -263,7 +263,7 @@ struct CachingHostAllocatorImpl { // Avoid calling cudaEventDestroy while holding a mutex, so move // intermediate events out of the lock into this object. // process the last event - c10::optional> processed; + std::optional> processed; { std::lock_guard g(events_mutex_); if (!events_.empty()) { @@ -324,7 +324,7 @@ struct CachingHostAllocatorImpl { } // Record an event on stream and store event into events. - virtual void record_stream(c10::optional>& events, S stream) { + virtual void record_stream(std::optional>& events, S stream) { TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for record_stream"); } diff --git a/aten/src/ATen/core/CheckMemoryFormat.h b/aten/src/ATen/core/CheckMemoryFormat.h index 442889e2eec6f..8add9509f4d5f 100644 --- a/aten/src/ATen/core/CheckMemoryFormat.h +++ b/aten/src/ATen/core/CheckMemoryFormat.h @@ -2,10 +2,10 @@ namespace c10::impl { -inline c10::optional +inline std::optional check_tensor_options_and_extract_memory_format( const TensorOptions& options, - c10::optional memory_format) { + std::optional memory_format) { TORCH_CHECK( options.requires_grad_opt() == c10::nullopt || options.requires_grad_opt().value() == false, diff --git a/aten/src/ATen/core/DeprecatedTypeProperties.cpp b/aten/src/ATen/core/DeprecatedTypeProperties.cpp index 15231f965aefd..a97a6828571e7 100644 --- a/aten/src/ATen/core/DeprecatedTypeProperties.cpp +++ b/aten/src/ATen/core/DeprecatedTypeProperties.cpp @@ -14,7 +14,7 @@ Storage DeprecatedTypeProperties::unsafeStorageFromTH(void * th_pointer, bool re return at::unsafeStorageFromTH(th_pointer, retain); } -Tensor DeprecatedTypeProperties::copy(const Tensor & src, bool non_blocking, c10::optional to_device) const { +Tensor DeprecatedTypeProperties::copy(const Tensor & src, bool non_blocking, std::optional to_device) const { if (to_device) { return src.to(src.options().dtype(scalarType()).device(to_device), non_blocking, /*copy=*/true); } diff --git a/aten/src/ATen/core/DeprecatedTypeProperties.h b/aten/src/ATen/core/DeprecatedTypeProperties.h index 222465eac56f2..a945761e8ff97 100644 --- a/aten/src/ATen/core/DeprecatedTypeProperties.h +++ b/aten/src/ATen/core/DeprecatedTypeProperties.h @@ -107,7 +107,7 @@ class 
TORCH_API DeprecatedTypeProperties { /// Constructs the `TensorOptions` from a type and a Device. Asserts that /// the device type matches the device type of the type. - TensorOptions options(c10::optional device_opt) const { + TensorOptions options(std::optional device_opt) const { if (!device_opt.has_value()) { return options(-1); } else { @@ -129,7 +129,7 @@ class TORCH_API DeprecatedTypeProperties { Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const; Storage unsafeStorageFromTH(void * th_pointer, bool retain) const; - Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional to_device={}) const; + Tensor copy(const Tensor & src, bool non_blocking=false, std::optional to_device={}) const; private: Backend backend_; diff --git a/aten/src/ATen/core/Dimname.h b/aten/src/ATen/core/Dimname.h index e53db14732c89..d3bc5a45abb7a 100644 --- a/aten/src/ATen/core/Dimname.h +++ b/aten/src/ATen/core/Dimname.h @@ -21,7 +21,7 @@ struct TORCH_API Dimname { bool isWildcard() const { return type_ == NameType::WILDCARD; } bool matches(Dimname other) const; - c10::optional unify(Dimname other) const; + std::optional unify(Dimname other) const; private: Dimname(Symbol name) diff --git a/aten/src/ATen/core/DistributionsHelper.h b/aten/src/ATen/core/DistributionsHelper.h index 8b399510e94aa..a46608200e5b9 100644 --- a/aten/src/ATen/core/DistributionsHelper.h +++ b/aten/src/ATen/core/DistributionsHelper.h @@ -144,7 +144,7 @@ template next_##TYPE##_normal_sample()) { \ *ret = *(generator->next_##TYPE##_normal_sample()); \ - generator->set_next_##TYPE##_normal_sample(c10::optional()); \ + generator->set_next_##TYPE##_normal_sample(std::optional()); \ return true; \ } \ return false; \ diff --git a/aten/src/ATen/core/Generator.h b/aten/src/ATen/core/Generator.h index b237c571b22d3..6b76db5d06864 100644 --- a/aten/src/ATen/core/Generator.h +++ b/aten/src/ATen/core/Generator.h @@ -150,7 +150,7 @@ Generator make_generator(Args&&... args) { * the backend generator type (CPU/CUDAGeneratorImpl etc.) */ template -static inline T * check_generator(c10::optional gen) { +static inline T * check_generator(std::optional gen) { TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt"); TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed"); TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'"); @@ -164,7 +164,7 @@ static inline T * check_generator(c10::optional gen) { * the backend generator type (CPU/CUDAGeneratorImpl etc.) */ template -static inline T* get_generator_or_default(const c10::optional& gen, const Generator& default_gen) { +static inline T* get_generator_or_default(const std::optional& gen, const Generator& default_gen) { return gen.has_value() && gen->defined() ? 
check_generator(gen) : check_generator(default_gen); } diff --git a/aten/src/ATen/core/GeneratorForPrivateuseone.cpp b/aten/src/ATen/core/GeneratorForPrivateuseone.cpp index 1e8d8daa9fc8f..35b1dd9fdd4eb 100644 --- a/aten/src/ATen/core/GeneratorForPrivateuseone.cpp +++ b/aten/src/ATen/core/GeneratorForPrivateuseone.cpp @@ -5,8 +5,8 @@ namespace at { static std::mutex _generator_mutex_lock; -c10::optional& GetGeneratorPrivate() { - static c10::optional generator_privateuse1 = c10::nullopt; +std::optional& GetGeneratorPrivate() { + static std::optional generator_privateuse1 = c10::nullopt; return generator_privateuse1; } diff --git a/aten/src/ATen/core/GeneratorForPrivateuseone.h b/aten/src/ATen/core/GeneratorForPrivateuseone.h index 9b84f162a7652..747c77897ff9b 100644 --- a/aten/src/ATen/core/GeneratorForPrivateuseone.h +++ b/aten/src/ATen/core/GeneratorForPrivateuseone.h @@ -7,7 +7,7 @@ namespace at { using GeneratorFuncType = std::function; -c10::optional& GetGeneratorPrivate(); +std::optional& GetGeneratorPrivate(); class TORCH_API _GeneratorRegister { public: diff --git a/aten/src/ATen/core/List.h b/aten/src/ATen/core/List.h index 68ecf5ed343f8..53560b9666ae3 100644 --- a/aten/src/ATen/core/List.h +++ b/aten/src/ATen/core/List.h @@ -58,10 +58,10 @@ struct ListElementConstReferenceTraits { using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return::type; }; -// There is no to() overload for c10::optional. +// There is no to() overload for std::optional. template<> -struct ListElementConstReferenceTraits> { - using const_reference = c10::optional>; +struct ListElementConstReferenceTraits> { + using const_reference = std::optional>; }; template diff --git a/aten/src/ATen/core/List_inl.h b/aten/src/ATen/core/List_inl.h index f8ce73eb3f9cc..64760b5f782b4 100644 --- a/aten/src/ATen/core/List_inl.h +++ b/aten/src/ATen/core/List_inl.h @@ -168,8 +168,8 @@ list_element_to_const_ref(const IValue& element) { } template<> -inline typename ListElementConstReferenceTraits>::const_reference -list_element_to_const_ref>(const IValue& element) { +inline typename ListElementConstReferenceTraits>::const_reference +list_element_to_const_ref>(const IValue& element) { return element.toOptionalStringRef(); } diff --git a/aten/src/ATen/core/List_test.cpp b/aten/src/ATen/core/List_test.cpp index 56da3cf299e90..808cbe2d8b63a 100644 --- a/aten/src/ATen/core/List_test.cpp +++ b/aten/src/ATen/core/List_test.cpp @@ -1127,13 +1127,13 @@ TEST(ListTest, canAccessStringByReference) { } TEST(ListTest, canAccessOptionalStringByReference) { - List> list({"one", "two", c10::nullopt}); + List> list({"one", "two", c10::nullopt}); const auto& listRef = list; static_assert( - std::is_same_v>>, - "List> access should be by const reference"); - c10::optional str1 = list[1]; - c10::optional str2 = list[2]; + std::is_same_v>>, + "List> access should be by const reference"); + std::optional str1 = list[1]; + std::optional str2 = list[2]; decltype(auto) strRef1 = listRef[1]; decltype(auto) strRef2 = listRef[2]; // NOLINTNEXTLINE(bugprone-unchecked-optional-access) diff --git a/aten/src/ATen/core/NamedTensor.h b/aten/src/ATen/core/NamedTensor.h index d6ff30ce00838..7eed27e4f1a61 100644 --- a/aten/src/ATen/core/NamedTensor.h +++ b/aten/src/ATen/core/NamedTensor.h @@ -100,7 +100,7 @@ void check_names_valid_for(const TensorBase& tensor, DimnameList names); void check_names_valid_for(size_t tensor_dim, DimnameList names); // Sets the names of `tensor` to be `names`. 
-TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional names); +TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::optional names); TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector&& names, bool validate_names); constexpr size_t kMaxNamedTensorDim = 64; @@ -111,7 +111,7 @@ namespace impl { // Some helper functions on TensorImpl. Useful for working with names in TH. // XXX: Ideally these would exist as methods on TensorImpl -TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional names, bool validate_names); +TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::optional names, bool validate_names); TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector&& names, bool validate_names); void check_names_valid_for(TensorImpl* impl, DimnameList names); @@ -132,7 +132,7 @@ TORCH_API DimnameList get_names(const TensorImpl* impl); // Returns the names of the tensor if they have been allocated; returns nullopt // instead if the haven't been. The names of a tensor are not allocated if a // tensor is constructed with names=None. -TORCH_API c10::optional get_opt_names(const TensorImpl* impl); +TORCH_API std::optional get_opt_names(const TensorImpl* impl); } // namespace impl diff --git a/aten/src/ATen/core/NestedIntSymNodeImpl.cpp b/aten/src/ATen/core/NestedIntSymNodeImpl.cpp index b703f76773b46..7cdc7aa2cbe8f 100644 --- a/aten/src/ATen/core/NestedIntSymNodeImpl.cpp +++ b/aten/src/ATen/core/NestedIntSymNodeImpl.cpp @@ -7,7 +7,7 @@ namespace c10 { namespace { bool _eq(const char* op, c10::SymNodeImpl* lhs, c10::SymNodeImpl* rhs) { TORCH_INTERNAL_ASSERT(lhs->is_nested_int()); - c10::optional c = rhs->nested_int(); + std::optional c = rhs->nested_int(); return ( c.has_value() && lhs->nested_int() == *c && lhs->nested_int_coeff() == rhs->nested_int_coeff()); @@ -68,7 +68,7 @@ c10::SymNode NestedIntSymNodeImpl::le(const c10::SymNode& other) { c10::SymNode NestedIntSymNodeImpl::mul(const c10::SymNode& other) { TORCH_CHECK(!other->nested_int(), "nested int cannot be multiplied by nested int"); - c10::optional c = other->constant_int(); + std::optional c = other->constant_int(); TORCH_CHECK(c.has_value()); return SymNode(c10::make_intrusive(val_, coeff_ * *c)); } diff --git a/aten/src/ATen/core/NestedIntSymNodeImpl.h b/aten/src/ATen/core/NestedIntSymNodeImpl.h index 228f4310a38fc..786464c4c3ea8 100644 --- a/aten/src/ATen/core/NestedIntSymNodeImpl.h +++ b/aten/src/ATen/core/NestedIntSymNodeImpl.h @@ -134,11 +134,11 @@ class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl { c10::SymNode le(const c10::SymNode& other) override; c10::SymNode mul(const c10::SymNode& other) override; - c10::optional nested_int() override { + std::optional nested_int() override { return val_; } - c10::optional nested_int_coeff() override { + std::optional nested_int_coeff() override { return coeff_; } diff --git a/aten/src/ATen/core/PythonFallbackKernel.cpp b/aten/src/ATen/core/PythonFallbackKernel.cpp index a34341b4a9437..caef951ed1268 100644 --- a/aten/src/ATen/core/PythonFallbackKernel.cpp +++ b/aten/src/ATen/core/PythonFallbackKernel.cpp @@ -14,7 +14,7 @@ namespace { // To achieve this, we ensure that the tls is empty by default and emptied again both when // we call into user torch_dispatch or returning back to python after this call. 
-thread_local c10::optional tls_on_entry; +thread_local std::optional tls_on_entry; c10::impl::LocalDispatchKeySet safe_get_tls_on_entry() { TORCH_CHECK(tls_on_entry.has_value(), "Accessing torch dispatch state outside of '__torch_dispatch__' " diff --git a/aten/src/ATen/core/Tensor.cpp b/aten/src/ATen/core/Tensor.cpp index ed19144d0eaff..2ddd9b4e65bac 100644 --- a/aten/src/ATen/core/Tensor.cpp +++ b/aten/src/ATen/core/Tensor.cpp @@ -42,7 +42,7 @@ TensorBase TensorBase::to( at::TensorOptions options, bool non_blocking, bool copy, - c10::optional memory_format) const { + std::optional memory_format) const { Tensor self(*this); return at::_ops::to_dtype_layout::call( self, optTypeMetaToScalarType(options.dtype_opt()), @@ -134,8 +134,8 @@ bool TensorBase::retains_grad() const { } void Tensor::_backward(TensorList inputs, - const c10::optional& gradient, - c10::optional keep_graph, + const std::optional& gradient, + std::optional keep_graph, bool create_graph) const { return impl::GetVariableHooks()->_backward(*this, inputs, gradient, keep_graph, create_graph); } diff --git a/aten/src/ATen/core/TensorBase.h b/aten/src/ATen/core/TensorBase.h index e03c6bdf2bd10..87d5937cf9ebc 100644 --- a/aten/src/ATen/core/TensorBase.h +++ b/aten/src/ATen/core/TensorBase.h @@ -147,7 +147,7 @@ class TORCH_API TensorBase { const TensorBase& fill_(const c10::Scalar& scalar) const; const TensorBase& zero_() const; - TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, std::optional memory_format=c10::nullopt) const; bool is_complex() const { return at::isComplexType(this->scalar_type()); @@ -249,7 +249,7 @@ class TORCH_API TensorBase { return impl_->strides(); } // See impl::get_opt_names in ATen/NamedTensor.h for docs. - c10::optional opt_names() const { + std::optional opt_names() const { return impl::get_opt_names(unsafeGetTensorImpl()); } // See impl::get_names in ATen/NamedTensor.h for docs. @@ -712,7 +712,7 @@ class TORCH_API TensorBase { /// // f requires grad, has no operation creating it /// @endcode - /// \fn void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; + /// \fn void backward(const Tensor & gradient={}, std::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; /// /// Computes the gradient of current tensor with respect to graph leaves. /// @@ -1010,7 +1010,7 @@ struct ExclusivelyOwnedTraits : public c10::ExclusivelyOwnedTens namespace at { inline c10::MaybeOwned borrow_from_optional_tensor( - const c10::optional& opt) { + const std::optional& opt) { return opt.has_value() ? 
c10::MaybeOwned::borrowed(*opt) : c10::MaybeOwned::owned(std::in_place); diff --git a/aten/src/ATen/core/TorchDispatchUtils.cpp b/aten/src/ATen/core/TorchDispatchUtils.cpp index 8f666e5a476ab..32085a9f70627 100644 --- a/aten/src/ATen/core/TorchDispatchUtils.cpp +++ b/aten/src/ATen/core/TorchDispatchUtils.cpp @@ -17,7 +17,7 @@ bool tensorlist_has_dispatch(at::ITensorListRef li) { return false; } -bool tensorlist_has_dispatch(const c10::List>& li) { +bool tensorlist_has_dispatch(const c10::List>& li) { for (auto i : c10::irange(li.size())) { auto t = li.get(i); if (t && tensor_has_dispatch(*t)) { diff --git a/aten/src/ATen/core/TorchDispatchUtils.h b/aten/src/ATen/core/TorchDispatchUtils.h index 0ead779360097..4f5d9e22e4692 100644 --- a/aten/src/ATen/core/TorchDispatchUtils.h +++ b/aten/src/ATen/core/TorchDispatchUtils.h @@ -10,7 +10,7 @@ namespace at::impl { TORCH_API bool tensor_has_dispatch(const at::Tensor& t); TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li); -TORCH_API bool tensorlist_has_dispatch(const c10::List>& li); +TORCH_API bool tensorlist_has_dispatch(const c10::List>& li); using c10::impl::dispatch_mode_enabled; } diff --git a/aten/src/ATen/core/VariableHooksInterface.h b/aten/src/ATen/core/VariableHooksInterface.h index 47d74f5433ac2..f9c0aa4a5fc14 100644 --- a/aten/src/ATen/core/VariableHooksInterface.h +++ b/aten/src/ATen/core/VariableHooksInterface.h @@ -60,8 +60,8 @@ struct TORCH_API VariableHooksInterface { virtual void _backward( const Tensor&, TensorList, - const c10::optional&, - c10::optional, + const std::optional&, + std::optional, bool) const = 0; virtual void requires_grad_(const TensorBase&, bool) const = 0; virtual void basic_autograd_not_implemented_fallback( diff --git a/aten/src/ATen/core/boxing/KernelFunction.h b/aten/src/ATen/core/boxing/KernelFunction.h index c950f4c80ffc7..7b55c2323a2ff 100644 --- a/aten/src/ATen/core/boxing/KernelFunction.h +++ b/aten/src/ATen/core/boxing/KernelFunction.h @@ -22,7 +22,7 @@ using has_symint = std::is_same, std::is_same, std::is_same, - std::is_same, T> + std::is_same, T> >; template @@ -46,8 +46,8 @@ struct remove_symint { }; template <> -struct remove_symint> { - using type = c10::optional; +struct remove_symint> { + using type = std::optional; }; diff --git a/aten/src/ATen/core/boxing/KernelFunction_impl.h b/aten/src/ATen/core/boxing/KernelFunction_impl.h index 0d6149c8090a9..0ad79b00be56b 100644 --- a/aten/src/ATen/core/boxing/KernelFunction_impl.h +++ b/aten/src/ATen/core/boxing/KernelFunction_impl.h @@ -71,7 +71,7 @@ inline typename remove_symint::type unpackSymInt(c10::SymIn } template <> -inline typename remove_symint>::type unpackSymInt(c10::optional x) { +inline typename remove_symint>::type unpackSymInt(c10::optional x) { return x.has_value() ? 
c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt; } diff --git a/aten/src/ATen/core/boxing/KernelFunction_test.cpp b/aten/src/ATen/core/boxing/KernelFunction_test.cpp index 6453e5e00b5c4..a0f990e87aafe 100644 --- a/aten/src/ATen/core/boxing/KernelFunction_test.cpp +++ b/aten/src/ATen/core/boxing/KernelFunction_test.cpp @@ -6,7 +6,7 @@ using std::vector; using std::tuple; -using c10::optional; +using std::optional; using c10::IValue; using c10::OperatorKernel; using c10::OperatorHandle; diff --git a/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp index 7eb0137b283fc..fa562c1d7ca4f 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp @@ -207,15 +207,15 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithIntListOu EXPECT_EQ(6, result[0].toIntVector()[2]); } -std::tuple, c10::optional, Dict> kernelWithMultipleOutputs(Tensor) { +std::tuple, std::optional, Dict> kernelWithMultipleOutputs(Tensor) { Dict dict; dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("second", dummyTensor(DispatchKey::CUDA)); - return std::tuple, c10::optional, Dict>( + return std::tuple, std::optional, Dict>( dummyTensor(DispatchKey::CUDA), 5, {dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}, - c10::optional(std::in_place, 0), + std::optional(std::in_place, 0), dict ); } @@ -808,11 +808,11 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenFallbackKernelWitho EXPECT_EQ(4, outputs[0].toInt()); } -c10::optional called_arg2 = c10::nullopt; -c10::optional called_arg3 = c10::nullopt; -c10::optional called_arg4 = c10::nullopt; +std::optional called_arg2 = c10::nullopt; +std::optional called_arg3 = c10::nullopt; +std::optional called_arg4 = c10::nullopt; -void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { +void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -846,7 +846,7 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI EXPECT_FALSE(called_arg4.has_value()); } -c10::optional kernelWithOptInputWithOutput(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { +std::optional kernelWithOptInputWithOutput(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -883,8 +883,8 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI EXPECT_FALSE(called_arg4.has_value()); } -std::tuple, c10::optional, c10::optional> -kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { +std::tuple, c10::optional, c10::optional> +kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { return std::make_tuple(arg2, arg3, arg4); } @@ -936,7 +936,7 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernel_whenRegister auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); ASSERT_TRUE(op.has_value()); - c10::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), 
op->schema()); + std::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); EXPECT_FALSE(differences.has_value()); } diff --git a/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp index 15f7caae529b4..ed448d054c713 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp @@ -223,15 +223,15 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithIntListOutput_w EXPECT_EQ(6, result[0].toIntVector()[2]); } -std::tuple, c10::optional, Dict> kernelWithMultipleOutputs(Tensor) { +std::tuple, std::optional, Dict> kernelWithMultipleOutputs(Tensor) { Dict dict; dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("second", dummyTensor(DispatchKey::CUDA)); - return std::tuple, c10::optional, Dict>( + return std::tuple, std::optional, Dict>( dummyTensor(DispatchKey::CUDA), 5, c10::List({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), - c10::optional(std::in_place, 0), + std::optional(std::in_place, 0), dict ); } @@ -550,11 +550,11 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenFallbackKernelWithoutTens EXPECT_EQ(4, outputs[0].toInt()); } -c10::optional called_arg2 = c10::nullopt; -c10::optional called_arg3 = c10::nullopt; -c10::optional called_arg4 = c10::nullopt; +std::optional called_arg2 = c10::nullopt; +std::optional called_arg3 = c10::nullopt; +std::optional called_arg4 = c10::nullopt; -void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { +void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -588,7 +588,7 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_ EXPECT_FALSE(called_arg4.has_value()); } -c10::optional kernelWithOptInputWithOutput(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { +std::optional kernelWithOptInputWithOutput(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -625,8 +625,8 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_ EXPECT_FALSE(called_arg4.has_value()); } -std::tuple, c10::optional, c10::optional> -kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { +std::tuple, c10::optional, c10::optional> +kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { return std::make_tuple(arg2, arg3, arg4); } @@ -690,7 +690,7 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernel_whenRegisteredWith auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); ASSERT_TRUE(op.has_value()); - c10::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); + std::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); EXPECT_FALSE(differences.has_value()); } diff --git 
a/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp index a1a1b37e2d83e..22203b7326f38 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp @@ -188,15 +188,15 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithIntListOutp TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) { auto registrar = RegisterOperators() - .op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", [] (Tensor) -> std::tuple, c10::optional, Dict> { + .op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", [] (Tensor) -> std::tuple, std::optional, Dict> { Dict dict; dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("second", dummyTensor(DispatchKey::CUDA)); - return std::tuple, c10::optional, Dict>( + return std::tuple, std::optional, Dict>( dummyTensor(DispatchKey::CUDA), 5, {dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}, - c10::optional(std::in_place, 0), + std::optional(std::in_place, 0), dict ); }); @@ -733,13 +733,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenFallbackKernelWithout TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool called; - c10::optional called_arg2 = c10::nullopt; - c10::optional called_arg3 = c10::nullopt; - c10::optional called_arg4 = c10::nullopt; + std::optional called_arg2 = c10::nullopt; + std::optional called_arg3 = c10::nullopt; + std::optional called_arg4 = c10::nullopt; auto registrar = RegisterOperators().op( "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()", - [&] (Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + [&] (Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -773,13 +773,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool called; - c10::optional called_arg2 = c10::nullopt; - c10::optional called_arg3 = c10::nullopt; - c10::optional called_arg4 = c10::nullopt; + std::optional called_arg2 = c10::nullopt; + std::optional called_arg3 = c10::nullopt; + std::optional called_arg4 = c10::nullopt; auto registrar = RegisterOperators().op( "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? 
arg4) -> Tensor?", - [&] (Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + [&] (Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -816,13 +816,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool called; - c10::optional called_arg2 = c10::nullopt; - c10::optional called_arg3 = c10::nullopt; - c10::optional called_arg4 = c10::nullopt; + std::optional called_arg2 = c10::nullopt; + std::optional called_arg3 = c10::nullopt; + std::optional called_arg4 = c10::nullopt; auto registrar = RegisterOperators().op( "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)", - [] (Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + [] (Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { return std::make_tuple(arg2, arg3, arg4); }); auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""}); @@ -866,7 +866,7 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernel_whenRegistered auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); ASSERT_TRUE(op.has_value()); - c10::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); + std::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); EXPECT_FALSE(differences.has_value()); } diff --git a/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp index dc463cb3fe180..ea06bbccc7bd6 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp @@ -187,15 +187,15 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithIntListOutput_whe TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) { auto registrar = RegisterOperators() .op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", - RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor) -> std::tuple, c10::optional, Dict> { + RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor) -> std::tuple, std::optional, Dict> { Dict dict; dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("second", dummyTensor(DispatchKey::CUDA)); - return std::tuple, c10::optional, Dict>( + return std::tuple, std::optional, Dict>( dummyTensor(DispatchKey::CUDA), 5, c10::List({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), - c10::optional(std::in_place, 0), + std::optional(std::in_place, 0), dict ); })); @@ -466,14 +466,14 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenFallbackKernelWithoutTensor EXPECT_EQ(4, outputs[0].toInt()); } -c10::optional called_arg2 = c10::nullopt; -c10::optional called_arg3 = c10::nullopt; -c10::optional called_arg4 = c10::nullopt; +std::optional called_arg2 = c10::nullopt; +std::optional called_arg3 = c10::nullopt; +std::optional called_arg4 = c10::nullopt; 
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) { auto registrar = RegisterOperators().op( "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()", - RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -507,7 +507,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) { auto registrar = RegisterOperators().op( "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?", - RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -544,7 +544,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) { auto registrar = RegisterOperators().op( "_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)", - RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { return std::make_tuple(arg2, arg3, arg4); })); auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""}); @@ -588,7 +588,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernel_whenRegisteredWithou auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); ASSERT_TRUE(op.has_value()); - c10::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); + std::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); EXPECT_FALSE(differences.has_value()); } diff --git a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h index ccd94ff1de2be..4642be5d689a5 100644 --- a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h +++ b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h @@ -116,7 +116,7 @@ namespace impl { }; template - struct assert_is_valid_input_type, AllowDeprecatedTypes> + struct assert_is_valid_input_type, AllowDeprecatedTypes> : assert_is_valid_input_type {}; template @@ -226,7 +226,7 @@ namespace impl { }; template - struct assert_is_valid_output_type, AllowDeprecatedTypes> + struct assert_is_valid_output_type, AllowDeprecatedTypes> : assert_is_valid_output_type {}; template diff --git a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp 
b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp index 337f0d4c0cad3..1609e014f43f0 100644 --- a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp +++ b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp @@ -205,15 +205,15 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithIntListOutput_wh } struct KernelWithMultipleOutputs final : OperatorKernel { - std::tuple, c10::optional, Dict> operator()(Tensor) { + std::tuple, std::optional, Dict> operator()(Tensor) { Dict dict; dict.insert("first", dummyTensor(DispatchKey::CPU)); dict.insert("second", dummyTensor(DispatchKey::CUDA)); - return std::tuple, c10::optional, Dict>( + return std::tuple, std::optional, Dict>( dummyTensor(DispatchKey::CUDA), 5, c10::List({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), - c10::optional(std::in_place, 0), + std::optional(std::in_place, 0), dict ); } @@ -679,12 +679,12 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenFallbackKernelWithoutTenso EXPECT_EQ(4, outputs[0].toInt()); } -c10::optional called_arg2 = c10::nullopt; -c10::optional called_arg3 = c10::nullopt; -c10::optional called_arg4 = c10::nullopt; +std::optional called_arg2 = c10::nullopt; +std::optional called_arg3 = c10::nullopt; +std::optional called_arg4 = c10::nullopt; struct KernelWithOptInputWithoutOutput final : OperatorKernel { - void operator()(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + void operator()(Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -720,7 +720,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w } struct KernelWithOptInputWithOutput final : OperatorKernel { - c10::optional operator()(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + std::optional operator()(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { called = true; called_arg2 = arg2; called_arg3 = arg3; @@ -759,8 +759,8 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w } struct KernelWithOptInputWithMultipleOutputs final : OperatorKernel { - std::tuple, c10::optional, c10::optional> - operator()(Tensor arg1, const c10::optional& arg2, c10::optional arg3, c10::optional arg4) { + std::tuple, c10::optional, c10::optional> + operator()(Tensor arg1, const std::optional& arg2, c10::optional arg3, c10::optional arg4) { return std::make_tuple(arg2, arg3, arg4); } }; @@ -821,7 +821,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegisteredWitho auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); ASSERT_TRUE(op.has_value()); - c10::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); + std::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); EXPECT_FALSE(differences.has_value()); } @@ -832,7 +832,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegisteredCatch auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""}); ASSERT_TRUE(op.has_value()); - c10::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, 
int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); + std::optional differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema()); EXPECT_FALSE(differences.has_value()); } diff --git a/aten/src/ATen/core/builtin_function.h b/aten/src/ATen/core/builtin_function.h index b25ca55c16851..9aef3a0f62cf5 100644 --- a/aten/src/ATen/core/builtin_function.h +++ b/aten/src/ATen/core/builtin_function.h @@ -63,7 +63,7 @@ struct BuiltinOpFunction : public Function { bool call( Stack& stack, - c10::optional, + std::optional, c10::function_ref) override { run(stack); return false; diff --git a/aten/src/ATen/core/class_type.cpp b/aten/src/ATen/core/class_type.cpp index b4ef2979738f9..0a9a8074067ee 100644 --- a/aten/src/ATen/core/class_type.cpp +++ b/aten/src/ATen/core/class_type.cpp @@ -469,7 +469,7 @@ bool ClassType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const { } ClassTypePtr ClassType::create( - c10::optional qualifiedName, + std::optional qualifiedName, std::weak_ptr cu, bool is_module, std::string doc_string, @@ -483,7 +483,7 @@ ClassTypePtr ClassType::create( } ClassType::ClassType( - c10::optional name, + std::optional name, std::weak_ptr cu, bool is_module, std::string doc_string, @@ -620,7 +620,7 @@ IValue ClassType::getConstant(size_t slot) const { return constantValues_[slot]; } -c10::optional ClassType::findConstant(const std::string& name) const { +std::optional ClassType::findConstant(const std::string& name) const { TORCH_INTERNAL_ASSERT(constantNames_.size() == constantValues_.size()); size_t pos = 0; for (const auto& c : constantNames_) { @@ -652,7 +652,7 @@ std::shared_ptr ClassType::compilation_unit() const { return cu; } -c10::optional ClassType::getProperty(const std::string& name) { +std::optional ClassType::getProperty(const std::string& name) { for (auto& prop : properties_) { if (name == prop.name) { return prop; @@ -667,7 +667,7 @@ void ClassType::addProperty(const std::string& name, torch::jit::Function* gette properties_.push_back({name, getter, setter}); } -c10::optional ClassType::findConstantSlot(const std::string& name) const { +std::optional ClassType::findConstantSlot(const std::string& name) const { TORCH_CHECK(constantNames_.size() == constantValues_.size()); size_t slot = 0; for (const auto& constant : constantNames_) { diff --git a/aten/src/ATen/core/class_type.h b/aten/src/ATen/core/class_type.h index 99fd27bba5426..b137f0ed208a1 100644 --- a/aten/src/ATen/core/class_type.h +++ b/aten/src/ATen/core/class_type.h @@ -74,7 +74,7 @@ struct TORCH_API ClassType : public NamedType { // Create a class type with name `name` and its methods stored in `cu`. static ClassTypePtr create( - c10::optional qualifiedName, + std::optional qualifiedName, std::weak_ptr cu, bool is_module = false, std::string doc_string = "", @@ -152,7 +152,7 @@ struct TORCH_API ClassType : public NamedType { // Attributes are stored in a specific slot at runtime for effiency. // When emitting instructions we specify the slot so that attribute access is // a constant lookup - c10::optional findAttributeSlot(const std::string& name) const { + std::optional findAttributeSlot(const std::string& name) const { size_t slot = 0; for (const auto& attr : attributes_) { if (name == attr.getName()) { @@ -239,7 +239,7 @@ struct TORCH_API ClassType : public NamedType { } // Get the property with the given \p name, if it exists on the class. 
- c10::optional getProperty(const std::string& name); + std::optional getProperty(const std::string& name); // Add a property named \p name with \p getter and \p setter as its getter and setter. void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter); // Get a list of all properties. @@ -257,7 +257,7 @@ struct TORCH_API ClassType : public NamedType { size_t addConstant(const std::string& name, const IValue& value); - c10::optional findConstantSlot(const std::string& name) const; + std::optional findConstantSlot(const std::string& name) const; size_t getConstantSlot(const std::string& name) const { if (auto r = findConstantSlot(name)) { @@ -281,7 +281,7 @@ struct TORCH_API ClassType : public NamedType { IValue getConstant(size_t slot) const; - c10::optional findConstant(const std::string& name) const; + std::optional findConstant(const std::string& name) const; size_t numConstants() const; @@ -384,7 +384,7 @@ struct TORCH_API ClassType : public NamedType { private: ClassType( - c10::optional name, + std::optional name, std::weak_ptr cu, bool is_module = false, std::string doc_string = "", diff --git a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h index 33e910591de0a..46c291bada308 100644 --- a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h +++ b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h @@ -56,7 +56,7 @@ namespace detail { void operator()(const at::Tensor& x) { ts = ts | x.key_set(); } - void operator()(const c10::optional& x) { + void operator()(const std::optional& x) { if (x.has_value()) { ts = ts | x->key_set(); } @@ -67,8 +67,8 @@ namespace detail { } } // Tensor?[] translates to this case. - void operator()(const c10::List>& xs) { - for (c10::optional x : xs) { + void operator()(const c10::List>& xs) { + for (std::optional x : xs) { if (x.has_value()) { ts = ts | x.value().key_set(); } @@ -80,7 +80,7 @@ namespace detail { ts = ts | x.key_set(); } } - [[noreturn]] void operator()(at::ArrayRef>) { + [[noreturn]] void operator()(at::ArrayRef>) { // Just checking that the handling of Tensor?[] didn't change. 
TORCH_INTERNAL_ASSERT(false); } @@ -89,7 +89,7 @@ namespace detail { ts = ts | gen.key_set(); } } - void operator()(const c10::optional& gen) { + void operator()(const std::optional& gen) { if (gen.has_value() && gen->defined()) { ts = ts | gen->key_set(); } diff --git a/aten/src/ATen/core/dispatch/Dispatcher.cpp b/aten/src/ATen/core/dispatch/Dispatcher.cpp index 6077ac8e34cc8..85897f7653ee6 100644 --- a/aten/src/ATen/core/dispatch/Dispatcher.cpp +++ b/aten/src/ATen/core/dispatch/Dispatcher.cpp @@ -76,8 +76,8 @@ C10_EXPORT Dispatcher& Dispatcher::realSingleton() { return _singleton; } -c10::optional Dispatcher::findOp(const OperatorName& overload_name) { - return operatorLookupTable_.read([&] (const ska::flat_hash_map& operatorLookupTable) -> c10::optional { +std::optional Dispatcher::findOp(const OperatorName& overload_name) { + return operatorLookupTable_.read([&] (const ska::flat_hash_map& operatorLookupTable) -> std::optional { auto found = operatorLookupTable.find(overload_name); if (found == operatorLookupTable.end()) { return c10::nullopt; @@ -103,7 +103,7 @@ void Dispatcher::waitForDef(const FunctionSchema& schema) { "the same dependencies."); } -void Dispatcher::waitForImpl(const OperatorName& op_name, c10::optional maybe_dk) { +void Dispatcher::waitForImpl(const OperatorName& op_name, std::optional maybe_dk) { using namespace std::chrono_literals; std::unique_lock lock(guard_->mutex); auto dk = maybe_dk.value_or(DispatchKey::CompositeImplicitAutograd); @@ -121,7 +121,7 @@ void Dispatcher::waitForImpl(const OperatorName& op_name, c10::optional Dispatcher::findSchema(const OperatorName& overload_name) { +std::optional Dispatcher::findSchema(const OperatorName& overload_name) { auto it = findOp(overload_name); if (it.has_value()) { if (it->hasSchema()) { @@ -275,7 +275,7 @@ PythonModuleMapType& pythonModulesSingleton() { } -c10::optional> Dispatcher::getPyStub(OperatorName op_name) { +std::optional> Dispatcher::getPyStub(OperatorName op_name) { std::lock_guard lock(guard_->mutex); auto found = pythonModulesSingleton().find(op_name); if (found == pythonModulesSingleton().end()) { @@ -332,9 +332,9 @@ void Dispatcher::throwIfHasPythonModule(OperatorName op_name) { RegistrationHandleRAII Dispatcher::registerImpl( OperatorName op_name, - c10::optional dispatch_key, + std::optional dispatch_key, KernelFunction kernel, - c10::optional cpp_signature, + std::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug ) { @@ -364,7 +364,7 @@ RegistrationHandleRAII Dispatcher::registerImpl( }); } -void Dispatcher::deregisterImpl_(const OperatorHandle& op, const OperatorName& op_name, c10::optional dispatch_key, impl::OperatorEntry::AnnotatedKernelContainerIterator handle) { +void Dispatcher::deregisterImpl_(const OperatorHandle& op, const OperatorName& op_name, std::optional dispatch_key, impl::OperatorEntry::AnnotatedKernelContainerIterator handle) { op.operatorDef_->op.deregisterKernel_(*this, dispatch_key, handle); TORCH_INTERNAL_ASSERT(op.operator_name() == op_name); @@ -486,7 +486,7 @@ std::vector Dispatcher::findDanglingImpls() const { }); } -std::vector Dispatcher::getRegistrationsForDispatchKey(c10::optional k) const { +std::vector Dispatcher::getRegistrationsForDispatchKey(std::optional k) const { return operatorLookupTable_.read([&] (const ska::flat_hash_map& operatorLookupTable) -> std::vector { std::vector op_names; for (const auto& op : operatorLookupTable) { diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h 
index caf73d7cebb21..6e679992a9f2d 100644 --- a/aten/src/ATen/core/dispatch/Dispatcher.h +++ b/aten/src/ATen/core/dispatch/Dispatcher.h @@ -137,7 +137,7 @@ class TORCH_API Dispatcher final { * and returns it if it is registered WITH A SCHEMA. * Returns nullopt otherwise. */ - c10::optional findSchema(const OperatorName& operator_name); + std::optional findSchema(const OperatorName& operator_name); /** * Variant of findSchema that results in less code generated at the call site. @@ -155,7 +155,7 @@ class TORCH_API Dispatcher final { OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name); // Like findSchema, but also returns OperatorHandle even if there is no schema - c10::optional findOp(const OperatorName& operator_name); + std::optional findOp(const OperatorName& operator_name); // Returns a list of all operator names present in the operatorLookupTable_ const std::vector getAllOpNames(); @@ -196,7 +196,7 @@ class TORCH_API Dispatcher final { // Used by torchdeploy/multipy for multiple interpreters racing. void waitForDef(const FunctionSchema& schema); - void waitForImpl(const OperatorName& op_name, c10::optional dispatch_key); + void waitForImpl(const OperatorName& op_name, std::optional dispatch_key); // ------------------------------------------------------------------------ // @@ -221,7 +221,7 @@ class TORCH_API Dispatcher final { */ // NB: steals the inferred function schema, as we may need to hold on to // it for a bit until the real schema turns up - RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional dispatch_key, KernelFunction kernel, c10::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug); + RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional dispatch_key, KernelFunction kernel, c10::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug); /** * Given an operator, tells the Dispatcher that we have implemented a fake impl @@ -234,7 +234,7 @@ class TORCH_API Dispatcher final { */ void throwIfHasPythonModule(OperatorName op_name); - c10::optional> getPyStub(OperatorName op_name); + std::optional> getPyStub(OperatorName op_name); /** * Register a new operator by name. @@ -299,7 +299,7 @@ class TORCH_API Dispatcher final { * Returns the names of all operators with a kernel registered for the specified DispatchKey. * If no DispatchKey is specified, it returns all registered operators. 
*/ - std::vector getRegistrationsForDispatchKey(c10::optional k) const; + std::vector getRegistrationsForDispatchKey(std::optional k) const; private: Dispatcher(); @@ -321,7 +321,7 @@ class TORCH_API Dispatcher final { void deregisterImpl_( const OperatorHandle& op, const OperatorName& op_name, - c10::optional dispatch_key, + std::optional dispatch_key, impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle); void deregisterName_(const OperatorHandle& op, const OperatorName& op_name); void deregisterFallback_(DispatchKey dispatchKey); diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.cpp b/aten/src/ATen/core/dispatch/OperatorEntry.cpp index 5f4538f2c9790..74e5a7e2cf955 100644 --- a/aten/src/ATen/core/dispatch/OperatorEntry.cpp +++ b/aten/src/ATen/core/dispatch/OperatorEntry.cpp @@ -7,7 +7,7 @@ namespace c10 { namespace impl { namespace { - std::string toString(c10::optional k) { + std::string toString(std::optional k) { if (k.has_value()) { return toString(*k); } else { @@ -39,7 +39,7 @@ namespace { // TODO: figure out if we can just directly save real schema at def time FunctionSchema from_def = from_def_.cloneWithRealTypes(kernel.isValidSymUnboxed()); FunctionSchema inferred = inferred_.cloneWithRealTypes(); - c10::optional schema_difference = findSchemaDifferences(from_def, inferred); + std::optional schema_difference = findSchemaDifferences(from_def, inferred); if (schema_difference.has_value()) { TORCH_CHECK(false, "Inferred operator schema for a C++ kernel function doesn't match the expected function schema.\n" @@ -101,9 +101,9 @@ void OperatorEntry::deregisterSchema() { OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel( const c10::Dispatcher& dispatcher, - c10::optional dispatch_key, + std::optional dispatch_key, KernelFunction kernel, - c10::optional cpp_signature, + std::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug ) { @@ -181,7 +181,7 @@ OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel( void OperatorEntry::deregisterKernel_( const c10::Dispatcher& dispatcher, - c10::optional dispatch_key, + std::optional dispatch_key, AnnotatedKernelContainerIterator kernel ) { // Redirect catchAll deregistrations to CompositeImplicitAutograd. 
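Aside: the hunks above and below only change the spelling of the optional type; the call sites keep relying on the same optional operations (initialization from nullopt, std::in_place construction, value_or fallbacks, has_value()/value() checks). Below is a minimal, self-contained sketch of those idioms using plain std::optional. The function and variable names in it (describe, slot) are invented for illustration and do not come from the patch.

```
#include <iostream>
#include <optional>
#include <string>

// Hypothetical helper (not from the patch): an optional parameter that
// defaults to std::nullopt, in the spirit of the nullopt-initialized
// optionals in the hunks above.
std::string describe(std::optional<std::string> key = std::nullopt) {
  // value_or() supplies a fallback, as waitForImpl does above with
  // maybe_dk.value_or(DispatchKey::CompositeImplicitAutograd).
  return key.value_or("CompositeImplicitAutograd");
}

int main() {
  // In-place construction, as in std::optional(std::in_place, 0) in the
  // multiple-output kernel tests above.
  std::optional<int> slot(std::in_place, 0);

  std::cout << describe() << '\n';       // prints the fallback value
  std::cout << describe("CPU") << '\n';  // prints CPU
  if (slot.has_value()) {
    std::cout << "slot = " << *slot << '\n';  // prints slot = 0
  }
  return 0;
}
```

has_value(), value_or(), and std::in_place are all standard std::optional API, so the sketch compiles against <optional> alone.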
diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.h b/aten/src/ATen/core/dispatch/OperatorEntry.h index 903ff043799b2..873b385845ed3 100644 --- a/aten/src/ATen/core/dispatch/OperatorEntry.h +++ b/aten/src/ATen/core/dispatch/OperatorEntry.h @@ -129,9 +129,9 @@ class TORCH_API OperatorEntry final { // Postcondition: caller is responsible for disposing of the kernel AnnotatedKernelContainerIterator registerKernel( const Dispatcher& dispatcher, - c10::optional dispatch_key, + std::optional dispatch_key, KernelFunction kernel, - c10::optional cpp_signature, + std::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug ); @@ -139,7 +139,7 @@ class TORCH_API OperatorEntry final { // Precondition: Dispatcher::mutex_ is held void deregisterKernel_( const Dispatcher& dispatcher, - c10::optional dispatch_key, + std::optional dispatch_key, AnnotatedKernelContainerIterator kernel ); @@ -221,7 +221,7 @@ class TORCH_API OperatorEntry final { private: OperatorName name_; - c10::optional schema_; + std::optional schema_; #ifndef C10_MOBILE std::vector tags_; #endif @@ -282,10 +282,10 @@ class TORCH_API OperatorEntry final { struct CppSignatureWithDebug { CppSignature signature; std::string debug; - c10::optional dispatch_key; + std::optional dispatch_key; }; - c10::optional cpp_signature_; - c10::optional sym_cpp_signature_; + std::optional cpp_signature_; + std::optional sym_cpp_signature_; // A Python custom error handler for OperatorEntry::reportError std::unique_ptr report_error_callback_; diff --git a/aten/src/ATen/core/dynamic_type.h b/aten/src/ATen/core/dynamic_type.h index 25b75b9e51114..fe4f0b4dfe602 100644 --- a/aten/src/ATen/core/dynamic_type.h +++ b/aten/src/ATen/core/dynamic_type.h @@ -121,7 +121,7 @@ class DynamicType : public SharedType { * A implementation detail to support NamedTuple. */ struct LabeledDynamicType { - c10::optional label; + std::optional label; DynamicTypePtr ty; explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {} @@ -163,7 +163,7 @@ class DynamicType : public SharedType { Tag tag() const { return tag_; } - const c10::optional& name() const { + const std::optional& name() const { return name_; } const Arguments& arguments() const { @@ -200,7 +200,7 @@ class DynamicType : public SharedType { } Tag tag_; - c10::optional name_; + std::optional name_; union { Arguments arguments_; ClassTypePtr class_; diff --git a/aten/src/ATen/core/function.h b/aten/src/ATen/core/function.h index f55e15e50b4fa..01e395bcf6106 100644 --- a/aten/src/ATen/core/function.h +++ b/aten/src/ATen/core/function.h @@ -97,7 +97,7 @@ struct TORCH_API Function { // executor. 
virtual bool call( Stack&, - c10::optional, + std::optional, c10::function_ref) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); return false; diff --git a/aten/src/ATen/core/function_schema.cpp b/aten/src/ATen/core/function_schema.cpp index 6e119ae25cc72..6f6cc8ed68557 100644 --- a/aten/src/ATen/core/function_schema.cpp +++ b/aten/src/ATen/core/function_schema.cpp @@ -30,7 +30,7 @@ FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const { // NB: keep this in sync with unpackSymInt in KernelFunction_impl.h if ( *a.real_type() == *getTypePtr() || - *a.real_type() == *getTypePtr>() || + *a.real_type() == *getTypePtr>() || *a.real_type() == *getTypePtr() || *a.real_type() == *getTypePtr() ) { @@ -53,7 +53,7 @@ FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const { is_varret()); } -bool FunctionSchema::canAliasTypeSetsAlias(const c10::optional &lhs, const c10::optional &rhs) const { +bool FunctionSchema::canAliasTypeSetsAlias(const std::optional &lhs, const c10::optional &rhs) const { if (!lhs || !rhs) { return false; } @@ -67,7 +67,7 @@ bool FunctionSchema::canAliasTypeSetsAlias(const c10::optional &lh return false; } -c10::optional FunctionSchema::getAliasTypeSetContainedTypes(const c10::optional &aliasTypeSet) const { +std::optional FunctionSchema::getAliasTypeSetContainedTypes(const c10::optional &aliasTypeSet) const { if (!aliasTypeSet) { return c10::nullopt; } @@ -95,7 +95,7 @@ c10::optional FunctionSchema::getAliasTypeSetContainedTypes(const return AliasTypeSet(containedTypes.begin(), containedTypes.end()); } -c10::optional FunctionSchema::mapTypeToAliasTypeSet(const TypePtr& type) const { +std::optional FunctionSchema::mapTypeToAliasTypeSet(const TypePtr& type) const { switch(type->kind()) { case TypeKind::ListType: case TypeKind::DictType: @@ -155,8 +155,8 @@ bool FunctionSchema::may_alias(const SchemaArgument& lhs, const SchemaArgument& const Argument lhsArg = getCorrectList(lhs.type)[lhs.index]; const Argument rhsArg = getCorrectList(rhs.type)[rhs.index]; - c10::optional lhsTypes = mapTypeToAliasTypeSet(lhsArg.type()); - c10::optional rhsTypes = mapTypeToAliasTypeSet(rhsArg.type()); + std::optional lhsTypes = mapTypeToAliasTypeSet(lhsArg.type()); + std::optional rhsTypes = mapTypeToAliasTypeSet(rhsArg.type()); // Check to see if lhs and rhs have the same alias set if (canAliasTypeSetsAlias(lhsTypes, rhsTypes)) { @@ -182,10 +182,10 @@ bool FunctionSchema::may_contain_alias(const SchemaArgument& lhs, const SchemaAr const c10::Argument lhsArg = getCorrectList(lhs.type)[lhs.index]; const c10::Argument rhsArg = getCorrectList(rhs.type)[rhs.index]; - c10::optional lhsTypes = mapTypeToAliasTypeSet(lhsArg.type()); - c10::optional rhsTypes = mapTypeToAliasTypeSet(rhsArg.type()); - c10::optional lhsContainedTypes = getAliasTypeSetContainedTypes(lhsTypes); - c10::optional rhsContainedTypes = getAliasTypeSetContainedTypes(rhsTypes); + std::optional lhsTypes = mapTypeToAliasTypeSet(lhsArg.type()); + std::optional rhsTypes = mapTypeToAliasTypeSet(rhsArg.type()); + std::optional lhsContainedTypes = getAliasTypeSetContainedTypes(lhsTypes); + std::optional rhsContainedTypes = getAliasTypeSetContainedTypes(rhsTypes); // Checks if one side is wildcard and the other side is a container of the same type bool lhsWildcard = lhsArg.alias_info() && lhsArg.alias_info()->isWildcardAfter() && canAliasTypeSetsAlias(lhsTypes, rhsContainedTypes); diff --git a/aten/src/ATen/core/function_schema.h b/aten/src/ATen/core/function_schema.h index 79e7ffed1a14f..801bd43c84c01 100644 --- 
a/aten/src/ATen/core/function_schema.h +++ b/aten/src/ATen/core/function_schema.h @@ -29,20 +29,20 @@ struct Argument { Argument( std::string name = "", const TypePtr& type = nullptr, - c10::optional N = c10::nullopt, - c10::optional default_value = c10::nullopt, + std::optional N = c10::nullopt, + std::optional default_value = c10::nullopt, bool kwarg_only = false, - c10::optional alias_info = c10::nullopt) + std::optional alias_info = c10::nullopt) : Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {} Argument( std::string name, TypePtr fake_type, TypePtr real_type, - c10::optional N = c10::nullopt, - c10::optional default_value = c10::nullopt, + std::optional N = c10::nullopt, + std::optional default_value = c10::nullopt, bool kwarg_only = false, - c10::optional alias_info = c10::nullopt) + std::optional alias_info = c10::nullopt) : name_(std::move(name)), type_(fake_type ? std::move(fake_type) : TensorType::get()), real_type_(real_type ? std::move(real_type) : type_), @@ -94,10 +94,10 @@ struct Argument { const TypePtr& real_type() const { return real_type_; } - c10::optional N() const { + std::optional N() const { return N_; } - const c10::optional& default_value() const { + const std::optional& default_value() const { return default_value_; } bool kwarg_only() const { @@ -150,7 +150,7 @@ struct Argument { N_, default_value_, kwarg_only_, - alias_info_ ? c10::optional(*alias_info_) : c10::nullopt); + alias_info_ ? std::optional(*alias_info_) : c10::nullopt); } // this function checks whether this Argument is backward compatible with @@ -179,9 +179,9 @@ struct Argument { // e.g. for int[3]: type = ListType::ofInts(), N = 3 // If present, this will allow scalars to be broadcast to this length to // become a list. - c10::optional N_; + std::optional N_; - c10::optional default_value_; + std::optional default_value_; // AliasInfo is huge, so let's only allocate memory for it if // necessary (which it isn't during schema parsing on startup, to // give a pertinent example). @@ -322,7 +322,7 @@ struct TORCH_API FunctionSchema { // alias information should we infer? // NB: due to alias analysis kind merging, this may be nullopt. Eventually // this should always be set no matter what - c10::optional alias_kind_; + std::optional alias_kind_; template void checkArg(const IValue& value, const Argument& argument, optional pos) const; @@ -395,7 +395,7 @@ struct TORCH_API FunctionSchema { return aliasInfo && aliasInfo->isWrite(); } bool is_mutable(c10::string_view name) const { - c10::optional index = argumentIndexWithName(name); + std::optional index = argumentIndexWithName(name); TORCH_INTERNAL_ASSERT( index != c10::nullopt, "Schema has no argument named ", name); @@ -416,22 +416,22 @@ struct TORCH_API FunctionSchema { // Returns whether the two AliasTypeSets contain any similarities // ie: whether the two type sets can alias. - bool canAliasTypeSetsAlias(const c10::optional &lhs, const c10::optional &rhs) const; + bool canAliasTypeSetsAlias(const std::optional &lhs, const c10::optional &rhs) const; // Recursively Finds all contained types within the AliasTypeSet. - c10::optional getAliasTypeSetContainedTypes(const c10::optional &aliasTypeSet) const; + std::optional getAliasTypeSetContainedTypes(const c10::optional &aliasTypeSet) const; // Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp. // Used to map types to a type such that all types that can alias will be mapped to the same type. 
// For example, calling this method on 'Optional[List[int]]' is the same as calling this method // on 'List[int]'. - c10::optional mapTypeToAliasTypeSet(const TypePtr& type) const; + std::optional mapTypeToAliasTypeSet(const TypePtr& type) const; // Returns either arguments() or returns() depending on the SchemaArgType // output => returns(), input => arguments() const std::vector& getCorrectList(SchemaArgType type) const; - c10::optional argumentIndexWithName(c10::string_view name) const { + std::optional argumentIndexWithName(c10::string_view name) const { for (const auto i : c10::irange(arguments().size())) { if(name == arguments()[i].name()) return i; @@ -470,8 +470,8 @@ struct TORCH_API FunctionSchema { std::string formatTypeMismatchMsg( const Argument& expected, const std::string& actual_type, - c10::optional position = c10::nullopt, - c10::optional value = c10::nullopt) const; + std::optional position = c10::nullopt, + std::optional value = c10::nullopt) const; FunctionSchema cloneWithRemappedTypes( const std::function type_map) const; @@ -514,7 +514,7 @@ struct TORCH_API FunctionSchema { alias_kind_ = v; } - c10::optional getNamespace() const { + std::optional getNamespace() const { return name_.getNamespace(); } diff --git a/aten/src/ATen/core/function_schema_inl.h b/aten/src/ATen/core/function_schema_inl.h index a6959c661af15..182d7a181cde4 100644 --- a/aten/src/ATen/core/function_schema_inl.h +++ b/aten/src/ATen/core/function_schema_inl.h @@ -162,8 +162,8 @@ inline bool Argument::isForwardCompatibleWith( inline std::string FunctionSchema::formatTypeMismatchMsg( const Argument& expected, const std::string& actual_type, - c10::optional position, - c10::optional value) const { + std::optional position, + std::optional value) const { std::string position_str; if (position) { position_str = c10::str("Position: ", *position, "\n"); diff --git a/aten/src/ATen/core/ivalue.cpp b/aten/src/ATen/core/ivalue.cpp index 7343d66fcb97d..6c505f8b656cf 100644 --- a/aten/src/ATen/core/ivalue.cpp +++ b/aten/src/ATen/core/ivalue.cpp @@ -471,7 +471,7 @@ bool IValue::isOptionalTensorList() const { return false; } const auto& ty = static_cast(payload.u.as_intrusive_ptr)->elementType; - const auto& expected_ty = c10::getTypePtr>(); + const auto& expected_ty = c10::getTypePtr>(); return expected_ty == ty; } @@ -886,14 +886,14 @@ c10::intrusive_ptr ivalue::Object::create( StrongTypePtr(nullptr, std::move(classType)), numSlots); } -IValue IValue::deepcopy(c10::optional device) const { +IValue IValue::deepcopy(std::optional device) const { IValue::HashAliasedIValueMap memo; return deepcopy(memo, device); } IValue IValue::deepcopy( IValue::HashAliasedIValueMap& memo, - c10::optional device) const { + std::optional device) const { if (memo.count(*this)) { return memo.at(*this); } @@ -1027,14 +1027,14 @@ c10::intrusive_ptr ivalue::Object::copy_to_weak_compilation_ref( } c10::intrusive_ptr ivalue::Object::deepcopy( - c10::optional device) const { + std::optional device) const { IValue::HashAliasedIValueMap memo; return deepcopy(memo, device); } c10::intrusive_ptr ivalue::Object::deepcopy( IValue::HashAliasedIValueMap& memo, - c10::optional device) const { + std::optional device) const { auto cu = type_.cu_; auto object = ivalue::Object::create(WeakOrStrongTypePtr(type_.cu_, type_.type_), type()->numAttributes()); for (const auto i : c10::irange(slots_.size())) { diff --git a/aten/src/ATen/core/ivalue.h b/aten/src/ATen/core/ivalue.h index 07e85677c3c75..7715ffbe3c31d 100644 --- a/aten/src/ATen/core/ivalue.h +++ 
b/aten/src/ATen/core/ivalue.h @@ -86,20 +86,20 @@ struct StreamData3Holder : c10::intrusive_ptr_target { } // namespace ivalue -// This is an owning wrapper for a c10::optional> +// This is an owning wrapper for a std::optional> // that can be implicitly converted to a (non-owning) optional>. // Its purpose is to be used in generated code to keep the vector alive // either until the end of a statement (as a temporary), or as a saved arg // in autograd. template struct OptionalArray { - c10::optional> list; + std::optional> list; OptionalArray() = default; OptionalArray(std::vector val) : list(std::move(val)) {} // Used when saving an argument for the backwards pass. - OptionalArray& operator=(c10::optional> ref) { + OptionalArray& operator=(std::optional> ref) { if (ref) { list = std::vector(ref->begin(), ref->end()); } else { @@ -118,7 +118,7 @@ struct OptionalArray { return *this; } - operator c10::optional>() { + operator std::optional>() { if (!list) { return nullopt; } @@ -697,7 +697,7 @@ struct TORCH_API IValue final { c10::intrusive_ptr toString() &&; c10::intrusive_ptr toString() const&; const std::string& toStringRef() const; - c10::optional> toOptionalStringRef() + std::optional> toOptionalStringRef() const; c10::string_view toStringView() const; @@ -726,9 +726,9 @@ struct TORCH_API IValue final { // OptionalTensorList bool isOptionalTensorList() const; - c10::List> toOptionalTensorList() &&; - c10::List> toOptionalTensorList() const&; - std::vector> toOptionalTensorVector() const; + c10::List> toOptionalTensorList() &&; + c10::List> toOptionalTensorList() const&; + std::vector> toOptionalTensorVector() const; // GenericList IValue(c10::List v); @@ -817,7 +817,7 @@ struct TORCH_API IValue final { IValue(std::unordered_map v); template = nullptr> - IValue(c10::optional v); + IValue(std::optional v); template = nullptr> IValue(c10::OptionalArrayRef v); IValue(c10::nullopt_t); @@ -1128,10 +1128,10 @@ struct TORCH_API IValue final { // TODO: There are several places that recurse over IValue. This is fragile. // This visitor should be used to recurse over ivalues. void visit(const std::function& visitor) const; - IValue deepcopy(c10::optional device = c10::nullopt) const; + IValue deepcopy(std::optional device = c10::nullopt) const; IValue deepcopy( HashAliasedIValueMap& memo, - c10::optional device = c10::nullopt) const; + std::optional device = c10::nullopt) const; private: static c10::intrusive_ptr_target* null_to_undefined_tensor( @@ -1530,8 +1530,8 @@ struct WeakOrStrongCompilationUnit { return holdingStrongRef() && *strong_ptr_ == nullptr; } - c10::optional> strong_ptr_; - c10::optional> weak_ptr_; + std::optional> strong_ptr_; + std::optional> weak_ptr_; }; // An Object will hold a non-owning Compilation Unit reference if it is a diff --git a/aten/src/ATen/core/ivalue_inl.h b/aten/src/ATen/core/ivalue_inl.h index 3e3525c274118..b1124c12cfb34 100644 --- a/aten/src/ATen/core/ivalue_inl.h +++ b/aten/src/ATen/core/ivalue_inl.h @@ -909,7 +909,7 @@ struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target { using WeakStorage = c10::weak_intrusive_ptr; void markCompleted( IValue value, - c10::optional> storages = c10::nullopt) { + std::optional> storages = c10::nullopt) { // Start by performing all steps that can throw, before setting any field. // Do this before even acquiring the mutex, because extractStorages might // acquire the GIL, which could lead to a lock inversion with our mutex. 
@@ -1586,11 +1586,11 @@ struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target { c10::intrusive_ptr copy() const; c10::intrusive_ptr deepcopy( - c10::optional device = c10::nullopt) const; + std::optional device = c10::nullopt) const; c10::intrusive_ptr deepcopy( IValue::HashAliasedIValueMap& memo, - c10::optional device = c10::nullopt) const; + std::optional device = c10::nullopt) const; bool is_weak_compilation_ref() const { return !type_.holds_strong_ref(); @@ -1613,7 +1613,7 @@ struct ivalue::PyObjectHolder : c10::intrusive_ptr_target { public: virtual PyObject* getPyObject() = 0; virtual c10::InferredType tryToInferType() = 0; - virtual IValue toIValue(const TypePtr& type, c10::optional N = c10::nullopt) = 0; + virtual IValue toIValue(const TypePtr& type, std::optional N = c10::nullopt) = 0; virtual std::string toStr() = 0; virtual std::vector extractTensors() = 0; @@ -1909,7 +1909,7 @@ std::unordered_map generic_to( } template -c10::optional generic_to(IValue ivalue, _fake_type>) { +std::optional generic_to(IValue ivalue, _fake_type>) { if (ivalue.isNone()) { return c10::nullopt; } @@ -1946,11 +1946,11 @@ inline T IValue::to() && { } template <> -inline c10::optional IValue::to() && { +inline std::optional IValue::to() && { // In the default implementation, the IValue is destroyed with std::move. // But if the unboxed type is optional we cannot destroy // the IValue. - return generic_to(*this, _fake_type>{}); + return generic_to(*this, _fake_type>{}); } template @@ -2046,20 +2046,20 @@ inline std::vector IValue::toTensorVector() const { return createVectorFromList( static_cast(payload.u.as_intrusive_ptr)); } -inline c10::List> IValue::toOptionalTensorList() && { +inline c10::List> IValue::toOptionalTensorList() && { AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); - return c10::List>(moveToIntrusivePtr()); + return c10::List>(moveToIntrusivePtr()); } -inline c10::List> IValue::toOptionalTensorList() const& { +inline c10::List> IValue::toOptionalTensorList() const& { AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); - return c10::List>(toIntrusivePtr()); + return c10::List>(toIntrusivePtr()); } -inline std::vector> IValue::toOptionalTensorVector() const { +inline std::vector> IValue::toOptionalTensorVector() const { AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); TORCH_INTERNAL_ASSERT_DEBUG_ONLY( payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), "called toOptionalTensorVector on null intrusive_ptr IValue"); - return createVectorFromList>( + return createVectorFromList>( static_cast(payload.u.as_intrusive_ptr)); } inline c10::List IValue::toList() && { @@ -2274,7 +2274,7 @@ inline IValue::IValue(std::unordered_map v) } template > -inline IValue::IValue(c10::optional v) : IValue() { +inline IValue::IValue(std::optional v) : IValue() { if (v.has_value()) { *this = IValue(std::move(*v)); } @@ -2360,7 +2360,7 @@ inline const std::string& IValue::toStringRef() const { payload.u.as_intrusive_ptr) ->string(); } -inline c10::optional> IValue:: +inline std::optional> IValue:: toOptionalStringRef() const { if (isNone()) { return c10::nullopt; diff --git a/aten/src/ATen/core/jit_type.h b/aten/src/ATen/core/jit_type.h index 05f7242855417..be4414e8fe5b0 100644 --- a/aten/src/ATen/core/jit_type.h +++ b/aten/src/ATen/core/jit_type.h @@ -32,7 +32,7 @@ class Dict; struct IValue; struct FunctionSchema; struct NamedType; -using OptNameList = c10::optional>; +using 
OptNameList = std::optional>; void standardizeVectorForUnion(std::vector& reference, std::vector* to_fill); void standardizeVectorForUnion(std::vector* to_flatten); @@ -164,9 +164,9 @@ struct TORCH_API UnionType : public SharedType { return has_free_variables_; } - c10::optional toOptional() const; + std::optional toOptional() const; - c10::optional subtractTypeSet(std::vector& to_subtract) const; + std::optional subtractTypeSet(std::vector& to_subtract) const; protected: explicit UnionType(std::vector types, TypeKind kind=TypeKind::UnionType); @@ -247,13 +247,13 @@ struct TORCH_API OptionalType : public UnionType { }; template -inline c10::optional merge_primitive( - const c10::optional& a, - const c10::optional& b) { +inline std::optional merge_primitive( + const std::optional& a, + const std::optional& b) { if (a.has_value() && b.has_value() && a.value() == b.value()) { return a; } - return c10::optional{}; + return std::optional{}; } // If we see `a + b + c` and know that a, b, and c are the same size and have @@ -274,9 +274,9 @@ inline c10::optional merge_primitive( struct TORCH_API Stride { Stride() = default; Stride( - const c10::optional& stride_index, - c10::optional contiguous, - const c10::optional& stride) + const std::optional& stride_index, + std::optional contiguous, + const std::optional& stride) : stride_index_(stride_index), contiguous_(contiguous), stride_(stride) {} bool operator==(const Stride& b) const { @@ -288,17 +288,17 @@ struct TORCH_API Stride { return stride_index_ && contiguous_ && stride_; } - c10::optional stride_index_; - c10::optional contiguous_; - c10::optional stride_; + std::optional stride_index_; + std::optional contiguous_; + std::optional stride_; }; template <> -inline c10::optional merge_primitive( - const c10::optional& a, - const c10::optional& b) { - c10::optional left = a; - c10::optional right = b; +inline std::optional merge_primitive( + const std::optional& a, + const std::optional& b) { + std::optional left = a; + std::optional right = b; if (!left.has_value()) { left = {Stride()}; } @@ -314,7 +314,7 @@ inline c10::optional merge_primitive( // normalize if (!r.stride_index_.has_value() && !r.contiguous_.has_value() && !r.stride_.has_value()) { - return c10::optional{}; + return std::optional{}; } return r; @@ -375,7 +375,7 @@ struct TORCH_API SymbolicShape { SymbolicShape() : dims_(c10::nullopt) {} // Known rank but unknown dimentions. - SymbolicShape(c10::optional rank) : dims_(c10::nullopt) { + SymbolicShape(std::optional rank) : dims_(c10::nullopt) { if(!rank) { return; } @@ -389,10 +389,10 @@ struct TORCH_API SymbolicShape { } // Mix of known and unknown ranks - SymbolicShape(const std::vector>& dims) { + SymbolicShape(const std::vector>& dims) { std::vector shape_symbols; shape_symbols.reserve(dims.size()); - for(c10::optional dim: dims) { + for(std::optional dim: dims) { if(!dim) { shape_symbols.push_back(ShapeSymbol::newSymbol()); } else { @@ -430,18 +430,18 @@ struct TORCH_API SymbolicShape { } // Returns rank or nullopt in case of unranked shape. 
- c10::optional rank() const { + std::optional rank() const { if(!dims_) { return c10::nullopt; } return dims_->size(); } - c10::optional> sizes() const { + std::optional> sizes() const { return dims_; } - c10::optional> symbolicDims() const { + std::optional> symbolicDims() const { if (!dims_) { return c10::nullopt; } @@ -482,7 +482,7 @@ struct TORCH_API SymbolicShape { } private: - c10::optional> dims_; + std::optional> dims_; }; namespace detail { @@ -498,14 +498,14 @@ inline bool isComplete(const T& /*t*/) { template struct VaryingShape { - using ListOfOptionalElements = std::vector>; + using ListOfOptionalElements = std::vector>; VaryingShape(const std::vector& vec) : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} VaryingShape(c10::ArrayRef vec) : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} - VaryingShape(c10::optional size = c10::nullopt) : dims_(c10::nullopt) { + VaryingShape(std::optional size = c10::nullopt) : dims_(c10::nullopt) { if (size) { dims_ = ListOfOptionalElements(*size); } @@ -513,20 +513,20 @@ struct VaryingShape { VaryingShape(ListOfOptionalElements dims) : dims_(std::move(dims)) {} - VaryingShape(size_t size) : VaryingShape(c10::optional(size)) {} + VaryingShape(size_t size) : VaryingShape(std::optional(size)) {} bool operator==(const VaryingShape& other) const { return dims_ == other.dims_; } - const c10::optional &operator[](size_t i) const { + const std::optional &operator[](size_t i) const { if (!dims_) { throw std::runtime_error("Rank isn't fixed"); } return (*dims_).at(i); } - c10::optional size() const { + std::optional size() const { if (!dims_) { return c10::nullopt; } @@ -534,13 +534,13 @@ struct VaryingShape { return dims.size(); } - const c10::optional& sizes() const { + const std::optional& sizes() const { return dims_; } TORCH_API VaryingShape merge(const VaryingShape& other) const; - c10::optional> concrete_sizes() const { + std::optional> concrete_sizes() const { if (!dims_) { return c10::nullopt; } @@ -568,7 +568,7 @@ struct VaryingShape { } private: - c10::optional dims_; + std::optional dims_; }; struct TensorType; @@ -581,27 +581,27 @@ struct TORCH_API TensorType : public SharedType { // used by TensorType::create(size_t dim) which in turn used by // shape_analysis.cpp static TensorTypePtr create( - c10::optional scalar_type, - c10::optional device, + std::optional scalar_type, + std::optional device, const VaryingShape& sizes, const VaryingShape& strides, - c10::optional requires_grad, - c10::optional undefined = false, + std::optional requires_grad, + std::optional undefined = false, bool tensor_contiguity = false); static TensorTypePtr create( - c10::optional scalar_type, - c10::optional device, + std::optional scalar_type, + std::optional device, const SymbolicShape& sizes, const VaryingShape& stride_, - c10::optional requires_grad, - c10::optional undefined = false); + std::optional requires_grad, + std::optional undefined = false); static TensorTypePtr create( - c10::optional scalar_type, - c10::optional device, - c10::optional dim, - c10::optional requires_grad); + std::optional scalar_type, + std::optional device, + std::optional dim, + std::optional requires_grad); // overloaded create variadic template argument as it could not distinguish // initializer list @@ -613,7 +613,7 @@ struct TORCH_API TensorType : public SharedType { static TypePtr fromNumberType(const Type& typ); static TypePtr fromBoolType(); - c10::optional dim() const { + std::optional dim() const { return sizes().size(); } @@ -625,13 
+625,13 @@ struct TORCH_API TensorType : public SharedType { return strides_; } - c10::optional device() const { + std::optional device() const { return device_; } - c10::optional scalarType() const { + std::optional scalarType() const { return scalar_type_; } - c10::optional requiresGrad() const { + std::optional requiresGrad() const { return requires_grad_; } bool requires_grad() const override { @@ -651,32 +651,32 @@ struct TORCH_API TensorType : public SharedType { } } - c10::optional numel() const { + std::optional numel() const { size_t prod = 1; const auto& shape = sizes(); for (size_t i = 0; i < shape.size(); i++) { if (!shape[i]) { - return c10::optional{}; + return std::optional{}; } prod *= shape[i].value(); } return prod; } - TensorTypePtr withRequiresGrad(c10::optional s) { + TensorTypePtr withRequiresGrad(std::optional s) { auto copy = clone(); copy->requires_grad_ = s; return copy; } - TensorTypePtr withScalarType(c10::optional st) { + TensorTypePtr withScalarType(std::optional st) { auto copy = clone(); copy->scalar_type_ = st; return copy; } - TensorTypePtr withDim(c10::optional d) { + TensorTypePtr withDim(std::optional d) { auto copy = clone(); // withDim is only used by the legacy executor // that only cares about the rank, so create dummy symbols)) : @@ -712,7 +712,7 @@ struct TORCH_API TensorType : public SharedType { sizes, contiguousStridesOf(sizes)); } - TensorTypePtr withDevice(const c10::optional device) const { + TensorTypePtr withDevice(const std::optional device) const { auto copy = clone(); copy->device_ = device; return copy; @@ -784,7 +784,7 @@ struct TORCH_API TensorType : public SharedType { return r; } - c10::optional undefined() const { return undefined_; } + std::optional undefined() const { return undefined_; } static const TensorTypePtr& get(); @@ -824,12 +824,12 @@ struct TORCH_API TensorType : public SharedType { private: TensorType( - c10::optional scalar_type, - c10::optional device, + std::optional scalar_type, + std::optional device, SymbolicShape sizes, VaryingShape strides, - c10::optional requires_grad, - c10::optional undefined = false); + std::optional requires_grad, + std::optional undefined = false); TensorTypePtr clone() const { return TensorTypePtr(new TensorType( @@ -841,11 +841,11 @@ struct TORCH_API TensorType : public SharedType { at::IntArrayRef strides, bool tensor_contiguity = false); - c10::optional scalar_type_; - c10::optional device_; + std::optional scalar_type_; + std::optional device_; SymbolicShape sizes_; VaryingShape strides_; - c10::optional requires_grad_; + std::optional requires_grad_; // we exploit the fact certain tensors must be zero in the autograd to // optimize gradient computation. Such zero tensors are currently implemented // with `UndefinedTensorImpl.` They can be handled only by special operators @@ -857,7 +857,7 @@ struct TORCH_API TensorType : public SharedType { // undefined_ may become `c10::nullopt` if the tensor was observed to be both // defined and undefined. However, no tensor type starts out with // `undefined_` set to `c10::nullopt` - c10::optional undefined_; + std::optional undefined_; // Represents whether or not this type was inferred. 
bool is_inferred_ = false; }; @@ -1144,16 +1144,16 @@ using NameList = std::vector; // This type represents a Tuple struct TORCH_API TupleType : public NamedType { - static TupleTypePtr createNamed(const c10::optional& name, + static TupleTypePtr createNamed(const std::optional& name, const std::vector& field_names, const std::vector& field_types, std::vector& field_defaults); - static TupleTypePtr createNamed(const c10::optional& name, + static TupleTypePtr createNamed(const std::optional& name, const std::vector& field_names, const std::vector& field_types); - static TupleTypePtr createNamed(const c10::optional& name, + static TupleTypePtr createNamed(const std::optional& name, const std::vector& field_names, const std::vector& field_types); @@ -1190,21 +1190,21 @@ struct TORCH_API TupleType : public NamedType { const std::shared_ptr& schema() const { return schema_; } - c10::optional> names() const; + std::optional> names() const; static const TypeKind Kind = TypeKind::TupleType; private: template static TupleTypePtr createWithSpec( - const c10::optional& name, + const std::optional& name, const std::vector& field_names, const std::vector& field_types, std::vector& field_defaults); TupleType( std::vector elements_, - c10::optional name, + std::optional name, std::shared_ptr schema); bool compare( @@ -1747,7 +1747,7 @@ inline TypePtr TensorType::fromBoolType() { return TensorType::createContiguous(at::kBool, at::kCPU, {}); } -inline c10::optional tryScalarTypeFromJitType(const Type& type) { +inline std::optional tryScalarTypeFromJitType(const Type& type) { if (type == *FloatType::get()) { return at::typeMetaToScalarType(c10::get_default_dtype()); } else if (type == *IntType::get()) { @@ -1782,13 +1782,13 @@ inline at::ScalarType scalarTypeFromJitType(const Type& type) { // If `type_hint` is an `InterfaceType`, then we can use that as a // potential supertype for `ClassType`s in the list. Otherwise, we have // no way to find and use some common interface type -TORCH_API c10::optional unifyTypes( +TORCH_API std::optional unifyTypes( const TypePtr& t1, const TypePtr& t2, bool default_to_union = false, const TypePtr& type_hint = nullptr); -TORCH_API c10::optional unifyTypeList( +TORCH_API std::optional unifyTypeList( at::ArrayRef elements, std::ostream& why_not, bool default_to_union = false, @@ -2132,7 +2132,7 @@ struct MatchTypeReturn { private: MatchTypeReturn() : reason_(c10::nullopt) {} - c10::optional reason_; // is there is no match, this contains the reason + std::optional reason_; // is there is no match, this contains the reason }; // attempt to match the type variables in formal to actual, adding them to type_env. diff --git a/aten/src/ATen/core/jit_type_base.h b/aten/src/ATen/core/jit_type_base.h index 21692db56dd87..ac2cb0528245c 100644 --- a/aten/src/ATen/core/jit_type_base.h +++ b/aten/src/ATen/core/jit_type_base.h @@ -75,7 +75,7 @@ struct SharedType; // Use this to customize how a Type is printed using `annotation_str()`. If // c10::nullopt is returned, `annotation_str()` falls through to its default // implementation. 
-using TypePrinter = std::function(const Type&)>; +using TypePrinter = std::function(const Type&)>; namespace detail { template @@ -688,7 +688,7 @@ using NamedTypePtr = std::shared_ptr; using ConstNamedTypePtr = std::shared_ptr; struct TORCH_API NamedType : public SharedType { - NamedType(TypeKind tk, c10::optional name) + NamedType(TypeKind tk, std::optional name) : SharedType(tk), name_(std::move(name)) { TORCH_INTERNAL_ASSERT( tk == TypeKind::TupleType || tk == TypeKind::FunctionType || @@ -700,12 +700,12 @@ struct TORCH_API NamedType : public SharedType { // Fully qualified name of type // Looks like: "foo.bar.Baz". - const c10::optional& name() const { + const std::optional& name() const { return name_; } private: - c10::optional name_; + std::optional name_; }; } // namespace c10 diff --git a/aten/src/ATen/core/library.cpp b/aten/src/ATen/core/library.cpp index fd349da2f8b0c..6a910d7b60a57 100644 --- a/aten/src/ATen/core/library.cpp +++ b/aten/src/ATen/core/library.cpp @@ -42,7 +42,7 @@ namespace { constexpr auto CatchAll = c10::DispatchKey::CatchAll; } // anonymous namespace -CppFunction::CppFunction(c10::KernelFunction func, c10::optional cpp_signature, std::unique_ptr schema) +CppFunction::CppFunction(c10::KernelFunction func, std::optional cpp_signature, std::unique_ptr schema) : func_(std::move(func)) , cpp_signature_(cpp_signature) , schema_(std::move(schema)) @@ -57,10 +57,10 @@ void Library::reset() { #define ERROR_CONTEXT "(Error occurred while processing ", toString(kind_), " block at ", file_, ":", line_, ")" -Library::Library(Kind kind, std::string ns, c10::optional k, const char* file, uint32_t line) +Library::Library(Kind kind, std::string ns, std::optional k, const char* file, uint32_t line) : kind_(kind) , ns_(ns == "_" ? c10::nullopt : c10::make_optional(std::move(ns))) - , dispatch_key_(k.value_or(CatchAll) == CatchAll ? c10::optional() : k) + , dispatch_key_(k.value_or(CatchAll) == CatchAll ? 
std::optional() : k) , file_(file) , line_(line) { diff --git a/aten/src/ATen/core/op_registration/infer_schema.cpp b/aten/src/ATen/core/op_registration/infer_schema.cpp index 7e0fd28f9a7b1..e280bb140220b 100644 --- a/aten/src/ATen/core/op_registration/infer_schema.cpp +++ b/aten/src/ATen/core/op_registration/infer_schema.cpp @@ -43,7 +43,7 @@ FunctionSchema make_function_schema( } // namespace infer_schema } // namespace detail -c10::optional findSchemaDifferences( +std::optional findSchemaDifferences( const FunctionSchema& lhs, const FunctionSchema& rhs) { if (lhs.arguments().size() != rhs.arguments().size()) { diff --git a/aten/src/ATen/core/op_registration/infer_schema.h b/aten/src/ATen/core/op_registration/infer_schema.h index 57409442950f2..2f845f7c4c10f 100644 --- a/aten/src/ATen/core/op_registration/infer_schema.h +++ b/aten/src/ATen/core/op_registration/infer_schema.h @@ -155,6 +155,6 @@ FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn>(std::move(name), std::move(overload_name)); } -TORCH_API c10::optional findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified); +TORCH_API std::optional findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified); } diff --git a/aten/src/ATen/core/op_registration/op_registration.cpp b/aten/src/ATen/core/op_registration/op_registration.cpp index 8a516e68bd0dc..0a64e0f44d7e5 100644 --- a/aten/src/ATen/core/op_registration/op_registration.cpp +++ b/aten/src/ATen/core/op_registration/op_registration.cpp @@ -17,9 +17,9 @@ void build_feature_required_feature_not_available(const char* feature) { } // namespace impl static_assert(std::is_nothrow_move_constructible< - c10::optional>::value); + std::optional>::value); static_assert(std::is_nothrow_move_assignable< - c10::optional>::value); + std::optional>::value); void RegisterOperators::checkSchemaAndRegisterOp_(Options&& options) { TORCH_CHECK( @@ -71,7 +71,7 @@ c10::FunctionSchema RegisterOperators::inferSchemaFromKernels_( opName, " because there is no kernel specified."); - c10::optional inferred_schema = c10::nullopt; + std::optional inferred_schema = c10::nullopt; for (const auto& kernel : options.kernels) { if (nullptr != kernel.inferred_function_schema.get()) { if (!inferred_schema.has_value()) { diff --git a/aten/src/ATen/core/op_registration/op_registration.h b/aten/src/ATen/core/op_registration/op_registration.h index 0b083dc6b6759..b1b1e2c47bc45 100644 --- a/aten/src/ATen/core/op_registration/op_registration.h +++ b/aten/src/ATen/core/op_registration/op_registration.h @@ -399,7 +399,7 @@ class TORCH_API RegisterOperators final { } private: - Options&& kernel(c10::optional dispatch_key, KernelFunction&& func, c10::optional cpp_signature, std::unique_ptr&& inferred_function_schema) && { + Options&& kernel(std::optional dispatch_key, KernelFunction&& func, c10::optional cpp_signature, std::unique_ptr&& inferred_function_schema) && { KernelRegistrationConfig config; config.dispatch_key = dispatch_key; config.func = std::move(func); @@ -425,13 +425,13 @@ class TORCH_API RegisterOperators final { , inferred_function_schema(nullptr) {} - c10::optional dispatch_key; + std::optional dispatch_key; KernelFunction func; - c10::optional cpp_signature; + std::optional cpp_signature; std::unique_ptr inferred_function_schema; }; - c10::optional> schemaOrName_; + std::optional> schemaOrName_; std::vector kernels; optional aliasAnalysisKind_; diff --git 
a/aten/src/ATen/core/op_registration/op_registration_test.cpp b/aten/src/ATen/core/op_registration/op_registration_test.cpp index 377cb403cdcfd..d1305ac6d9491 100644 --- a/aten/src/ATen/core/op_registration/op_registration_test.cpp +++ b/aten/src/ATen/core/op_registration/op_registration_test.cpp @@ -882,56 +882,56 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { // optional types (with has_value() == true) - testArgTypes>::test( - c10::optional(1.5), [] (const c10::optional& v) {EXPECT_EQ(1.5, v.value());}, - c10::optional(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());}, + testArgTypes>::test( + std::optional(1.5), [] (const c10::optional& v) {EXPECT_EQ(1.5, v.value());}, + std::optional(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());}, "(float? a) -> float?"); - testArgTypes>::test( - c10::optional(1), [] (const c10::optional& v) {EXPECT_EQ(1, v.value());}, - c10::optional(2), [] (const IValue& v) {EXPECT_EQ(2, v.toInt());}, + testArgTypes>::test( + std::optional(1), [] (const c10::optional& v) {EXPECT_EQ(1, v.value());}, + std::optional(2), [] (const IValue& v) {EXPECT_EQ(2, v.toInt());}, "(int? a) -> int?"); - testArgTypes>::test( - c10::optional(true), [] (const c10::optional& v) {EXPECT_EQ(true, v.value());}, - c10::optional(false), [] (const IValue& v) {EXPECT_EQ(false, v.toBool());}, + testArgTypes>::test( + std::optional(true), [] (const c10::optional& v) {EXPECT_EQ(true, v.value());}, + std::optional(false), [] (const IValue& v) {EXPECT_EQ(false, v.toBool());}, "(bool? a) -> bool?"); - testArgTypes>::test( - c10::optional(false), [] (const c10::optional& v) {EXPECT_EQ(false, v.value());}, - c10::optional(true), [] (const IValue& v) {EXPECT_EQ(true, v.toBool());}, + testArgTypes>::test( + std::optional(false), [] (const c10::optional& v) {EXPECT_EQ(false, v.value());}, + std::optional(true), [] (const IValue& v) {EXPECT_EQ(true, v.toBool());}, "(bool? a) -> bool?"); - testArgTypes>::test( - c10::optional("string1"), [] (const c10::optional& v) {EXPECT_EQ("string1", v.value());}, - c10::optional("string2"), [] (const IValue& v) {EXPECT_EQ("string2", v.toStringRef());}, + testArgTypes>::test( + std::optional("string1"), [] (const c10::optional& v) {EXPECT_EQ("string1", v.value());}, + std::optional("string2"), [] (const IValue& v) {EXPECT_EQ("string2", v.toStringRef());}, "(str? a) -> str?"); - testArgTypes>::test( - c10::optional(dummyTensor(c10::DispatchKey::CPU)), [] (const c10::optional& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));}, - c10::optional(dummyTensor(c10::DispatchKey::CUDA)), [] (const IValue& v) {EXPECT_EQ(c10::DispatchKey::CUDA, extractDispatchKey(v.toTensor()));}, + testArgTypes>::test( + std::optional(dummyTensor(c10::DispatchKey::CPU)), [] (const c10::optional& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));}, + std::optional(dummyTensor(c10::DispatchKey::CUDA)), [] (const IValue& v) {EXPECT_EQ(c10::DispatchKey::CUDA, extractDispatchKey(v.toTensor()));}, "(Tensor? a) -> Tensor?"); // optional types (with has_value() == false) - testArgTypes>::test( - c10::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, - c10::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, + testArgTypes>::test( + std::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, + std::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, "(float? 
a) -> float?"); - testArgTypes>::test( - c10::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, - c10::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, + testArgTypes>::test( + std::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, + std::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, "(int? a) -> int?"); - testArgTypes>::test( - c10::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, - c10::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, + testArgTypes>::test( + std::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, + std::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, "(bool? a) -> bool?"); - testArgTypes>::test( - c10::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, - c10::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, + testArgTypes>::test( + std::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, + std::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, "(bool? a) -> bool?"); - testArgTypes>::test( - c10::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, - c10::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, + testArgTypes>::test( + std::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, + std::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, "(str? a) -> str?"); - testArgTypes>::test( - c10::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, - c10::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, + testArgTypes>::test( + std::optional(c10::nullopt), [] (const c10::optional& v) {EXPECT_FALSE(v.has_value());}, + std::optional(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, "(Tensor? a) -> Tensor?"); @@ -1136,21 +1136,21 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { "(Tensor[] a) -> Tensor[]"); // Test optional of list (with nullopt) - testArgTypes>>::test( - c10::optional>(c10::nullopt), [] (const c10::optional>& v) {EXPECT_FALSE(v.has_value());}, - c10::optional>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, + testArgTypes>>::test( + std::optional>(c10::nullopt), [] (const c10::optional>& v) {EXPECT_FALSE(v.has_value());}, + std::optional>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());}, "(int[]? a) -> int[]?"); // Test optional of list (with empty list) - testArgTypes>>::test( - c10::optional>(c10::List({})), [] (const c10::optional>& v) {EXPECT_EQ(0, v.value().size());}, - c10::optional>(c10::List({})), [] (const IValue& v) {EXPECT_EQ(0, v.to>().size());}, + testArgTypes>>::test( + std::optional>(c10::List({})), [] (const c10::optional>& v) {EXPECT_EQ(0, v.value().size());}, + std::optional>(c10::List({})), [] (const IValue& v) {EXPECT_EQ(0, v.to>().size());}, "(int[]? 
a) -> int[]?"); // Test optional of list (with values) - testArgTypes>>::test( - c10::optional>(c10::List({1, 2})), [] (const c10::optional>& v) {expectListEquals({1, 2}, v.value());}, - c10::optional>(c10::List({3, 4})), [] (const IValue& v) {expectListEquals({3, 4}, v.to>());}, + testArgTypes>>::test( + std::optional>(c10::List({1, 2})), [] (const c10::optional>& v) {expectListEquals({1, 2}, v.value());}, + std::optional>(c10::List({3, 4})), [] (const IValue& v) {expectListEquals({3, 4}, v.to>());}, "(int[]? a) -> int[]?"); // Test list of optional (with empty list) @@ -1161,8 +1161,8 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { // Test list of optional (with values) testArgTypes>>::test( - c10::List<::std::optional>(c10::List<::std::optional>({3, c10::nullopt, 2})), [] (const c10::List<::std::optional>& v) {expectListEquals>({3, c10::nullopt, 2}, v);}, - c10::List<::std::optional>(c10::List<::std::optional>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals>({3, c10::nullopt, 2}, v.to>>());}, + c10::List<::std::optional>(c10::List<::std::optional>({3, c10::nullopt, 2})), [] (const c10::List<::std::optional>& v) {expectListEquals>({3, c10::nullopt, 2}, v);}, + c10::List<::std::optional>(c10::List<::std::optional>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals>({3, c10::nullopt, 2}, v.to>>());}, "(int?[] a) -> int?[]"); // dict types diff --git a/aten/src/ATen/core/operator_name.h b/aten/src/ATen/core/operator_name.h index 6440a695b55ec..5ba01b4a7df58 100644 --- a/aten/src/ATen/core/operator_name.h +++ b/aten/src/ATen/core/operator_name.h @@ -23,7 +23,7 @@ struct OperatorName final { // Return the namespace of this OperatorName, if it exists. The // returned string_view is only live as long as the OperatorName // exists and name is not mutated - c10::optional getNamespace() const { + std::optional getNamespace() const { auto pos = name.find("::"); if (pos == std::string::npos) { return c10::nullopt; diff --git a/aten/src/ATen/core/tensor_type.cpp b/aten/src/ATen/core/tensor_type.cpp index c7f8c8b05f91e..9110b4261d396 100644 --- a/aten/src/ATen/core/tensor_type.cpp +++ b/aten/src/ATen/core/tensor_type.cpp @@ -274,12 +274,12 @@ TensorTypePtr TensorType::create(const at::Tensor& t) { } TensorTypePtr TensorType::create( - c10::optional scalar_type, - c10::optional device, + std::optional scalar_type, + std::optional device, const VaryingShape& sizes, const VaryingShape& strides, - c10::optional requires_grad, - c10::optional undefined, bool tensor_contiguity) { + std::optional requires_grad, + std::optional undefined, bool tensor_contiguity) { if(strides.concrete_sizes() && strides.concrete_sizes().has_value()){ // handles case where strides are set // NOLINTNEXTLINE(bugprone-unchecked-optional-access) @@ -304,22 +304,22 @@ TensorTypePtr TensorType::create( } TensorTypePtr TensorType::create( - c10::optional scalar_type, - c10::optional device, + std::optional scalar_type, + std::optional device, const SymbolicShape& sizes, const VaryingShape& strides, - c10::optional requires_grad, - c10::optional undefined) { + std::optional requires_grad, + std::optional undefined) { auto pt = TensorTypePtr(new TensorType( scalar_type, device, sizes, strides, requires_grad, undefined)); return pt; } TensorTypePtr TensorType::create( - c10::optional scalar_type, - c10::optional device, - c10::optional dim, - c10::optional requires_grad) { + std::optional scalar_type, + std::optional device, + std::optional dim, + std::optional requires_grad) { return 
TensorType::create( scalar_type, device, @@ -349,7 +349,7 @@ VaryingShape TensorType::sizes() const { fmap(*sizes_.sizes(), [](ShapeSymbol ss) { // we turn symbolic shapes into unknowns return ss.is_static() - ? c10::optional(ss.static_size()) + ? std::optional(ss.static_size()) : c10::nullopt; })); } @@ -371,7 +371,7 @@ TensorTypePtr TensorType::merge(const TensorType& other, bool merge_sizes) const } template -bool is_null_or_equal(c10::optional a, c10::IntArrayRef b) { +bool is_null_or_equal(std::optional a, c10::IntArrayRef b) { return !a.has_value() || a.value() == b; } @@ -417,7 +417,7 @@ VaryingShape TensorType::strides() const { if (!strides_.size().has_value()) { return VaryingShape(); } - std::vector> ss(*strides_.size()); + std::vector> ss(*strides_.size()); for (size_t i = 0; i < *strides_.size(); i++) { if (!strides_[i].has_value()) { continue; @@ -431,12 +431,12 @@ VaryingShape TensorType::strides() const { } TensorType::TensorType( - c10::optional scalar_type, - c10::optional device, + std::optional scalar_type, + std::optional device, SymbolicShape sizes, VaryingShape strides, - c10::optional requires_grad, - c10::optional undefined) + std::optional requires_grad, + std::optional undefined) : SharedType(TypeKind::TensorType), scalar_type_(scalar_type), device_(device), diff --git a/aten/src/ATen/core/type.cpp b/aten/src/ATen/core/type.cpp index f7d67ca84861a..572b15a118b36 100644 --- a/aten/src/ATen/core/type.cpp +++ b/aten/src/ATen/core/type.cpp @@ -364,7 +364,7 @@ SymBoolTypePtr SymBoolType::get() { return value; } -static c10::optional unifyTypesImpl(const TypePtr& t1, const TypePtr& t2, bool default_to_union=false, const TypePtr& type_hint=nullptr) { +static std::optional unifyTypesImpl(const TypePtr& t1, const TypePtr& t2, bool default_to_union=false, const TypePtr& type_hint=nullptr) { // check direct subtyping relation if (t1->isSubtypeOf(*t2)) { return t2; @@ -446,7 +446,7 @@ static c10::optional unifyTypesImpl(const TypePtr& t1, const TypePtr& t return c10::nullopt; } -c10::optional unifyTypes(const TypePtr& t1, const TypePtr& t2, bool default_to_union, const TypePtr& type_hint) { +std::optional unifyTypes(const TypePtr& t1, const TypePtr& t2, bool default_to_union, const TypePtr& type_hint) { auto unified = unifyTypesImpl(t1, t2, default_to_union, type_hint); if (default_to_union && !unified) { @@ -456,7 +456,7 @@ c10::optional unifyTypes(const TypePtr& t1, const TypePtr& t2, bool def return unified; } -c10::optional unifyTypeList( +std::optional unifyTypeList( at::ArrayRef elements, std::ostream& why_not, bool default_to_union, @@ -468,7 +468,7 @@ c10::optional unifyTypeList( TypePtr ret_type = elements.at(0); for (size_t i = 1; i < elements.size() && ret_type; ++i) { - c10::optional maybe_unified = unifyTypes(ret_type, elements.at(i), default_to_union, type_hint); + std::optional maybe_unified = unifyTypes(ret_type, elements.at(i), default_to_union, type_hint); if (!maybe_unified) { why_not << "Could not unify type list since element " << i << " of type " << elements.at(i)->repr_str() @@ -719,7 +719,7 @@ bool Type::is_module() const { } TupleTypePtr TupleType::createNamed( - const c10::optional& qualName, + const std::optional& qualName, const std::vector& field_names, const std::vector& field_types) { std::vector empty_defaults; @@ -727,7 +727,7 @@ TupleTypePtr TupleType::createNamed( } TupleTypePtr TupleType::createNamed( - const c10::optional& qualName, + const std::optional& qualName, const std::vector& field_names, const std::vector& field_types) { 
std::vector empty_defaults; @@ -735,7 +735,7 @@ TupleTypePtr TupleType::createNamed( } TupleTypePtr TupleType::createNamed( - const c10::optional& qualName, + const std::optional& qualName, const std::vector& field_names, const std::vector& field_types, std::vector& field_defaults) { @@ -743,7 +743,7 @@ TupleTypePtr TupleType::createNamed( } template -TupleTypePtr TupleType::createWithSpec(const c10::optional& qualName, +TupleTypePtr TupleType::createWithSpec(const std::optional& qualName, const std::vector& field_names, const std::vector& field_types, std::vector& field_defaults) { @@ -784,7 +784,7 @@ TupleTypePtr TupleType::createWithSpec(const c10::optional& field_types, qualName, std::move(schema))); // NOLINT(modernize-make-shared) } -c10::optional> TupleType::names() const { +std::optional> TupleType::names() const { if (!schema_) { return {}; } @@ -820,7 +820,7 @@ bool NumberType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const { TupleType::TupleType( std::vector elements, - c10::optional name, + std::optional name, std::shared_ptr schema) : NamedType(TypeKind::TupleType, std::move(name)), elements_(std::move(elements)), diff --git a/aten/src/ATen/core/union_type.cpp b/aten/src/ATen/core/union_type.cpp index 2acc4c497ba56..4039e2a4418f9 100644 --- a/aten/src/ATen/core/union_type.cpp +++ b/aten/src/ATen/core/union_type.cpp @@ -29,7 +29,7 @@ ListTypePtr ListType::ofOptionalTensors() { namespace { -c10::optional subtractTypeSetFrom(std::vector& to_subtract, ArrayRef from) { +std::optional subtractTypeSetFrom(std::vector& to_subtract, ArrayRef from) { std::vector types; // Given a TypePtr `lhs`, this function says whether or not `lhs` (or @@ -93,7 +93,7 @@ void filterDuplicateSubtypes(std::vector* types) { if (types->empty()) { return; } - auto get_supertype = [](const TypePtr& t1, const TypePtr& t2) -> c10::optional { + auto get_supertype = [](const TypePtr& t1, const TypePtr& t2) -> std::optional { // We don't want nested Optionals. 
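The `unifyTypes` family converted above uses `std::optional<TypePtr>` as a tri-state result: a type when the inputs unify, `nullopt` when they do not and the caller must decide whether to fall back to a union. A toy sketch of that contract, with made-up `Type` names rather than the real JIT type hierarchy:

```
#include <memory>
#include <optional>
#include <string>

// Toy type representation; the real code works on c10::TypePtr.
struct Type { std::string name; };
using TypePtr = std::shared_ptr<Type>;

// nullopt means "no common supertype", mirroring the unifyTypesImpl contract.
std::optional<TypePtr> unifySketch(const TypePtr& a, const TypePtr& b) {
  if (a->name == b->name) {
    return a;  // trivially unified
  }
  if (a->name == "None") {
    return std::make_shared<Type>(Type{"Optional[" + b->name + "]"});
  }
  if (b->name == "None") {
    return std::make_shared<Type>(Type{"Optional[" + a->name + "]"});
  }
  return std::nullopt;  // caller may still choose to build a Union
}
```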
Also, prematurely unifying to // `Optional` could prevent us from coalescing other types if ((t1->isSubtypeOf(*NoneType::get()) && !t2->isSubtypeOf(*NoneType::get())) @@ -114,7 +114,7 @@ void filterDuplicateSubtypes(std::vector* types) { size_t end_idx = types->size()-1; for (size_t i = types->size()-1; i > 0; --i) { for (size_t j = std::min(i-1, end_idx); ; --j) { - c10::optional unified; + std::optional unified; unified = get_supertype((*types)[i], (*types)[j]); if (unified) { (*types)[j] = *unified; @@ -272,11 +272,11 @@ UnionTypePtr UnionType::create(std::vector reference) { return union_type; } -c10::optional UnionType::subtractTypeSet(std::vector& to_subtract) const { +std::optional UnionType::subtractTypeSet(std::vector& to_subtract) const { return subtractTypeSetFrom(to_subtract, containedTypes()); } -c10::optional UnionType::toOptional() const { +std::optional UnionType::toOptional() const { if (!canHoldType(*NoneType::get())) { return c10::nullopt; } @@ -432,7 +432,7 @@ bool UnionType::canHoldType(const Type& type) const { bool OptionalType::equals(const Type& rhs) const { if (auto union_rhs = rhs.cast()) { auto optional_rhs = union_rhs->toOptional(); - // `**optional_rhs` = `*` to get value of `c10::optional`, + // `**optional_rhs` = `*` to get value of `std::optional`, // then `*` to dereference the pointer return optional_rhs && *this == **optional_rhs; } else if (auto optional_rhs = rhs.cast()) { diff --git a/aten/src/ATen/cuda/CachingHostAllocator.cpp b/aten/src/ATen/cuda/CachingHostAllocator.cpp index f4f22711d61a3..9ae49113dc8a2 100644 --- a/aten/src/ATen/cuda/CachingHostAllocator.cpp +++ b/aten/src/ATen/cuda/CachingHostAllocator.cpp @@ -105,7 +105,7 @@ struct CUDACachingHostAllocatorImpl } void record_stream( - c10::optional>& events, + std::optional>& events, CUDAStream stream) override { auto event = create_event_internal(stream.device_index()); event->record(stream); diff --git a/aten/src/ATen/cuda/EmptyTensor.cpp b/aten/src/ATen/cuda/EmptyTensor.cpp index a3cd55f4b2b7b..269b4a3ecfc11 100644 --- a/aten/src/ATen/cuda/EmptyTensor.cpp +++ b/aten/src/ATen/cuda/EmptyTensor.cpp @@ -8,8 +8,8 @@ namespace at::detail { TensorBase empty_cuda( IntArrayRef size, ScalarType dtype, - c10::optional device_opt, - c10::optional memory_format_opt) { + std::optional device_opt, + std::optional memory_format_opt) { at::globalContext().lazyInitCUDA(); const auto device = device_or_default(device_opt); TORCH_INTERNAL_ASSERT(device.is_cuda()); @@ -22,11 +22,11 @@ TensorBase empty_cuda( TensorBase empty_cuda( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt) { TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned"); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); @@ -49,7 +49,7 @@ TensorBase empty_strided_cuda( IntArrayRef size, IntArrayRef stride, ScalarType dtype, - c10::optional device_opt) { + std::optional device_opt) { at::globalContext().lazyInitCUDA(); const auto device = device_or_default(device_opt); TORCH_INTERNAL_ASSERT(device.is_cuda()); @@ -63,10 +63,10 @@ TensorBase empty_strided_cuda( TensorBase empty_strided_cuda( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional 
device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned"); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); diff --git a/aten/src/ATen/cuda/EmptyTensor.h b/aten/src/ATen/cuda/EmptyTensor.h index 18733f0beb30b..2fd88a94b75d2 100644 --- a/aten/src/ATen/cuda/EmptyTensor.h +++ b/aten/src/ATen/cuda/EmptyTensor.h @@ -6,16 +6,16 @@ namespace at::detail { TORCH_CUDA_CPP_API TensorBase empty_cuda( IntArrayRef size, ScalarType dtype, - c10::optional device_opt, - c10::optional memory_format_opt); + std::optional device_opt, + std::optional memory_format_opt); TORCH_CUDA_CPP_API TensorBase empty_cuda( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt); TORCH_CUDA_CPP_API TensorBase empty_cuda( IntArrayRef size, @@ -25,15 +25,15 @@ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( IntArrayRef size, IntArrayRef stride, ScalarType dtype, - c10::optional device_opt); + std::optional device_opt); TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt); TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( IntArrayRef size, diff --git a/aten/src/ATen/cuda/PinnedMemoryAllocator.cpp b/aten/src/ATen/cuda/PinnedMemoryAllocator.cpp index 973027cd87f61..0c3e37825640d 100644 --- a/aten/src/ATen/cuda/PinnedMemoryAllocator.cpp +++ b/aten/src/ATen/cuda/PinnedMemoryAllocator.cpp @@ -8,13 +8,13 @@ namespace at::native { -bool is_pinned_cuda(const Tensor& self, c10::optional device) { +bool is_pinned_cuda(const Tensor& self, std::optional device) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda()); // TODO: unhook this return detail::getCUDAHooks().isPinnedPtr(self.storage().data()); } -Tensor _pin_memory_cuda(const Tensor& self, c10::optional device) { +Tensor _pin_memory_cuda(const Tensor& self, std::optional device) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda()); auto* allocator = at::cuda::getPinnedMemoryAllocator(); auto storage = Storage( diff --git a/aten/src/ATen/cudnn/AutocastRNN.cpp b/aten/src/ATen/cudnn/AutocastRNN.cpp index 083d435975c7c..2677e52df0929 100644 --- a/aten/src/ATen/cudnn/AutocastRNN.cpp +++ b/aten/src/ATen/cudnn/AutocastRNN.cpp @@ -22,9 +22,9 @@ std::tuple _cudnn_rnn_cast_reflatten(const Tensor & input, TensorList weight, int64_t weight_stride0, - const c10::optional& weight_buf_opt, + const std::optional& weight_buf_opt, const Tensor& hx, - const c10::optional& cx, + const std::optional& cx, int64_t mode, int64_t hidden_size, int64_t proj_size, @@ -34,7 +34,7 @@ _cudnn_rnn_cast_reflatten(const Tensor & input, bool train, bool bidirectional, IntArrayRef batch_sizes, - const c10::optional& dropout_state) { + const std::optional& dropout_state) { #if AT_CUDNN_ENABLED() c10::impl::ExcludeDispatchKeyGuard no_autocast(DispatchKey::Autocast); diff --git 
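The CUDA factory entry points above follow one convention: every nullable knob arrives as a `std::optional<...>` parameter and is collapsed to a concrete value up front (via the `*_or_default` helpers, or plain `value_or`). A stripped-down sketch of that convention; the enum, struct, and defaults here are invented rather than ATen's:

```
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

enum class MemoryFormat { Contiguous, ChannelsLast };  // invented stand-in

struct FakeTensor {
  std::vector<int64_t> sizes;
  int device_index;
  MemoryFormat format;
};

// Collapse optional arguments to defaults first, then build unconditionally.
FakeTensor empty_sketch(std::vector<int64_t> sizes,
                        std::optional<int> device_opt,
                        std::optional<MemoryFormat> memory_format_opt) {
  const int device = device_opt.value_or(0);
  const MemoryFormat format =
      memory_format_opt.value_or(MemoryFormat::Contiguous);
  return FakeTensor{std::move(sizes), device, format};
}
```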
a/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp b/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp index 44ca2802bf3a2..e7a914c1e0f69 100644 --- a/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp +++ b/aten/src/ATen/functorch/BatchRulesBinaryOps.cpp @@ -303,7 +303,7 @@ static std::tuple> log_sigmoid_backward_batch_rule( return std::make_tuple(at::log_sigmoid_backward(out_grad, out_self, out_buffer), 0); } -static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, c10::optional gen) { +static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, std::optional gen) { return at::binomial(count, prob.contiguous(), std::move(gen)); // Bug in PyTorch, prob shouldn't need to be contiguous } @@ -457,7 +457,7 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) { using TensorScalarInplaceT = Tensor& (Tensor::*)(const Tensor&, const Scalar&) const; using ScalarScalarInplaceT = Tensor& (Tensor::*)(const Scalar&, const Scalar&) const; using TensorInplaceT = Tensor& (Tensor::*)(const Tensor&) const; - using TensorInplaceModeT = Tensor& (Tensor::*)(const Tensor&, c10::optional) const; + using TensorInplaceModeT = Tensor& (Tensor::*)(const Tensor&, std::optional) const; using ScalarInplaceT = Tensor& (Tensor::*)(const Scalar&) const; using CopyT = Tensor& (Tensor::*)(const Tensor&, bool) const; @@ -471,7 +471,7 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) { VMAP_SUPPORT2(mul_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule)); VMAP_SUPPORT2(mul_, Scalar, SINGLE_ARG(unary_inplace_batch_rule)); VMAP_SUPPORT2(div_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule)); - VMAP_SUPPORT2(div_, Tensor_mode, SINGLE_ARG(binary_pointwise_inplace_batch_rule>)); + VMAP_SUPPORT2(div_, Tensor_mode, SINGLE_ARG(binary_pointwise_inplace_batch_rule>)); VMAP_SUPPORT2(div_, Scalar, SINGLE_ARG(unary_inplace_batch_rule)); VMAP_SUPPORT2(clamp_min_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule)); VMAP_SUPPORT2(clamp_max_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule)); diff --git a/aten/src/ATen/functorch/BatchRulesConvolution.cpp b/aten/src/ATen/functorch/BatchRulesConvolution.cpp index ca4eda19a36fb..dd24207e7e778 100644 --- a/aten/src/ATen/functorch/BatchRulesConvolution.cpp +++ b/aten/src/ATen/functorch/BatchRulesConvolution.cpp @@ -124,7 +124,7 @@ convolution_batch_rule(const Tensor& lhs, optional lhs_bdim, const Tens } static Tensor _convolution_decomp( - const Tensor& input_r, const Tensor& weight_r, const c10::optional& bias_r_opt, + const Tensor& input_r, const Tensor& weight_r, const std::optional& bias_r_opt, IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_, bool transposed_, IntArrayRef output_padding_, int64_t groups_, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { diff --git a/aten/src/ATen/functorch/BatchRulesFactory.cpp b/aten/src/ATen/functorch/BatchRulesFactory.cpp index f317fee6af6c7..1edce4f52e271 100644 --- a/aten/src/ATen/functorch/BatchRulesFactory.cpp +++ b/aten/src/ATen/functorch/BatchRulesFactory.cpp @@ -107,11 +107,11 @@ static std::tuple> linspace_logspace_batch_rule_helper( const at::Tensor& start, optional start_bdim, const at::Tensor& end, optional end_bdim, int64_t steps, - c10::optional base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) + std::optional base, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { auto batch_size = get_bdim_size2(start, start_bdim, end, end_bdim); auto start_ = 
ensure_has_bdim(start, start_bdim.has_value(), batch_size); @@ -145,10 +145,10 @@ static std::tuple> linspace_Tensor_Tensor_batch_rule( const at::Tensor& start, optional start_bdim, const at::Tensor& end, optional end_bdim, int64_t steps, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory){ + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory){ return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory); } @@ -156,10 +156,10 @@ static std::tuple> linspace_Tensor_Scalar_batch_rule( const at::Tensor& start, optional start_bdim, const at::Scalar& end, int64_t steps, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory){ + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory){ auto end_t = at::native::wrapped_scalar_tensor(end, start.device()); return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::nullopt, dtype, layout, device, pin_memory); @@ -169,10 +169,10 @@ static std::tuple> linspace_Scalar_Tensor_batch_rule( const at::Scalar& start, const at::Tensor& end, optional end_bdim, int64_t steps, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory){ + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory){ auto start_t = at::native::wrapped_scalar_tensor(start, end.device()); return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory); @@ -183,10 +183,10 @@ static std::tuple> logspace_Tensor_Tensor_batch_rule( const at::Tensor& end, optional end_bdim, int64_t steps, double base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory){ + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory){ return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory); } @@ -195,10 +195,10 @@ static std::tuple> logspace_Tensor_Scalar_batch_rule( const at::Scalar& end, int64_t steps, double base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory){ + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory){ auto end_t = at::native::wrapped_scalar_tensor(end, start.device()); return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::make_optional(base), dtype, layout, device, pin_memory); @@ -209,10 +209,10 @@ static std::tuple> logspace_Scalar_Tensor_batch_rule( const at::Tensor& end, optional end_bdim, int64_t steps, double base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory){ + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory){ auto start_t = at::native::wrapped_scalar_tensor(start, end.device()); return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory); diff --git a/aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp b/aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp index 6a17adb4e268c..511a0a6d45450 100644 --- a/aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp +++ 
b/aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp @@ -157,9 +157,9 @@ void _linalg_check_errors_batch_rule(const Tensor& info, optional info_ at::_linalg_check_errors(info_, api_name, false); } -std::tuple> -householder_product_batch_rule(const Tensor &input, c10::optional input_bdim, - const Tensor &tau, c10::optional tau_bdim) +std::tuple> +householder_product_batch_rule(const Tensor &input, std::optional input_bdim, + const Tensor &tau, std::optional tau_bdim) { auto input_ = moveBatchDimToFront(input, input_bdim); auto tau_ = moveBatchDimToFront(tau, tau_bdim); @@ -330,8 +330,8 @@ oneOutput linalg_lu_solve_batch_rule( } oneOutput cholesky_solve_batch_rule( - const Tensor& self, c10::optional self_bdim, - const Tensor& A, c10::optional A_bdim, + const Tensor& self, std::optional self_bdim, + const Tensor& A, std::optional A_bdim, bool upper) { TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "b should have at least 2 dimensions, but has ", self.dim(), " dimensions instead"); @@ -345,14 +345,14 @@ oneOutput cholesky_solve_batch_rule( } threeOutputs linalg_lu_factor_ex_batch_rule( - const Tensor& A, c10::optional A_bdim, bool pivot, bool check_errors) { + const Tensor& A, std::optional A_bdim, bool pivot, bool check_errors) { TORCH_CHECK(rankWithoutBatchDim(A, A_bdim) >= 2, "torch.lu_factor_ex: Expected tensor with 2 or more dimensions. Got size: ", A.sizes(), " instead"); const auto A_ = moveBatchDimToFront(A, A_bdim); const auto res = at::linalg_lu_factor_ex(A_, pivot, check_errors); return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0); } -oneOutput matrix_exp_batch_rule(const Tensor& self, c10::optional self_bdim) { +oneOutput matrix_exp_batch_rule(const Tensor& self, std::optional self_bdim) { TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.matrix_exp: The input tensor A must have at least 2 dimensions."); const auto self_ = moveBatchDimToFront(self, self_bdim).contiguous(); // seems to be a bug return std::make_tuple(at::matrix_exp(self_), 0); @@ -400,8 +400,8 @@ fourOutputs solve_ex_batch_rule( return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0, std::get<3>(res), 0); } -oneOutput cross_batch_rule(const Tensor& self, c10::optional self_bdim, - const Tensor& other, c10::optional other_bdim, const int64_t dim) { +oneOutput cross_batch_rule(const Tensor& self, std::optional self_bdim, + const Tensor& other, std::optional other_bdim, const int64_t dim) { // match cross dimension checks TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) == rankWithoutBatchDim(other, other_bdim), "linalg.cross: inputs must have the same number of dimensions." 
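Most functorch batch rules touched here share one shape: each tensor argument is paired with a `std::optional<int64_t>` batch dimension, the rule moves any engaged batch dim to the front, and it returns the result together with the batch dim of the output (0 if anything was batched, nullopt otherwise). A simplified sketch over plain shape vectors; the `moveBatchDimToFront` below is a hypothetical stand-in, not the functorch helper:

```
#include <algorithm>
#include <cstdint>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

// Hypothetical stand-in for a tensor: only the shape is tracked.
using Shape = std::vector<int64_t>;

// If a batch dim is present, rotate it to position 0; otherwise leave the shape alone.
Shape moveBatchDimToFront(Shape shape, std::optional<int64_t> bdim) {
  if (bdim.has_value()) {
    const auto b = *bdim;
    std::rotate(shape.begin(), shape.begin() + b, shape.begin() + b + 1);
  }
  return shape;
}

// Returns the result plus the output batch dim: 0 when either input was
// batched, nullopt when neither was.
std::tuple<Shape, std::optional<int64_t>> binary_pointwise_rule_sketch(
    const Shape& lhs, std::optional<int64_t> lhs_bdim,
    const Shape& rhs, std::optional<int64_t> rhs_bdim) {
  auto lhs_ = moveBatchDimToFront(lhs, lhs_bdim);
  auto rhs_ = moveBatchDimToFront(rhs, rhs_bdim);
  const bool batched = lhs_bdim.has_value() || rhs_bdim.has_value();
  Shape out = lhs_.size() >= rhs_.size() ? lhs_ : rhs_;  // pretend broadcasting
  return {std::move(out), batched ? std::optional<int64_t>(0) : std::nullopt};
}
```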
@@ -418,16 +418,16 @@ oneOutput cross_batch_rule(const Tensor& self, c10::optional self_bdim, return std::make_tuple(linalg_cross(self_, other_, dim_), 0); } -c10::optional batch_dim_if_not_empty(const Tensor& t) { +std::optional batch_dim_if_not_empty(const Tensor& t) { if (t.dim() == 1 && t.size(0) == 0) { - return c10::optional(); + return std::optional(); } - return c10::optional(0); + return std::optional(0); } fourOutputs linalg_lstsq_batch_rule( - const Tensor& self, c10::optional self_bdim, const Tensor& b, c10::optional b_bdim, - c10::optional rcond, c10::optional driver) { + const Tensor& self, std::optional self_bdim, const Tensor& b, c10::optional b_bdim, + std::optional rcond, c10::optional driver) { TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.linalg.lstsq: input must have at least 2 dimensions."); TORCH_CHECK(rankWithoutBatchDim(b, b_bdim) >= 1, "torch.linalg.lstsq: other must have at least 1 dimension."); @@ -449,7 +449,7 @@ fourOutputs linalg_lstsq_batch_rule( } template -std::tuple> +std::tuple> atol_rtol_tensor_batch_rule( F Func, const Tensor& input, optional input_bdim, const optional& atol, const optional atol_bdim, @@ -478,11 +478,11 @@ atol_rtol_tensor_batch_rule( return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0); } -static std::tuple> +static std::tuple> pinv_batch_rule( - const Tensor& input, c10::optional input_bdim, const optional& atol, - const c10::optional atol_bdim, const optional& rtol, - const c10::optional rtol_bdim, bool hermitian) { + const Tensor& input, std::optional input_bdim, const optional& atol, + const std::optional atol_bdim, const optional& rtol, + const std::optional rtol_bdim, bool hermitian) { return atol_rtol_tensor_batch_rule(ATEN_FN2(linalg_pinv, atol_rtol_tensor), input, input_bdim, atol, atol_bdim, rtol, rtol_bdim, hermitian, "linalg.pinv"); } } diff --git a/aten/src/ATen/functorch/BatchRulesLoss.cpp b/aten/src/ATen/functorch/BatchRulesLoss.cpp index 22f3adff95a01..cd5ef41d4069f 100644 --- a/aten/src/ATen/functorch/BatchRulesLoss.cpp +++ b/aten/src/ATen/functorch/BatchRulesLoss.cpp @@ -123,7 +123,7 @@ static Tensor binary_cross_entropy_plumbing( static Tensor binary_cross_entropy_backward_plumbing( const Tensor& grad, const Tensor& input, const Tensor& target, - const c10::optional& weight_opt, int64_t reduction) { + const std::optional& weight_opt, int64_t reduction) { auto maybe_layer = maybeCurrentDynamicLayer(); vmap_check_escaped(maybe_layer, "binary_cross_entropy_backward_plumbing"); int64_t cur_level = maybe_layer->layerId(); diff --git a/aten/src/ATen/functorch/BatchRulesNorm.cpp b/aten/src/ATen/functorch/BatchRulesNorm.cpp index faf39d8e374a3..89a23fe0298d7 100644 --- a/aten/src/ATen/functorch/BatchRulesNorm.cpp +++ b/aten/src/ATen/functorch/BatchRulesNorm.cpp @@ -45,10 +45,10 @@ template std::tuple,Tensor,optional,Tensor,optional> batch_norm_batch_rule( const Tensor& input, optional input_bdim, - const c10::optional& weight_opt, optional weight_bdim, - const c10::optional& bias_opt, optional bias_bdim, - const c10::optional& running_mean_opt, optional running_mean_bdim, - const c10::optional& running_var_opt, optional running_var_bdim, + const std::optional& weight_opt, optional weight_bdim, + const std::optional& bias_opt, optional bias_bdim, + const std::optional& running_mean_opt, optional running_mean_bdim, + const std::optional& running_var_opt, optional running_var_bdim, bool training, double momentum, double eps) { c10::MaybeOwned weight_maybe_owned = 
at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -63,7 +63,7 @@ batch_norm_batch_rule( "were not batched.\nIf you are using a module and do not need eval mode, please set `track_running_stats` to be False.", "If you are using a prebuilt module and do not need eval mode, please see the functorch website for resources on ", "how to patch your module to work with vmap"); - c10::optional bdim_size; + std::optional bdim_size; Tensor result0; Tensor mean; Tensor rstd; @@ -80,8 +80,8 @@ batch_norm_batch_rule( input_ = ensure_has_bdim(input_, input_bdim.has_value(), bdim_size.value()); input_ = reshape_dim_into(0, /*channels dim*/1, input_); - c10::optional running_mean_; - c10::optional running_var_; + std::optional running_mean_; + std::optional running_var_; if (running_mean.defined()) { running_mean_ = moveBatchDimToFront(running_mean, running_mean_bdim); running_mean_ = ensure_has_bdim(*running_mean_, running_mean_bdim.has_value(), bdim_size.value()); @@ -127,8 +127,8 @@ template std::tuple> batch_norm_backward_no_weight_bias_batch_rule( const at::Tensor & grad_out, optional grad_out_bdim, const at::Tensor & input, optional input_bdim, - const c10::optional & running_mean_opt, optional running_mean_bdim, - const c10::optional & running_var_opt, optional running_var_bdim, + const std::optional & running_mean_opt, optional running_mean_bdim, + const std::optional & running_var_opt, optional running_var_bdim, const at::Tensor & mean, optional mean_bdim, const at::Tensor & rstd, optional rstd_bdim, bool training, double eps) { @@ -199,11 +199,11 @@ template std::tuple batch_norm_backward_plumbing( const at::Tensor & grad_out, const at::Tensor & input, - const c10::optional & weight_opt, - const c10::optional & running_mean_opt, - const c10::optional & running_var_opt, - const c10::optional & save_mean_opt, - const c10::optional & save_rstd_opt, + const std::optional & weight_opt, + const std::optional & running_mean_opt, + const std::optional & running_var_opt, + const std::optional & save_mean_opt, + const std::optional & save_rstd_opt, bool training, double eps, std::array output_mask) { @@ -284,8 +284,8 @@ std::tuple batch_norm_backward_plumbing( } static std::tuple native_group_norm_plumbing( - const Tensor & input, const c10::optional & weight_opt, - const c10::optional & bias_opt, int64_t N, int64_t C, + const Tensor & input, const std::optional & weight_opt, + const std::optional & bias_opt, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -372,7 +372,7 @@ static std::tuple> group_norm_backward_no_weight_bi static std::tuple native_group_norm_backward_plumbing( const Tensor & grad_out, const Tensor & input, const Tensor & mean, - const Tensor & rstd, const c10::optional & weight_opt, + const Tensor & rstd, const std::optional & weight_opt, int64_t N, int64_t C, int64_t HxW, int64_t group, std::array output_mask ) { // See [Note: hacky wrapper removal for optional tensor] @@ -488,8 +488,8 @@ static std::tuple,Tensor,optional,Tensor,optio native_layer_norm_batch_rule( const Tensor& input, optional input_bdim, c10::SymIntArrayRef normalized_shape, - const c10::optional& weight_opt, optional weight_bdim, - const c10::optional& bias_opt, optional bias_bdim, + const std::optional& weight_opt, optional weight_bdim, + const std::optional& bias_opt, optional bias_bdim, double eps) { auto input_ = 
moveBatchDimToFront(input, input_bdim); if (!weight_bdim && !bias_bdim) { @@ -573,8 +573,8 @@ static std::tuple native_layer_norm_backward_p at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, - const c10::optional & weight_opt, - const c10::optional & bias_opt, + const std::optional & weight_opt, + const std::optional & bias_opt, std::array output_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -653,10 +653,10 @@ template struct NativeBatchNormBatchRuleHelper { static std::tuple,Tensor,optional,Tensor,optional> apply( const Tensor& input, optional input_bdim, - const c10::optional& weight_opt, optional weight_bdim, - const c10::optional& bias_opt, optional bias_bdim, - const c10::optional& running_mean_opt, optional running_mean_bdim, - const c10::optional& running_var_opt, optional running_var_bdim, + const std::optional& weight_opt, optional weight_bdim, + const std::optional& bias_opt, optional bias_bdim, + const std::optional& running_mean_opt, optional running_mean_bdim, + const std::optional& running_var_opt, optional running_var_bdim, bool training, double momentum, double eps) { return batch_norm_batch_rule( input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim, @@ -669,9 +669,9 @@ struct CudnnBatchNormBatchRuleHelper { static std::tuple,Tensor,optional,Tensor,optional,Tensor,optional> apply( const Tensor& input, optional input_bdim, const Tensor& weight_opt, optional weight_bdim, - const c10::optional& bias_opt, optional bias_bdim, - const c10::optional& running_mean_opt, optional running_mean_bdim, - const c10::optional& running_var_opt, optional running_var_bdim, + const std::optional& bias_opt, optional bias_bdim, + const std::optional& running_mean_opt, optional running_mean_bdim, + const std::optional& running_var_opt, optional running_var_bdim, bool training, double momentum, double eps) { auto reserve = at::empty({0}, input.options().dtype(kByte)); // in experiments, reserve was never set to anything other than empty by cuda auto res = batch_norm_batch_rule( @@ -686,9 +686,9 @@ struct MiopenBatchNormBatchRuleHelper { static std::tuple,Tensor,optional,Tensor,optional> apply( const Tensor& input, optional input_bdim, const Tensor& weight_opt, optional weight_bdim, - const c10::optional& bias_opt, optional bias_bdim, - const c10::optional& running_mean_opt, optional running_mean_bdim, - const c10::optional& running_var_opt, optional running_var_bdim, + const std::optional& bias_opt, optional bias_bdim, + const std::optional& running_mean_opt, optional running_mean_bdim, + const std::optional& running_var_opt, optional running_var_bdim, bool training, double momentum, double eps) { return batch_norm_batch_rule( input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim, @@ -716,11 +716,11 @@ struct NativeBatchNormBackwardBatchRuleHelper { static std::tuple apply( const at::Tensor & grad_out, const at::Tensor & input, - const c10::optional & weight_opt, - const c10::optional & running_mean_opt, - const c10::optional & running_var_opt, - const c10::optional & save_mean_opt, - const c10::optional & save_rstd_opt, + const std::optional & weight_opt, + const std::optional & running_mean_opt, + const std::optional & running_var_opt, + const std::optional & save_mean_opt, + const std::optional & save_rstd_opt, bool training, double eps, std::array output_mask) { @@ -748,10 +748,10 @@ struct CudnnBatchNormBackwardBatchRuleHelper { const 
at::Tensor & input, const at::Tensor & grad_out, const at::Tensor & weight, - const c10::optional & running_mean_opt, - const c10::optional & running_var_opt, - const c10::optional & save_mean_opt, - const c10::optional & save_rstd_opt, + const std::optional & running_mean_opt, + const std::optional & running_var_opt, + const std::optional & save_mean_opt, + const std::optional & save_rstd_opt, double eps, const at::Tensor & reserve) { @@ -777,10 +777,10 @@ struct MiopenBatchNormBackwardBatchRuleHelper { const at::Tensor & input, const at::Tensor & grad_out, const at::Tensor & weight, - const c10::optional & running_mean_opt, - const c10::optional & running_var_opt, - const c10::optional & save_mean_opt, - const c10::optional & save_rstd_opt, + const std::optional & running_mean_opt, + const std::optional & running_var_opt, + const std::optional & save_mean_opt, + const std::optional & save_rstd_opt, double eps) { auto maybe_layer = maybeCurrentDynamicLayer(); @@ -818,10 +818,10 @@ static std::tuple cudnn_batch_norm_backward_wr const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor& weight_opt, - const c10::optional & running_mean_opt, - const c10::optional & running_var_opt, - const c10::optional & save_mean_opt, - const c10::optional & save_rstd_opt, + const std::optional & running_mean_opt, + const std::optional & running_var_opt, + const std::optional & save_mean_opt, + const std::optional & save_rstd_opt, bool training, double eps, std::array output_mask) { @@ -834,10 +834,10 @@ static std::tuple miopen_batch_norm_backward_w const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor& weight_opt, - const c10::optional & running_mean_opt, - const c10::optional & running_var_opt, - const c10::optional & save_mean_opt, - const c10::optional & save_rstd_opt, + const std::optional & running_mean_opt, + const std::optional & running_var_opt, + const std::optional & save_mean_opt, + const std::optional & save_rstd_opt, bool training, double eps, std::array output_mask) { @@ -850,13 +850,13 @@ static std::tuple miopen_batch_norm_backward_w // work with dynamo anyway so we gain some buffer room to do wrong things here. The (reasonable) hope is that we will // make native_batch_norm composite implicit within a few weeks and we can fix this before vmap works with dynamo. static std::tuple _native_batch_norm_legit_batch( - const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) { return at::native_batch_norm(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps); } static std::tuple _native_batch_norm_legit_no_stats_batch( - const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double eps) { return at::native_batch_norm(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps); } diff --git a/aten/src/ATen/functorch/BatchRulesRandomness.cpp b/aten/src/ATen/functorch/BatchRulesRandomness.cpp index 79572f22ea3f6..fe2e790331fa0 100644 --- a/aten/src/ATen/functorch/BatchRulesRandomness.cpp +++ b/aten/src/ATen/functorch/BatchRulesRandomness.cpp @@ -58,7 +58,7 @@ Tensor& random_inplace_batching_rule(Tensor& self, ExtraArgs... 
extra_args) { } } -static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, c10::optional gen) { +static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, std::optional gen) { c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode); auto maybe_layer = maybeCurrentDynamicLayer(); auto cur_level = maybe_layer->layerId(); @@ -173,7 +173,7 @@ Tensor tensor_like_random_batch_rule(const Tensor& self, ExtraArgs... extra_args return (randomness == RandomnessType::Same) ? res : makeBatched(res, 0, cur_level); } -static std::tuple native_dropout_batching_rule(const Tensor& tensor, double p, c10::optional train) { +static std::tuple native_dropout_batching_rule(const Tensor& tensor, double p, std::optional train) { c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode); auto maybe_layer = maybeCurrentDynamicLayer(); const auto cur_level = maybe_layer->layerId(); @@ -213,7 +213,7 @@ static std::tuple native_dropout_batching_rule(const Tensor& tens return std::make_tuple(output, mask); } -static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const c10::optional generator) { +static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const std::optional generator) { c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode); auto maybe_layer = maybeCurrentDynamicLayer(); const auto cur_level = maybe_layer->layerId(); diff --git a/aten/src/ATen/functorch/BatchRulesReduceOps.cpp b/aten/src/ATen/functorch/BatchRulesReduceOps.cpp index cb6d6ac519dd8..90371c0eb9ce8 100644 --- a/aten/src/ATen/functorch/BatchRulesReduceOps.cpp +++ b/aten/src/ATen/functorch/BatchRulesReduceOps.cpp @@ -169,7 +169,7 @@ void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack new_dims.push_back(getPhysicalDim(self, self_bdim.has_value(), dim)); } bool is_scalar_case = logical_dim == 0 && dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0]); - c10::optional maybe_keepdim; + std::optional maybe_keepdim; if (is_scalar_case) { // NOTE: [boxed_reduction_batch_rule scalar tensor handling] // Reduction operations in PyTorch have an edge case where they allow @@ -321,9 +321,9 @@ static std::tuple> searchsorted_batch_rule( optional self_bdim, bool out_int32, bool right, - c10::optional side, - const c10::optional& sorter, - c10::optional sorter_bdim) { + std::optional side, + const std::optional& sorter, + std::optional sorter_bdim) { auto buckets_logical_rank = rankWithoutBatchDim(sorted_sequence, sorted_sequence_bdim); auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); diff --git a/aten/src/ATen/functorch/BatchRulesScatterOps.cpp b/aten/src/ATen/functorch/BatchRulesScatterOps.cpp index 0a1475497b03d..839e0ee405abb 100644 --- a/aten/src/ATen/functorch/BatchRulesScatterOps.cpp +++ b/aten/src/ATen/functorch/BatchRulesScatterOps.cpp @@ -375,7 +375,7 @@ namespace { // Code is mostly duplicated from // https://github.com/pytorch/pytorch/blob/fb0e27d38a8fdab4e1c14d6378c9e41cb30fd6a3 // /aten/src/ATen/native/TensorAdvancedIndexing.cpp#L379-L405 - VmapDimVector get_indexed_shape(Tensor self, const torch::List> &orig) + VmapDimVector get_indexed_shape(Tensor self, const torch::List> &orig) { at::native::checkIndexTensorTypes(orig); // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors @@ -869,8 +869,8 @@ Tensor index_copy_decomp( // through a decomposition: 
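The randomness batching rules above thread a `std::optional<Generator>` through to the underlying RNG op and leave engagement entirely to the caller. A small sketch of that calling convention with a toy generator type in place of `at::Generator`:

```
#include <optional>
#include <random>
#include <utility>
#include <vector>

// Toy generator; the real code passes std::optional<at::Generator>.
using Gen = std::mt19937_64;

// Sample Bernoulli draws, using the supplied generator if one was given and a
// default-constructed one otherwise.
std::vector<bool> bernoulli_sketch(const std::vector<double>& p,
                                   std::optional<Gen> gen = std::nullopt) {
  Gen local = gen.has_value() ? std::move(*gen) : Gen{};
  std::vector<bool> out;
  out.reserve(p.size());
  for (double prob : p) {
    std::bernoulli_distribution dist(prob);
    out.push_back(dist(local));
  }
  return out;
}
```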
slice_scatter's output needs to have the same // size, size, strides and storage_offset as the input. Tensor slice_scatter_decomp(const Tensor &self, const Tensor &src, - int64_t dim, c10::optional start, - c10::optional end, int64_t step) + int64_t dim, std::optional start, + std::optional end, int64_t step) { auto idx = at::arange(start.value_or(0), end.value_or(self.size(dim)), step, self.options().dtype(kLong)); idx = get_expanded_index(idx, self.sizes(), dim); @@ -889,8 +889,8 @@ Tensor select_scatter_decomp( } std::tuple> diagonal_scatter_batch_rule( - const Tensor &self, c10::optional self_bdim, - const Tensor &src, c10::optional src_bdim, + const Tensor &self, std::optional self_bdim, + const Tensor &src, std::optional src_bdim, int64_t offset, int64_t dim1, int64_t dim2) { auto self_ = moveBatchDimToFront(self, self_bdim); diff --git a/aten/src/ATen/functorch/BatchRulesUnaryOps.cpp b/aten/src/ATen/functorch/BatchRulesUnaryOps.cpp index f44000674db8a..d8213a1b9e0dd 100644 --- a/aten/src/ATen/functorch/BatchRulesUnaryOps.cpp +++ b/aten/src/ATen/functorch/BatchRulesUnaryOps.cpp @@ -63,7 +63,7 @@ std::tuple> to_other_batch_rule(const Tensor& self, optional self_bdim, const Tensor& other, optional other_bdim, bool non_blocking, - bool copy, c10::optional memory_format) { + bool copy, std::optional memory_format) { return std::make_tuple(self.to(other, non_blocking, copy, memory_format), self_bdim); } } diff --git a/aten/src/ATen/functorch/BatchRulesViews.cpp b/aten/src/ATen/functorch/BatchRulesViews.cpp index 81e9d5b9aa21c..18f5d4f38f3cc 100644 --- a/aten/src/ATen/functorch/BatchRulesViews.cpp +++ b/aten/src/ATen/functorch/BatchRulesViews.cpp @@ -149,7 +149,7 @@ std::tuple> flip_batch_rule(const Tensor& self, optiona const Tensor& resize__plumbing( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { TORCH_CHECK( !optional_memory_format.has_value() || optional_memory_format == c10::MemoryFormat::Contiguous, @@ -217,7 +217,7 @@ std::tuple> squeeze_batch_rule(const Tensor& self, opt } auto result = self.view_symint(squeezed_sizes); - return std::make_tuple(std::move(result), c10::optional(new_batch_idx)); + return std::make_tuple(std::move(result), std::optional(new_batch_idx)); } std::tuple> squeeze_dims_batch_rule( @@ -335,8 +335,8 @@ std::tuple> slice_batch_rule( const Tensor& self, optional self_bdim, int64_t dim, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, c10::SymInt step) { auto self_ = moveBatchDimToFront(self, self_bdim); dim = getPhysicalDim(self, self_bdim.has_value(), dim); diff --git a/aten/src/ATen/functorch/DynamicLayer.cpp b/aten/src/ATen/functorch/DynamicLayer.cpp index 45976fa855f32..35f2439c982db 100644 --- a/aten/src/ATen/functorch/DynamicLayer.cpp +++ b/aten/src/ATen/functorch/DynamicLayer.cpp @@ -387,7 +387,7 @@ bool isInplaceOp(const FunctionSchema& schema) { return return_alias_info && return_alias_info->isWrite(); } -c10::optional findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input_idx) { +std::optional findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input_idx) { for (size_t res_idx = 0; res_idx != schema.returns().size(); ++res_idx) { if (schema.may_contain_alias(SchemaArgument(SchemaArgType::input, immutable_input_idx), SchemaArgument(SchemaArgType::output, res_idx))) { return res_idx; // for everything currently in native_functions, each input aliases at most one output (tensor list counts as one 
output) diff --git a/aten/src/ATen/functorch/DynamicLayer.h b/aten/src/ATen/functorch/DynamicLayer.h index 9311503f3538d..554e6678d09a1 100644 --- a/aten/src/ATen/functorch/DynamicLayer.h +++ b/aten/src/ATen/functorch/DynamicLayer.h @@ -71,7 +71,7 @@ TORCH_API int64_t initAndPushDynamicLayer( optional prev_fwd_grad_mode = nullopt, optional functionalize_add_back_views = nullopt); TORCH_API DynamicLayer popDynamicLayerAndDeleteMetadata(); -TORCH_API c10::optional maybeCurrentDynamicLayer(); +TORCH_API std::optional maybeCurrentDynamicLayer(); TORCH_API const std::vector& getDynamicLayerStack(); TORCH_API void setDynamicLayerStack(const std::vector& stack); TORCH_API void setDynamicLayerFrontBackKeysIncluded(bool included); @@ -95,7 +95,7 @@ TORCH_API const std::shared_ptr& getLifeHandleForLevel(int64_t level); TORCH_API bool isInplaceOp(const c10::FunctionSchema& schema); // Given the indices of unwrapped inputs and the schema, this returns the indices of any outputs that should remain unwrapped -TORCH_API c10::optional findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input); +TORCH_API std::optional findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input); TORCH_API Tensor unwrapIfDead(const Tensor& tensor); TORCH_API bool isDeadTensorWrapper(const Tensor& tensor); diff --git a/aten/src/ATen/functorch/LegacyBatchingRegistrations.cpp b/aten/src/ATen/functorch/LegacyBatchingRegistrations.cpp index b7a131766ec86..760035d8e46ec 100644 --- a/aten/src/ATen/functorch/LegacyBatchingRegistrations.cpp +++ b/aten/src/ATen/functorch/LegacyBatchingRegistrations.cpp @@ -536,7 +536,7 @@ Tensor cat_batching_rule(const ITensorListRef& tensors, int64_t dim) { // we'll just slice the tensor to get a Tensor of shape [0] to pass to at::cat. std::vector tensors_to_cat; tensors_to_cat.reserve(tensors.size()); - c10::optional bdim_size = c10::nullopt; + std::optional bdim_size = c10::nullopt; // find the bdim size. Might not exist if all BatchedTensors should be skipped // by cat's special case. @@ -573,7 +573,7 @@ Tensor cat_batching_rule(const ITensorListRef& tensors, int64_t dim) { } auto new_dim = bdim_size.has_value() ? dim + 1 : dim; - c10::optional new_bdim = bdim_size.has_value() ? c10::make_optional((int64_t)0) : nullopt; + std::optional new_bdim = bdim_size.has_value() ? 
c10::make_optional((int64_t)0) : nullopt; auto result = at::cat(tensors_to_cat, new_dim); return makeBatched(result, new_bdim, get_current_level()); } diff --git a/aten/src/ATen/functorch/PlumbingHelper.cpp b/aten/src/ATen/functorch/PlumbingHelper.cpp index 76982fd1b6480..e2a3a9582cf49 100644 --- a/aten/src/ATen/functorch/PlumbingHelper.cpp +++ b/aten/src/ATen/functorch/PlumbingHelper.cpp @@ -40,7 +40,7 @@ std::vector makeBatchedVector(const std::vector& tensors, option return res; } -std::tuple> unwrapTensorAtLevel(const Tensor& tensor, int64_t level) { +std::tuple> unwrapTensorAtLevel(const Tensor& tensor, int64_t level) { auto* batched = maybeGetBatchedImpl(tensor); if (!batched) { return std::make_tuple(tensor, nullopt); @@ -56,7 +56,7 @@ bool isBatchedAtLevel(const Tensor& tensor, int64_t level) { return std::get<1>(result).has_value(); } -bool isBatchedAtLevel(const c10::optional& maybe_tensor, int64_t level) { +bool isBatchedAtLevel(const std::optional& maybe_tensor, int64_t level) { if (!maybe_tensor.has_value()) { return false; } @@ -72,7 +72,7 @@ bool isBatchedAtLevel(ITensorListRef tensors, int64_t level) { return false; } -bool isBatchedAtLevel(const c10::List>& maybe_tensors, int64_t level) { +bool isBatchedAtLevel(const c10::List>& maybe_tensors, int64_t level) { for (const auto idx : c10::irange(0, maybe_tensors.size())) { const auto& maybe_tensor = maybe_tensors.get(idx); if (isBatchedAtLevel(maybe_tensor, level)) { diff --git a/aten/src/ATen/functorch/PlumbingHelper.h b/aten/src/ATen/functorch/PlumbingHelper.h index 552a618b144c8..c2c16c67bcd91 100644 --- a/aten/src/ATen/functorch/PlumbingHelper.h +++ b/aten/src/ATen/functorch/PlumbingHelper.h @@ -35,16 +35,16 @@ TORCH_API Tensor makeBatched(const Tensor& tensor, optional bdim, int64 // If `tensor` is not a BatchedTensor, or is a BatchedTensor but the level // doesn't match, then this returns (tensor, nullopt). // Otherwise, it returns (unwrap(tensor), bdim). -TORCH_API std::tuple> unwrapTensorAtLevel(const Tensor& tensor, int64_t level); +TORCH_API std::tuple> unwrapTensorAtLevel(const Tensor& tensor, int64_t level); // Creates a vector of BatchedTensor TORCH_API std::vector makeBatchedVector(const std::vector& tensors, optional bdim, int64_t level); // Returns True if ANY tensor in tensors is batched at level TORCH_API bool isBatchedAtLevel(ITensorListRef tensors, int64_t level); -TORCH_API bool isBatchedAtLevel(const c10::List>& maybe_tensors, int64_t level); +TORCH_API bool isBatchedAtLevel(const c10::List>& maybe_tensors, int64_t level); TORCH_API bool isBatchedAtLevel(const Tensor& tensor, int64_t level); -TORCH_API bool isBatchedAtLevel(const c10::optional& maybe_tensor, int64_t level); +TORCH_API bool isBatchedAtLevel(const std::optional& maybe_tensor, int64_t level); // Convenience helper. Returns true if any tensor is batched at level TORCH_API bool areAnyBatchedAtLevel(ArrayRef> maybe_tensors, int64_t level); diff --git a/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp b/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp index 355ac5965da51..ce3f20ef97efc 100644 --- a/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp +++ b/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp @@ -73,7 +73,7 @@ static bool can_perform_inplace(const Tensor& a, const Tensor& b) { // TODO: linear is pretty important for performance, but I'm not sure how to work // around the in-place. 
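`isBatchedAtLevel` above now comes in overloads for a plain tensor, a `std::optional` tensor, and a list of optional tensors, all funnelling into one check. A compact sketch of that overload ladder with a dummy struct in place of `at::Tensor` and a `std::vector` in place of `c10::List`:

```
#include <optional>
#include <vector>

struct Tensor { int level = -1; };  // dummy stand-in for at::Tensor

bool isBatchedAtLevel(const Tensor& t, int level) {
  return t.level == level;
}

// Optional overload: a missing tensor is never batched.
bool isBatchedAtLevel(const std::optional<Tensor>& maybe_t, int level) {
  return maybe_t.has_value() && isBatchedAtLevel(*maybe_t, level);
}

// List-of-optionals overload: true if any element is batched at the level.
bool isBatchedAtLevel(const std::vector<std::optional<Tensor>>& maybe_ts, int level) {
  for (const auto& maybe_t : maybe_ts) {
    if (isBatchedAtLevel(maybe_t, level)) {
      return true;
    }
  }
  return false;
}
```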
-Tensor linear_hack(const Tensor& input, const Tensor& weight, const c10::optional& bias_opt) { +Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optional& bias_opt) { // See [Note: hacky wrapper removal for optional tensor] auto bias = bias_opt.has_value() ? c10::MaybeOwned::borrowed(*bias_opt) @@ -123,8 +123,8 @@ static inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64 Tensor binary_cross_entropy_with_logits_hack( const Tensor& input, const Tensor& target, - const c10::optional& weight_opt, - const c10::optional& pos_weight_opt, + const std::optional& weight_opt, + const std::optional& pos_weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); diff --git a/aten/src/ATen/miopen/AutocastRNN.cpp b/aten/src/ATen/miopen/AutocastRNN.cpp index 271d80ea03cd4..a23eb4a1a19b8 100644 --- a/aten/src/ATen/miopen/AutocastRNN.cpp +++ b/aten/src/ATen/miopen/AutocastRNN.cpp @@ -14,7 +14,7 @@ miopen_rnn(const Tensor & input_r, TensorList weight, int64_t weight_stride0, const Tensor & hx, - const c10::optional& cx_opt, + const std::optional& cx_opt, int64_t fn_mode, int64_t fn_hidden_size, int64_t fn_num_layers, @@ -23,7 +23,7 @@ miopen_rnn(const Tensor & input_r, bool fn_train, bool fn_bidirectional, IntArrayRef fn_batch_sizes, - const c10::optional& fn_dropout_state_opt) { + const std::optional& fn_dropout_state_opt) { #if AT_ROCM_ENABLED() diff --git a/aten/src/ATen/mps/EmptyTensor.cpp b/aten/src/ATen/mps/EmptyTensor.cpp index f7918ac18993c..baa91eabb3898 100644 --- a/aten/src/ATen/mps/EmptyTensor.cpp +++ b/aten/src/ATen/mps/EmptyTensor.cpp @@ -20,11 +20,11 @@ namespace at::detail { TensorBase empty_mps( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt) { #if defined(__APPLE__) #if __is_target_os(macOS) if (at::hasMPS()) { @@ -95,7 +95,7 @@ TensorBase empty_strided_mps( IntArrayRef size, IntArrayRef stride, ScalarType dtype, - c10::optional device_opt) { + std::optional device_opt) { #if defined(__APPLE__) #if __is_target_os(macOS) if (at::hasMPS()) { diff --git a/aten/src/ATen/mps/EmptyTensor.h b/aten/src/ATen/mps/EmptyTensor.h index 88a29547406cd..39b206cb3031d 100644 --- a/aten/src/ATen/mps/EmptyTensor.h +++ b/aten/src/ATen/mps/EmptyTensor.h @@ -7,11 +7,11 @@ namespace at::detail { C10_EXPORT TensorBase empty_mps( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt); + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt); C10_EXPORT TensorBase empty_mps( IntArrayRef size, const TensorOptions &options); @@ -19,7 +19,7 @@ C10_EXPORT TensorBase empty_strided_mps( IntArrayRef size, IntArrayRef stride, ScalarType dtype, - c10::optional device_opt); + std::optional device_opt); C10_EXPORT TensorBase empty_strided_mps( IntArrayRef size, diff --git a/aten/src/ATen/mps/MPSGuardImpl.h b/aten/src/ATen/mps/MPSGuardImpl.h index fe43fcf40fd34..1b57d2966767a 100644 --- a/aten/src/ATen/mps/MPSGuardImpl.h +++ b/aten/src/ATen/mps/MPSGuardImpl.h @@ -52,7 +52,7 @@ struct TORCH_API 
MPSGuardImpl final : public c10::impl::DeviceGuardImplInterface return Device(c10::DeviceType::MPS, 0); } - c10::optional uncheckedGetDevice() const noexcept { + std::optional uncheckedGetDevice() const noexcept { return Device(c10::DeviceType::MPS, 0); } @@ -112,12 +112,12 @@ struct TORCH_API MPSGuardImpl final : public c10::impl::DeviceGuardImplInterface struct OptionalMPSGuard { explicit OptionalMPSGuard() : guard_() {} - explicit OptionalMPSGuard(c10::optional device_opt) + explicit OptionalMPSGuard(std::optional device_opt) : guard_(device_opt) {} /// Set the current MPS device to the passed device index, if it is not /// nullopt - explicit OptionalMPSGuard(c10::optional device_index_opt) + explicit OptionalMPSGuard(std::optional device_index_opt) : guard_(device_index_opt) {} // Copy is not allowed @@ -147,14 +147,14 @@ struct OptionalMPSGuard { /// Returns the device that was set immediately prior to initialization of the /// guard, or nullopt if the guard is uninitialized. - c10::optional original_device() const { + std::optional original_device() const { return guard_.original_device(); } /// Returns the most recent device that was set using this device guard, /// either from construction, or via set_device, if the guard is initialized, /// or nullopt if the guard is uninitialized. - c10::optional current_device() const { + std::optional current_device() const { return guard_.current_device(); } diff --git a/aten/src/ATen/native/Activation.cpp b/aten/src/ATen/native/Activation.cpp index 017749aa04c44..a0141f974923e 100644 --- a/aten/src/ATen/native/Activation.cpp +++ b/aten/src/ATen/native/Activation.cpp @@ -572,7 +572,7 @@ inline void _rrelu_with_noise_train( const Tensor& noise, const Scalar& lower_, const Scalar& upper_, - c10::optional generator) { + std::optional generator) { using opmath_t = at::opmath_type; opmath_t lower = lower_.to(); opmath_t upper = upper_.to(); @@ -603,7 +603,7 @@ Tensor& rrelu_with_noise_out_cpu(const Tensor& self, const Scalar& lower, const Scalar& upper, bool training, - c10::optional generator, + std::optional generator, Tensor& output) { TORCH_CHECK(self.sym_sizes() == noise.sym_sizes(), "noise tensor shape must match self tensor shape. 
Got self.shape = ", self.sym_sizes(), " noise.shape = ", noise.sym_sizes()); if (training) { @@ -626,7 +626,7 @@ Tensor rrelu_with_noise_cpu( const Scalar& lower, const Scalar& upper, bool training, - c10::optional generator) { + std::optional generator) { auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); return at::native::rrelu_with_noise_out_cpu( self, noise, lower, upper, training, std::move(generator), output); @@ -638,7 +638,7 @@ Tensor& rrelu_with_noise_cpu_( const Scalar& lower, const Scalar& upper, bool training, - c10::optional generator) { + std::optional generator) { return at::native::rrelu_with_noise_out_cpu( self, noise, lower, upper, training, std::move(generator), self); } @@ -661,12 +661,12 @@ Tensor rrelu_with_noise_backward( } } -Tensor rrelu(const Tensor & self, const Scalar& lower, const Scalar& upper, bool training, c10::optional generator) { +Tensor rrelu(const Tensor & self, const Scalar& lower, const Scalar& upper, bool training, std::optional generator) { TORCH_CHECK(lower.to() <= upper.to(), "Lower bound should be less than or equal to the upper bound") return at::rrelu_with_noise(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, std::move(generator)); } -Tensor & rrelu_(Tensor & self, const Scalar& lower, const Scalar& upper, bool training, c10::optional generator) { +Tensor & rrelu_(Tensor & self, const Scalar& lower, const Scalar& upper, bool training, std::optional generator) { TORCH_CHECK(lower.to() <= upper.to(), "Lower bound should be less than or equal to the upper bound") return at::rrelu_with_noise_(self, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT), lower, upper, training, std::move(generator)); } diff --git a/aten/src/ATen/native/AveragePool2d.cpp b/aten/src/ATen/native/AveragePool2d.cpp index 854b4585db10a..368dc02c2832f 100644 --- a/aten/src/ATen/native/AveragePool2d.cpp +++ b/aten/src/ATen/native/AveragePool2d.cpp @@ -21,7 +21,7 @@ TORCH_PRECOMPUTE_META_FUNC(avg_pool2d) IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); @@ -101,7 +101,7 @@ TORCH_META_FUNC(avg_pool2d_backward) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override + std::optional divisor_override ) { // #20866, #22032: Guarantee this for the official C++ API? 
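// Illustrative sketch, not taken from this patch: the rrelu/rrelu_ signatures in the
// Activation.cpp hunks above pass the RNG as an optional generator (presumably
// std::optional<at::Generator> once the stripped template arguments are restored).
// A minimal caller-side sketch assuming only the public at::rrelu API; the helper
// name rrelu_with_default_rng is hypothetical.
#include <ATen/ATen.h>
#include <optional>
#include <utility>

static at::Tensor rrelu_with_default_rng(const at::Tensor& input,
                                         std::optional<at::Generator> gen = std::nullopt) {
  // Passing std::nullopt lets the op fall back to the default RNG, just as
  // c10::nullopt did before the rename; a caller-supplied generator is simply
  // forwarded through the std::optional parameter.
  return at::rrelu(input, /*lower=*/0.125, /*upper=*/1.0 / 3, /*training=*/true, std::move(gen));
}
// End of sketch.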
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, @@ -159,7 +159,7 @@ TORCH_IMPL_FUNC(avg_pool2d_out_cpu) int64_t padW, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& output) { avg_pool2d_kernel( kCPU, @@ -183,7 +183,7 @@ TORCH_IMPL_FUNC(avg_pool2d_backward_out_cpu) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& gradInput ) { const int kH = safe_downcast(kernel_size[0]); diff --git a/aten/src/ATen/native/AveragePool3d.cpp b/aten/src/ATen/native/AveragePool3d.cpp index c2d7b44a5076c..701ad09bfd512 100644 --- a/aten/src/ATen/native/AveragePool3d.cpp +++ b/aten/src/ATen/native/AveragePool3d.cpp @@ -25,7 +25,7 @@ TORCH_META_FUNC(avg_pool3d) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override + std::optional divisor_override ) { // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, @@ -94,7 +94,7 @@ TORCH_META_FUNC(avg_pool3d_backward) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override + std::optional divisor_override ) { // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, @@ -174,7 +174,7 @@ static void avg_pool3d_out_frame( int padW, int padH, bool count_include_pad, - c10::optional divisor_override) + std::optional divisor_override) { at::parallel_for(0, nslices, 0, [&](int64_t start, int64_t end) { for (const auto k : c10::irange(start, end)) { @@ -261,7 +261,7 @@ TORCH_IMPL_FUNC(avg_pool3d_out_cpu) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& output ) { const int kT = safe_downcast(kernel_size[0]); @@ -362,7 +362,7 @@ static void avg_pool3d_backward_out_frame( int padW, int padH, bool count_include_pad, - c10::optional divisor_override) + std::optional divisor_override) { at::parallel_for(0, nslices, 0, [&](int64_t start, int64_t end) { for (const auto k : c10::irange(start, end)) { @@ -441,7 +441,7 @@ TORCH_IMPL_FUNC(avg_pool3d_backward_out_cpu) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& gradInput ) { const int kT = safe_downcast(kernel_size[0]); diff --git a/aten/src/ATen/native/BatchLinearAlgebra.cpp b/aten/src/ATen/native/BatchLinearAlgebra.cpp index 40e6b34dc9725..ce4b4d15b7968 100644 --- a/aten/src/ATen/native/BatchLinearAlgebra.cpp +++ b/aten/src/ATen/native/BatchLinearAlgebra.cpp @@ -656,7 +656,7 @@ TORCH_META_FUNC(linalg_qr)(const Tensor& A, TORCH_META_FUNC(_linalg_svd)(const Tensor& A, bool full_matrices, bool compute_uv, - c10::optional driver) { + std::optional driver) { at::native::checkIsMatrix(A, "linalg.svd"); at::native::checkFloatingOrComplex(A, "linalg.svd"); @@ -3128,7 +3128,7 @@ DEFINE_DISPATCH(svd_stub); TORCH_IMPL_FUNC(_linalg_svd_out)(const Tensor& A, const bool full_matrices, const bool compute_uv, - c10::optional driver, + std::optional driver, const Tensor & U, const Tensor & S, const Tensor & Vh) { @@ -3177,7 +3177,7 @@ TORCH_IMPL_FUNC(_linalg_svd_out)(const Tensor& A, std::tuple linalg_svd_out(const Tensor& A, bool full_matrices, - c10::optional driver, + std::optional driver, Tensor & U, Tensor & S, Tensor & Vh) { @@ -3196,12 +3196,12 @@ 
linalg_svd_out(const Tensor& A, } std::tuple linalg_svd(const Tensor& A, bool full_matrices, - c10::optional driver) { + std::optional driver) { return at::_linalg_svd(A, full_matrices, /*compute_uv=*/true, driver); } // See note in linalg_svd for why this function does not have an _ex variant -Tensor& linalg_svdvals_out(const Tensor& A, c10::optional driver, Tensor & S) { +Tensor& linalg_svdvals_out(const Tensor& A, std::optional driver, Tensor & S) { // Dummies auto U = at::empty({0}, A.options()); auto Vh = at::empty({0}, A.options()); @@ -3209,7 +3209,7 @@ Tensor& linalg_svdvals_out(const Tensor& A, c10::optional driv return S; } -Tensor linalg_svdvals(const Tensor& A, c10::optional driver) { +Tensor linalg_svdvals(const Tensor& A, std::optional driver) { return std::get<1>(at::_linalg_svd(A, /*full_matrices=*/false, /*compute_uv=*/_may_require_fw_or_bw_grad(A), /*driver=*/driver)); @@ -3469,7 +3469,7 @@ static void linalg_lstsq_out_info( } } -static std::string get_default_lstsq_driver(c10::optional driver, const Tensor& input) { +static std::string get_default_lstsq_driver(std::optional driver, const Tensor& input) { // if `driver` is empty, we set driver_str to "gels" if working with CUDA tensors, // otherwise to "gelsy" driver. std::string driver_str; @@ -3505,8 +3505,8 @@ static std::string get_default_lstsq_driver(c10::optional driv std::tuple linalg_lstsq_out( const Tensor& input, const Tensor& other, - c10::optional rcond, - c10::optional driver, + std::optional rcond, + std::optional driver, Tensor& solution, Tensor& residuals, Tensor& rank, @@ -3668,8 +3668,8 @@ std::tuple linalg_lstsq_out( std::tuple linalg_lstsq( const Tensor& input, const Tensor& other, - c10::optional rcond, - c10::optional driver) { + std::optional rcond, + std::optional driver) { Tensor solution = at::empty({0}, input.options()); Tensor residuals = at::empty({0}, input.options().dtype(toRealValueType(input.scalar_type()))); Tensor rank = at::empty({0}, input.options().dtype(at::kLong)); @@ -4003,7 +4003,7 @@ Tensor linalg_solve_triangular( Tensor linalg_vander_symint( const Tensor& x, - c10::optional N) { + std::optional N) { auto t = x.scalar_type(); TORCH_CHECK(t == ScalarType::Float || t == ScalarType::Double || diff --git a/aten/src/ATen/native/BatchLinearAlgebra.h b/aten/src/ATen/native/BatchLinearAlgebra.h index efbe7ce1b9d1c..c8402640aa08a 100644 --- a/aten/src/ATen/native/BatchLinearAlgebra.h +++ b/aten/src/ATen/native/BatchLinearAlgebra.h @@ -304,7 +304,7 @@ using svd_fn = void (*)( const Tensor& /*A*/, const bool /*full_matrices*/, const bool /*compute_uv*/, - const c10::optional& /*driver*/, + const std::optional& /*driver*/, const Tensor& /*U*/, const Tensor& /*S*/, const Tensor& /*Vh*/, diff --git a/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp b/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp index f29970afe2b44..79e7b8b049381 100644 --- a/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp +++ b/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp @@ -1087,7 +1087,7 @@ static void apply_svd(const Tensor& A, void svd_kernel(const Tensor& A, const bool full_matrices, const bool compute_uv, - const c10::optional& driver, + const std::optional& driver, const Tensor& U, const Tensor& S, const Tensor& Vh, diff --git a/aten/src/ATen/native/BinaryOps.cpp b/aten/src/ATen/native/BinaryOps.cpp index 78f57470a922d..19c70672fb93c 100644 --- a/aten/src/ATen/native/BinaryOps.cpp +++ b/aten/src/ATen/native/BinaryOps.cpp @@ -173,7 +173,7 @@ TORCH_META_FUNC2(div, Tensor) (const Tensor& self, const Tensor& 
other) { build_borrowing_binary_float_op(maybe_get_output(), self, other); } -TORCH_META_FUNC2(div, Tensor_mode) (const Tensor& self, const Tensor& other, c10::optional rounding_mode) { +TORCH_META_FUNC2(div, Tensor_mode) (const Tensor& self, const Tensor& other, std::optional rounding_mode) { if (!rounding_mode.has_value()) { build_borrowing_binary_float_op(maybe_get_output(), self, other); // NOLINTNEXTLINE(bugprone-branch-clone) @@ -303,7 +303,7 @@ TORCH_META_FUNC2(xlogy, Tensor) (const Tensor& self, const Tensor& other) { build_borrowing_binary_float_op(maybe_get_output(), self, other); } -TORCH_META_FUNC(logit_backward) (const Tensor& grad_output, const Tensor& input, c10::optional eps) { +TORCH_META_FUNC(logit_backward) (const Tensor& grad_output, const Tensor& input, std::optional eps) { build_borrowing_binary_op(maybe_get_output(), grad_output, input); } @@ -448,7 +448,7 @@ TORCH_IMPL_FUNC(div_out) (const Tensor& self, const Tensor& other, const Tensor& } TORCH_IMPL_FUNC(div_out_mode) ( - const Tensor& self, const Tensor& other, c10::optional rounding_mode, const Tensor& result + const Tensor& self, const Tensor& other, std::optional rounding_mode, const Tensor& result ) { if (!rounding_mode.has_value()) { div_true_stub(device_type(), *this); @@ -459,7 +459,7 @@ TORCH_IMPL_FUNC(div_out_mode) ( } } -TORCH_IMPL_FUNC(logit_backward_out) (const Tensor& grad_output, const Tensor& input, c10::optional eps, const Tensor& result) { +TORCH_IMPL_FUNC(logit_backward_out) (const Tensor& grad_output, const Tensor& input, std::optional eps, const Tensor& result) { logit_backward_stub(device_type(), *this, Scalar(eps ? eps.value() : -1.0)); } @@ -896,11 +896,11 @@ Tensor& div_(Tensor& self, const Scalar& other) { return self.div_(wrapped_scalar_tensor(other)); // redispatch! } -Tensor div(const Tensor& self, const Scalar& other, c10::optional rounding_mode) { +Tensor div(const Tensor& self, const Scalar& other, std::optional rounding_mode) { return self.div(wrapped_scalar_tensor(other), std::move(rounding_mode)); // redispatch! } -Tensor& div_(Tensor& self, const Scalar& other, c10::optional rounding_mode) { +Tensor& div_(Tensor& self, const Scalar& other, std::optional rounding_mode) { return self.div_(wrapped_scalar_tensor(other), std::move(rounding_mode)); // redispatch! 
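// Illustrative sketch, not taken from this patch: the div/divide overloads in the
// BinaryOps.cpp hunks above carry the rounding mode as an optional string view
// (presumably std::optional<c10::string_view> with the stripped template argument
// restored). A minimal caller-side sketch of how that optional behaves; the function
// name div_rounding_modes is hypothetical.
#include <ATen/ATen.h>
#include <optional>

static void div_rounding_modes(const at::Tensor& a, const at::Tensor& b) {
  auto true_div  = at::div(a, b);                      // no rounding mode: true (floating-point) division
  auto trunc_div = at::div(a, b, "trunc");             // a string literal converts into the optional
  auto floor_div = at::div(a, b, "floor");
  auto same      = at::div(a, b, /*rounding_mode=*/std::nullopt);  // behaves like the two-argument call
  (void)true_div; (void)trunc_div; (void)floor_div; (void)same;
}
// End of sketch.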
} @@ -925,23 +925,23 @@ Tensor& divide_(Tensor& self, const Scalar& other) { return self.div_(other); } -Tensor& divide_out(const Tensor& self, const Tensor& other, c10::optional rounding_mode, Tensor& result) { +Tensor& divide_out(const Tensor& self, const Tensor& other, std::optional rounding_mode, Tensor& result) { return at::div_out(result, self, other, std::move(rounding_mode)); } -Tensor divide(const Tensor& self, const Tensor& other, c10::optional rounding_mode) { +Tensor divide(const Tensor& self, const Tensor& other, std::optional rounding_mode) { return self.div(other, std::move(rounding_mode)); } -Tensor& divide_(Tensor& self, const Tensor& other, c10::optional rounding_mode) { +Tensor& divide_(Tensor& self, const Tensor& other, std::optional rounding_mode) { return self.div_(other, std::move(rounding_mode)); } -Tensor divide(const Tensor& self, const Scalar& other, c10::optional rounding_mode) { +Tensor divide(const Tensor& self, const Scalar& other, std::optional rounding_mode) { return self.div(other, std::move(rounding_mode)); } -Tensor& divide_(Tensor& self, const Scalar& other, c10::optional rounding_mode) { +Tensor& divide_(Tensor& self, const Scalar& other, std::optional rounding_mode) { return self.div_(other, std::move(rounding_mode)); } diff --git a/aten/src/ATen/native/Bucketization.cpp b/aten/src/ATen/native/Bucketization.cpp index 736273a40cb09..98e37af91b316 100644 --- a/aten/src/ATen/native/Bucketization.cpp +++ b/aten/src/ATen/native/Bucketization.cpp @@ -146,8 +146,8 @@ Tensor& searchsorted_out_cpu( const Tensor& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter_opt, + const std::optional side_opt, + const std::optional& sorter_opt, Tensor& result) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned sorter_maybe_owned = at::borrow_from_optional_tensor(sorter_opt); @@ -193,8 +193,8 @@ Tensor& searchsorted_out_cpu( const Scalar& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter_opt, + const std::optional side_opt, + const std::optional& sorter_opt, Tensor& result) { const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device()); return searchsorted_out_cpu(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter_opt, result); @@ -205,8 +205,8 @@ Tensor searchsorted_cpu( const Tensor& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter_opt) { + const std::optional side_opt, + const std::optional& sorter_opt) { ScalarType scalar_type = out_int32 ? 
ScalarType::Int : ScalarType::Long; c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type); Tensor result = at::empty({0}, options, MemoryFormat::Contiguous); @@ -219,8 +219,8 @@ Tensor searchsorted_cpu( const Scalar& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter_opt) { + const std::optional side_opt, + const std::optional& sorter_opt) { const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device()); return searchsorted_cpu(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter_opt); } diff --git a/aten/src/ATen/native/BucketizationUtils.h b/aten/src/ATen/native/BucketizationUtils.h index 59d459bd9c29e..90747c264b156 100644 --- a/aten/src/ATen/native/BucketizationUtils.h +++ b/aten/src/ATen/native/BucketizationUtils.h @@ -107,7 +107,7 @@ inline void searchsorted_pre_check( const Tensor& output, const bool out_int32, const bool right, - const c10::optional side_opt, + const std::optional side_opt, const Tensor& sorter) { if (side_opt) { const c10::string_view side = *side_opt; diff --git a/aten/src/ATen/native/CPUFallback.cpp b/aten/src/ATen/native/CPUFallback.cpp index 502c61e4d144c..1d0930cf3a5ea 100644 --- a/aten/src/ATen/native/CPUFallback.cpp +++ b/aten/src/ATen/native/CPUFallback.cpp @@ -48,7 +48,7 @@ static std::vector to_cpu(const at::TensorList& tensors) { return cpu_tensors; } -static c10::optional compute_target_device(std::vector& t_args, std::vector> tlist_args) { +static std::optional compute_target_device(std::vector& t_args, std::vector> tlist_args) { // Decide what device to move the output tensor(s) to. // The current convention is that we use the first tensor arg to pick the device // Barring that, we take the first tensor from a TensorList arg. @@ -89,7 +89,7 @@ void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool std::vector> tensorlist_args; std::vector tensorlist_args_indices; - c10::optional tgt_device = c10::nullopt; + std::optional tgt_device = c10::nullopt; // save converted cpu tensor for TensorList std::vector tensorlist_cpu_args; diff --git a/aten/src/ATen/native/ComparisonUtils.cpp b/aten/src/ATen/native/ComparisonUtils.cpp index 5a1138d041b1c..57f00ec86137f 100644 --- a/aten/src/ATen/native/ComparisonUtils.cpp +++ b/aten/src/ATen/native/ComparisonUtils.cpp @@ -25,7 +25,7 @@ void _assert_match(const O& original, const C& compared, const std::string& name } } -void _assert_tensor_metadata(at::Tensor const& tensor, at::OptionalIntArrayRef sizes, at::OptionalIntArrayRef strides, c10::optional dtype) { +void _assert_tensor_metadata(at::Tensor const& tensor, at::OptionalIntArrayRef sizes, at::OptionalIntArrayRef strides, std::optional dtype) { _assert_match(tensor.sizes(), sizes, "sizes"); _assert_match(tensor.strides(), strides, "strides"); _assert_match(tensor.dtype(), dtype, "dtype"); diff --git a/aten/src/ATen/native/Constraints.cpp b/aten/src/ATen/native/Constraints.cpp index 8f3f8c11e696c..21a64537af283 100644 --- a/aten/src/ATen/native/Constraints.cpp +++ b/aten/src/ATen/native/Constraints.cpp @@ -24,8 +24,8 @@ namespace at::native { void sym_constrain_range( const Scalar& size, - c10::optional min, - c10::optional max) { + std::optional min, + std::optional max) { int64_t min_val = min.has_value() ? min.value() : std::numeric_limits::min(); int64_t max_val = max.has_value() ? 
max.value() : std::numeric_limits::max(); @@ -53,14 +53,14 @@ void sym_constrain_range( Tensor _functional_sym_constrain_range( const Scalar& size, - c10::optional min, - c10::optional max, + std::optional min, + std::optional max, const Tensor& dep_token) { sym_constrain_range(size, min, max); return dep_token.clone(); } -void sym_constrain_range_for_size(const Scalar& size, c10::optional min, c10::optional max) { +void sym_constrain_range_for_size(const Scalar& size, std::optional min, c10::optional max) { int64_t min_val = min.has_value() ? min.value() : 0; if (max.has_value() && max.value() <= 2) { TORCH_CHECK(false, "Max value to constrain_range_for_size must be greater than 2. got: ", max.value()); @@ -70,19 +70,19 @@ void sym_constrain_range_for_size(const Scalar& size, c10::optional min Tensor _functional_sym_constrain_range_for_size( const Scalar& size, - c10::optional min, - c10::optional max, + std::optional min, + std::optional max, const Tensor& dep_token) { sym_constrain_range_for_size(size, min, max); return dep_token.clone(); } Tensor _make_dep_token_cpu( - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt) { return at::empty( {}, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt); } diff --git a/aten/src/ATen/native/ConvUtils.h b/aten/src/ATen/native/ConvUtils.h index 4b814f3e442cb..d504d088a8620 100644 --- a/aten/src/ATen/native/ConvUtils.h +++ b/aten/src/ATen/native/ConvUtils.h @@ -44,7 +44,7 @@ using mkldnn_convolution_backward_fn = std::tuple); DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub); -using mkldnn_convolution_transpose_fn = Tensor(*)(const Tensor&, const Tensor&, const c10::optional&, +using mkldnn_convolution_transpose_fn = Tensor(*)(const Tensor&, const Tensor&, const std::optional&, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t); DECLARE_DISPATCH(mkldnn_convolution_transpose_fn, mkldnn_convolution_transpose_stub); using mkldnn_convolution_transpose_backward_fn = std::tuple(*)( @@ -117,7 +117,7 @@ enum class ConvBackend { // Overload for selecting the convolution backend from the full set of convolution inputs. // This overload is exposed to python for testing, etc. 
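// Illustrative sketch, not taken from this patch: sym_constrain_range in the
// Constraints.cpp hunk above defaults absent bounds to the widest possible range.
// The same pattern written against plain std::optional<int64_t>; check_range is a
// hypothetical stand-in, not a PyTorch function.
#include <cstdint>
#include <limits>
#include <optional>
#include <stdexcept>

static void check_range(int64_t value,
                        std::optional<int64_t> min = std::nullopt,
                        std::optional<int64_t> max = std::nullopt) {
  // value_or() mirrors the `min.has_value() ? min.value() : numeric_limits::min()`
  // defaulting used above.
  const int64_t lo = min.value_or(std::numeric_limits<int64_t>::min());
  const int64_t hi = max.value_or(std::numeric_limits<int64_t>::max());
  if (value < lo || value > hi) {
    throw std::out_of_range("value outside the permitted [min, max] range");
  }
}
// End of sketch.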
TORCH_API ConvBackend select_conv_backend( - const Tensor& input, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input, const Tensor& weight, const std::optional& bias_opt, SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation, bool transposed, SymIntArrayRef output_padding, c10::SymInt groups, const at::OptionalSymIntArrayRef bias_sizes_opt); @@ -360,7 +360,7 @@ static inline bool miopen_conv_use_channels_last(const at::Tensor& input, const bool can_use_miopen_channels_last_2d = false; // TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen // See #64427 - static c10::optional PYTORCH_MIOPEN_SUGGEST_NHWC = c10::utils::check_env("PYTORCH_MIOPEN_SUGGEST_NHWC"); + static std::optional PYTORCH_MIOPEN_SUGGEST_NHWC = c10::utils::check_env("PYTORCH_MIOPEN_SUGGEST_NHWC"); auto input_memory_format = input.suggest_memory_format(); auto weight_memory_format = weight.suggest_memory_format(); diff --git a/aten/src/ATen/native/Convolution.cpp b/aten/src/ATen/native/Convolution.cpp index 717280a6cdcab..ecedc73579d66 100644 --- a/aten/src/ATen/native/Convolution.cpp +++ b/aten/src/ATen/native/Convolution.cpp @@ -368,7 +368,7 @@ struct ConvParams { } } - bool use_cpu_depthwise3x3_winograd(const at::Tensor& input, const at::Tensor& weight, const c10::optional& bias) const { + bool use_cpu_depthwise3x3_winograd(const at::Tensor& input, const at::Tensor& weight, const std::optional& bias) const { #if defined(__ARM_NEON__) // Currently only 3x3 depthwise convolutions on tensors of float are supported. return (input.ndimension() == 4) && @@ -878,7 +878,7 @@ at::Tensor complex_convolution( at::Tensor complex_convolution_mode( const at::Tensor& input, const at::Tensor& weight, - const c10::optional& bias_opt, + const std::optional& bias_opt, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, @@ -908,7 +908,7 @@ at::Tensor complex_convolution_mode( } // namespace at::Tensor conv1d_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input_, const Tensor& weight, const std::optional& bias_opt, SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation, c10::SymInt groups) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -933,7 +933,7 @@ at::Tensor conv1d_symint( } at::Tensor conv2d_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input_, const Tensor& weight, const std::optional& bias_opt, SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation, c10::SymInt groups) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -958,7 +958,7 @@ at::Tensor conv2d_symint( } at::Tensor conv3d_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input_, const Tensor& weight, const std::optional& bias_opt, SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef dilation, c10::SymInt groups) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -1049,7 +1049,7 @@ static Tensor convolution_same( } Tensor _convolution_mode_symint( - const Tensor& input, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input, const Tensor& weight, const std::optional& bias_opt, 
SymIntArrayRef stride, c10::string_view padding, SymIntArrayRef dilation, c10::SymInt groups) { // See [Note: hacky wrapper removal for optional tensor] @@ -1067,7 +1067,7 @@ Tensor _convolution_mode_symint( } at::Tensor conv1d_padding_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias, + const Tensor& input_, const Tensor& weight, const std::optional& bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { auto [input, is_batched] = batchify(input_, /*num_spatial_dims=*/ 1, "conv1d"); @@ -1081,7 +1081,7 @@ at::Tensor conv1d_padding_symint( } at::Tensor conv2d_padding_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias, + const Tensor& input_, const Tensor& weight, const std::optional& bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { auto [input, is_batched] = batchify(input_, /*num_spatial_dims=*/ 2, "conv2d"); @@ -1095,7 +1095,7 @@ at::Tensor conv2d_padding_symint( } at::Tensor conv3d_padding_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias, + const Tensor& input_, const Tensor& weight, const std::optional& bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { auto [input, is_batched] = batchify(input_, /*num_spatial_dims=*/ 3, "conv3d"); @@ -1109,7 +1109,7 @@ at::Tensor conv3d_padding_symint( } at::Tensor conv_transpose1d_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input_, const Tensor& weight, const std::optional& bias_opt, SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef output_padding, c10::SymInt groups, SymIntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -1128,7 +1128,7 @@ at::Tensor conv_transpose1d_symint( } at::Tensor conv_transpose2d_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input_, const Tensor& weight, const std::optional& bias_opt, SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef output_padding, c10::SymInt groups, SymIntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -1147,7 +1147,7 @@ at::Tensor conv_transpose2d_symint( } at::Tensor conv_transpose3d_symint( - const Tensor& input_, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input_, const Tensor& weight, const std::optional& bias_opt, SymIntArrayRef stride, SymIntArrayRef padding, SymIntArrayRef output_padding, c10::SymInt groups, SymIntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -1166,7 +1166,7 @@ at::Tensor conv_transpose3d_symint( } at::Tensor convolution( - const Tensor& input, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input, const Tensor& weight, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { // See [Note: hacky wrapper removal for optional tensor] @@ -1182,7 +1182,7 @@ at::Tensor convolution( } at::Tensor convolution_overrideable( - const Tensor& input, const Tensor& weight, const c10::optional& bias_opt, + const Tensor& input, 
const Tensor& weight, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { TORCH_CHECK_NOT_IMPLEMENTED(false, "convolution_overrideable not implemented. You are likely triggering this with tensor backend other than CPU/CUDA/MKLDNN, if this is intended, please use TORCH_LIBRARY_IMPL to override this function "); @@ -1197,7 +1197,7 @@ template ConvBackend _select_conv_backend( const Tensor& input, const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const at::OptionalArrayRef bias_sizes_opt, const bool need_backward, const ConvParams& params) { @@ -1304,7 +1304,7 @@ ConvBackend _select_conv_backend( // Selects a backend for convolution based on the inputs and params. ConvBackend select_conv_backend( - const Tensor& input_r, const Tensor& weight_r, const c10::optional& bias_opt, + const Tensor& input_r, const Tensor& weight_r, const std::optional& bias_opt, SymIntArrayRef stride_, SymIntArrayRef padding_, SymIntArrayRef dilation_, bool transposed_, SymIntArrayRef output_padding_, c10::SymInt groups_, const at::OptionalSymIntArrayRef bias_sizes_opt) { c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -1339,7 +1339,7 @@ ConvBackend select_conv_backend( weight = view4d(weight); } - auto bias_sizes = bias.defined() ? c10::optional(bias.sym_sizes()) : bias_sizes_opt; + auto bias_sizes = bias.defined() ? std::optional(bias.sym_sizes()) : bias_sizes_opt; bool need_backward = GradMode::is_enabled() && (input.requires_grad() || weight.requires_grad() || (bias.defined() && bias.requires_grad())); return _select_conv_backend(input, weight, bias, bias_sizes, need_backward, params); @@ -1461,7 +1461,7 @@ at::MemoryFormat _determine_backend_memory_format( } at::Tensor _convolution( - const Tensor& input_r, const Tensor& weight_r, const c10::optional& bias_r_opt, + const Tensor& input_r, const Tensor& weight_r, const std::optional& bias_r_opt, IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_, bool transposed_, IntArrayRef output_padding_, int64_t groups_, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { @@ -1504,7 +1504,7 @@ at::Tensor _convolution( } // Select appropriate backend to use. - auto bias_sizes_opt = bias.defined() ? c10::optional(bias.sizes()) : c10::nullopt; + auto bias_sizes_opt = bias.defined() ? 
std::optional(bias.sizes()) : c10::nullopt; bool need_backward = GradMode::is_enabled() && (input.requires_grad() || weight.requires_grad() || (bias.defined() && bias.requires_grad())); ConvBackend backend = _select_conv_backend(input, weight, bias, c10::OptionalIntArrayRef(bias_sizes_opt), need_backward, params); @@ -1701,7 +1701,7 @@ at::Tensor _convolution( } at::Tensor _convolution( - const Tensor& input_r, const Tensor& weight_r, const c10::optional& bias_r_opt, + const Tensor& input_r, const Tensor& weight_r, const std::optional& bias_r_opt, IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_, bool transposed_, IntArrayRef output_padding_, int64_t groups_, bool benchmark, bool deterministic, bool cudnn_enabled) @@ -1730,7 +1730,7 @@ static Tensor subvariable(const Tensor& var, int dim, int groups, int g) { return result; } -std::tuple _convolution_double_backward( const c10::optional& ggI_opt, const c10::optional& ggW_r_opt, const c10::optional& ggb_opt, +std::tuple _convolution_double_backward( const std::optional& ggI_opt, const c10::optional& ggW_r_opt, const c10::optional& ggb_opt, const Tensor& gO_r, const Tensor& weight_r, const Tensor& input, IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_, bool transposed_, IntArrayRef output_padding_, int64_t groups_, diff --git a/aten/src/ATen/native/ConvolutionMM2d.cpp b/aten/src/ATen/native/ConvolutionMM2d.cpp index 6f8a3477c239c..686948584c728 100644 --- a/aten/src/ATen/native/ConvolutionMM2d.cpp +++ b/aten/src/ATen/native/ConvolutionMM2d.cpp @@ -538,7 +538,7 @@ static void slow_conv2d_backward_weight_out_cpu_template( Tensor& slow_conv2d_forward_out_cpu( const Tensor& self, const Tensor& weight_, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, Tensor& output) { @@ -627,7 +627,7 @@ Tensor& slow_conv2d_forward_out_cpu( Tensor slow_conv2d_forward_cpu( const Tensor& self, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding) { // See [Note: hacky wrapper removal for optional tensor] @@ -726,7 +726,7 @@ std::tuple slow_conv2d_backward_cpu( return std::make_tuple(grad_input, grad_weight, grad_bias); } -Tensor & thnn_conv2d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, Tensor & output) { +Tensor & thnn_conv2d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, Tensor & output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; @@ -734,7 +734,7 @@ Tensor & thnn_conv2d_out(const Tensor & self, const Tensor & weight, IntArrayRef return at::_slow_conv2d_forward_out(output, self, weight, kernel_size, bias, stride, padding); } -Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional& bias_opt, IntArrayRef stride, IntArrayRef padding) { +Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); 
const Tensor& bias = *bias_maybe_owned; diff --git a/aten/src/ATen/native/ConvolutionMM3d.cpp b/aten/src/ATen/native/ConvolutionMM3d.cpp index 1d5e7a8333def..f361b3a819129 100644 --- a/aten/src/ATen/native/ConvolutionMM3d.cpp +++ b/aten/src/ATen/native/ConvolutionMM3d.cpp @@ -553,7 +553,7 @@ static void slow_conv3d_backward_parameters_out_cpu_template( Tensor& slow_conv3d_forward_out_cpu(const Tensor& self, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, Tensor& output) { @@ -668,7 +668,7 @@ Tensor& slow_conv3d_forward_out_cpu(const Tensor& self, Tensor slow_conv3d_forward_cpu( const Tensor& self, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding) { // See [Note: hacky wrapper removal for optional tensor] @@ -771,7 +771,7 @@ std::tuple slow_conv3d_backward_cpu( Tensor& slow_conv3d_out(const Tensor& self, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, Tensor& output) { @@ -792,7 +792,7 @@ Tensor& slow_conv3d_out(const Tensor& self, Tensor slow_conv3d( const Tensor& self, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding) { // See [Note: hacky wrapper removal for optional tensor] diff --git a/aten/src/ATen/native/Correlation.cpp b/aten/src/ATen/native/Correlation.cpp index 95384684961a4..5482a8e0a597a 100644 --- a/aten/src/ATen/native/Correlation.cpp +++ b/aten/src/ATen/native/Correlation.cpp @@ -24,8 +24,8 @@ namespace at::native { Tensor cov( const Tensor& self, int64_t correction, - const c10::optional& fweights, - const c10::optional& aweights) { + const std::optional& fweights, + const std::optional& aweights) { constexpr int64_t OBSERVATIONS_DIM = 1; TORCH_CHECK( diff --git a/aten/src/ATen/native/Cross.cpp b/aten/src/ATen/native/Cross.cpp index 99f0760fcc0f4..7297aaed80d38 100644 --- a/aten/src/ATen/native/Cross.cpp +++ b/aten/src/ATen/native/Cross.cpp @@ -40,7 +40,7 @@ namespace at::native { DEFINE_DISPATCH(cross_stub); -static int64_t _default_cross_dim(const c10::optional &dimension, SymIntArrayRef sizes) { +static int64_t _default_cross_dim(const std::optional &dimension, SymIntArrayRef sizes) { // If dimension is not given, it defaults to the first dimension found with the size 3. // Note that this behaviour might be unexpected. 
// _default_cross_dim is called internally inside the cross implementation to calculate @@ -57,7 +57,7 @@ static int64_t _default_cross_dim(const c10::optional &dimension, SymIn TORCH_CHECK(false, "no dimension of size 3 in input"); } -Tensor cross(const Tensor & input, const Tensor & other, const c10::optional dimension) { +Tensor cross(const Tensor & input, const Tensor & other, const std::optional dimension) { if (!dimension) { TORCH_WARN_ONCE( "Using torch.cross without specifying the dim arg is deprecated.\n", @@ -69,7 +69,7 @@ Tensor cross(const Tensor & input, const Tensor & other, const c10::optional dimension, Tensor & out) { +Tensor & cross_out(const Tensor & input, const Tensor & other, const std::optional dimension, Tensor & out) { auto dim = _default_cross_dim(dimension, input.sym_sizes()); return at::linalg_cross_out(out, input, other, dim); } diff --git a/aten/src/ATen/native/Distance.cpp b/aten/src/ATen/native/Distance.cpp index 5af87802a1246..942461c7612c1 100644 --- a/aten/src/ATen/native/Distance.cpp +++ b/aten/src/ATen/native/Distance.cpp @@ -78,7 +78,7 @@ Tensor _euclidean_dist(const Tensor& x1, const Tensor& x2) { return result; } -static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10::optional compute_mode) { +static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, std::optional compute_mode) { TORCH_CHECK(at::isFloatingType(x1.scalar_type()), "cdist only supports floating-point dtypes, X1 got: ", x1.scalar_type()); auto device1 = x1.device().type(); TORCH_CHECK(at::isFloatingType(x2.scalar_type()), "cdist only supports floating-point dtypes, X2 got: ", x2.scalar_type()); @@ -147,7 +147,7 @@ static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10 return result; } -Tensor cdist(const Tensor& x1, const Tensor& x2, const double p, c10::optional compute_mode) { +Tensor cdist(const Tensor& x1, const Tensor& x2, const double p, std::optional compute_mode) { TORCH_CHECK(x1.dim() >= 2, "cdist only supports at least 2D tensors, X1 got: ", x1.dim(), "D"); TORCH_CHECK(x2.dim() >= 2, "cdist only supports at least 2D tensors, X2 got: ", x2.dim(), "D"); TORCH_CHECK(x1.sym_size(-1) == x2.sym_size(-1), "X1 and X2 must have the same number of columns. X1: ", x1.sym_size(-1), " X2: ", x2.sym_size(-1)); @@ -175,7 +175,7 @@ Tensor cdist(const Tensor& x1, const Tensor& x2, const double p, c10::optional compute_mode) { +Tensor _cdist_forward(const Tensor& x1, const Tensor& x2, const double p, std::optional compute_mode) { TORCH_CHECK(x1.dim() >= 2, "cdist only supports at least 2D tensors, X1 got: ", x1.dim(), "D"); TORCH_CHECK(x2.dim() >= 2, "cdist only supports at least 2D tensors, X2 got: ", x2.dim(), "D"); TORCH_CHECK(x1.size(-1) == x2.size(-1), "X1 and X2 must have the same number of columns. 
X1: ", x1.size(-1), " X2: ", x2.size(-1)); diff --git a/aten/src/ATen/native/DistributionTemplates.h b/aten/src/ATen/native/DistributionTemplates.h index a5ed9526c270d..ba72f0df11a0a 100644 --- a/aten/src/ATen/native/DistributionTemplates.h +++ b/aten/src/ATen/native/DistributionTemplates.h @@ -81,7 +81,7 @@ int64_t update_to(int64_t to) { } template class random_kernel, typename RNG> -at::Tensor& random_impl(at::Tensor& self, c10::optional generator) { +at::Tensor& random_impl(at::Tensor& self, std::optional generator) { CHECK_EMPTY_AND_RETURN(self); auto iter = at::TensorIterator::borrowing_nullary_op(self); random_kernel()(iter, generator); @@ -132,7 +132,7 @@ static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMet } template class random_from_to_kernel, typename RNG> -at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional to_opt, c10::optional generator) { +at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, std::optional to_opt, c10::optional generator) { uint64_t range = 0; auto iter = at::TensorIterator::borrowing_nullary_op(self); if (to_opt.has_value()) { @@ -200,7 +200,7 @@ at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional= 0.0, "normal expects std >= 0.0, but found std ", std); template class normal_kernel, typename RNG> -Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional gen) { +Tensor& normal_impl_(Tensor& self, double mean, double std, std::optional gen) { CHECK_NORMAL_STD(std); CHECK_EMPTY_AND_RETURN(self); @@ -216,7 +216,7 @@ Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional class normal_kernel, typename RNG> -Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional gen) { +Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, std::optional gen) { CHECK_NORMAL_STD(std); auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous); auto shape = at::infer_size(mean.sizes(), std_tensor.sizes()); @@ -227,7 +227,7 @@ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::opt } template class normal_kernel, typename RNG> -Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional gen) { +Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, std::optional gen) { CHECK_NORMAL_TENSOR_STD(std); auto mean_tensor = at::full({}, mean, output.options()); auto shape = at::infer_size(mean_tensor.sizes(), std.sizes()); @@ -242,7 +242,7 @@ Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::opt } template class normal_kernel, typename RNG> -Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional gen) { +Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, std::optional gen) { CHECK_NORMAL_TENSOR_STD(std); auto shape = at::infer_size(mean.sizes(), std.sizes()); at::native::resize_output(output, shape); @@ -256,7 +256,7 @@ Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c } template class normal_kernel, typename RNG> -Tensor normal_impl(const Tensor& mean, double std, c10::optional gen) { +Tensor normal_impl(const Tensor& mean, double std, std::optional gen) { CHECK_NORMAL_STD(std); Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous); normal_out_impl(ret, mean, std, gen); @@ -264,7 +264,7 @@ Tensor normal_impl(const Tensor& mean, double std, c10::optional gen) } template class normal_kernel, typename RNG> -Tensor 
normal_impl(double mean, const Tensor& std, c10::optional gen) { +Tensor normal_impl(double mean, const Tensor& std, std::optional gen) { CHECK_NORMAL_TENSOR_STD(std); Tensor ret = at::empty_like(std, MemoryFormat::Contiguous); normal_out_impl(ret, mean, std, gen); @@ -272,7 +272,7 @@ Tensor normal_impl(double mean, const Tensor& std, c10::optional gen) } template class normal_kernel, typename RNG> -Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional gen) { +Tensor normal_impl(const Tensor& mean, const Tensor& std, std::optional gen) { CHECK_NORMAL_TENSOR_STD(std); auto shape = at::infer_size(mean.sizes(), std.sizes()); Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous); @@ -283,7 +283,7 @@ Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional class uniform_kernel, typename RNG> -at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional generator) { +at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, std::optional generator) { if (self.is_complex()) { CHECK_EMPTY_AND_RETURN(self); auto float_tensor = at::view_as_real(self); @@ -313,7 +313,7 @@ at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optiona // ================================================== LogNormal ======================================================= template class log_normal_kernel, typename RNG> -at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional gen) { +at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, std::optional gen) { TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std); CHECK_EMPTY_AND_RETURN(self); auto iter = TensorIterator::borrowing_nullary_op(self); @@ -324,7 +324,7 @@ at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::opt // =================================================== Geometric ====================================================== template class geometric_kernel, typename RNG> -Tensor& geometric_impl_(Tensor& self, double p, c10::optional gen) { +Tensor& geometric_impl_(Tensor& self, double p, std::optional gen) { TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p); CHECK_EMPTY_AND_RETURN(self); auto iter = TensorIterator::borrowing_nullary_op(self); @@ -335,7 +335,7 @@ Tensor& geometric_impl_(Tensor& self, double p, c10::optional gen) { // ================================================== Exponential ===================================================== template class exponential_kernel, typename RNG> -Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional gen) { +Tensor& exponential_impl_(Tensor& self, double lambda, std::optional gen) { TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda); CHECK_EMPTY_AND_RETURN(self); auto iter = TensorIterator::borrowing_nullary_op(self); @@ -346,7 +346,7 @@ Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional // ==================================================== Cauchy ======================================================== template class cauchy_kernel, typename RNG> -Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional gen) { +Tensor& cauchy_impl_(Tensor& self, double median, double sigma, std::optional gen) { // TODO: instead of variable name 'sigma', use 'gamma' or 'scale' // the variance, squared sigma, is undefined for cauchy distribution TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found 
sigma=", sigma); @@ -360,7 +360,7 @@ Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional class bernoulli_tensor_kernel, typename RNG> -Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional gen) { +Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, std::optional gen) { CHECK_EMPTY_AND_RETURN(self); NoNamesGuard guard; at::assert_no_internal_overlap(self); @@ -369,7 +369,7 @@ Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional } template class bernoulli_scalar_kernel, typename RNG> -Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional gen) { +Tensor& bernoulli_impl_(Tensor& self, double p, std::optional gen) { TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p); CHECK_EMPTY_AND_RETURN(self); at::assert_no_internal_overlap(self); @@ -378,7 +378,7 @@ Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional gen) { } template class bernoulli_tensor_kernel, typename RNG> -Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional gen) { +Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, std::optional gen) { // result.resize_as_(self) requires self to have same dtype as result, so we // use resize_ instead. // TODO: Fix resize_as_. See pytorch/pytorch#11665. diff --git a/aten/src/ATen/native/Distributions.cpp b/aten/src/ATen/native/Distributions.cpp index 4d4eb2efaf401..7ecb8ebb9ffc8 100644 --- a/aten/src/ATen/native/Distributions.cpp +++ b/aten/src/ATen/native/Distributions.cpp @@ -160,36 +160,36 @@ DEFINE_DISPATCH(random_full_64_bits_range_stub); template struct BernoulliStub { - void operator()(Tensor& self, const Tensor& p_, c10::optional gen) { + void operator()(Tensor& self, const Tensor& p_, std::optional gen) { bernoulli_tensor_stub(self.device().type(), self, p_, gen); } - void operator()(Tensor& self, double p, c10::optional gen) { + void operator()(Tensor& self, double p, std::optional gen) { bernoulli_scalar_stub(self.device().type(), self, p, gen); } }; -Tensor bernoulli(const Tensor& self, c10::optional gen) { +Tensor bernoulli(const Tensor& self, std::optional gen) { Tensor result = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); result.bernoulli_(self, std::move(gen)); return result; } -Tensor bernoulli(const Tensor& self, double p, c10::optional gen) { +Tensor bernoulli(const Tensor& self, double p, std::optional gen) { Tensor result = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); result.bernoulli_(p, std::move(gen)); return result; } -Tensor& bernoulli_out(const Tensor& self, c10::optional gen, Tensor& result) { +Tensor& bernoulli_out(const Tensor& self, std::optional gen, Tensor& result) { return at::native::templates::bernoulli_out_impl(result, self, std::move(gen)); } -Tensor& bernoulli_(Tensor& self, const Tensor& p_, c10::optional gen) { +Tensor& bernoulli_(Tensor& self, const Tensor& p_, std::optional gen) { return at::native::templates::bernoulli_impl_(self, p_, std::move(gen)); } -Tensor& bernoulli_(Tensor& self, double p, c10::optional gen) { +Tensor& bernoulli_(Tensor& self, double p, std::optional gen) { return at::native::templates::bernoulli_impl_(self, p, std::move(gen)); } @@ -197,12 +197,12 @@ Tensor& bernoulli_(Tensor& self, double p, c10::optional gen) { template struct LogNormalStub { - void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double mean, double std, std::optional gen) { log_normal_stub(iter.device_type(), iter, 
mean, std, gen); } }; -Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional gen) { +Tensor& log_normal_(Tensor& self, double mean, double std, std::optional gen) { return at::native::templates::log_normal_impl_(self, mean, std, std::move(gen)); } @@ -210,12 +210,12 @@ Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional struct CauchyStub { - void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double median, double sigma, std::optional gen) { cauchy_stub(iter.device_type(), iter, median, sigma, gen); } }; -Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional gen) { +Tensor& cauchy_(Tensor& self, double median, double sigma, std::optional gen) { return at::native::templates::cauchy_impl_(self, median, sigma, std::move(gen)); } @@ -223,12 +223,12 @@ Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional struct ExponentialStub { - void operator()(TensorIteratorBase& iter, double lambda, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double lambda, std::optional gen) { exponential_stub(iter.device_type(), iter, lambda, gen); } }; -Tensor& exponential_(Tensor& self, double lambda, c10::optional gen) { +Tensor& exponential_(Tensor& self, double lambda, std::optional gen) { return at::native::templates::exponential_impl_(self, lambda, std::move(gen)); } @@ -236,12 +236,12 @@ Tensor& exponential_(Tensor& self, double lambda, c10::optional gen) template struct GeometricStub { - void operator()(TensorIteratorBase& iter, double p, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double p, std::optional gen) { geometric_stub(iter.device_type(), iter, p, gen); } }; -Tensor& geometric_(Tensor& self, double p, c10::optional gen) { +Tensor& geometric_(Tensor& self, double p, std::optional gen) { return at::native::templates::geometric_impl_(self, p, std::move(gen)); } @@ -249,7 +249,7 @@ Tensor& geometric_(Tensor& self, double p, c10::optional gen) { template struct UniformStub { - void operator()(TensorIteratorBase& iter, double from, double to, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double from, double to, std::optional gen) { uniform_stub(iter.device_type(), iter, from, to, gen); } }; @@ -257,15 +257,15 @@ struct UniformStub { template struct UniformMeta { // No-op! - void operator()(TensorIteratorBase& iter, double from, double to, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double from, double to, std::optional gen) { } }; -Tensor& uniform_(Tensor& self, double from, double to, c10::optional gen) { +Tensor& uniform_(Tensor& self, double from, double to, std::optional gen) { return at::native::templates::uniform_impl_(self, from, to, std::move(gen)); } -Tensor& uniform_meta_(Tensor& self, double from, double to, c10::optional gen) { +Tensor& uniform_meta_(Tensor& self, double from, double to, std::optional gen) { return at::native::templates::uniform_impl_(self, from, to, std::move(gen)); } @@ -273,7 +273,7 @@ Tensor& uniform_meta_(Tensor& self, double from, double to, c10::optional struct NormalStub { - void operator()(Tensor& self, double mean, double std, c10::optional gen) { + void operator()(Tensor& self, double mean, double std, std::optional gen) { normal_stub(self.device().type(), self, mean, std, gen); } }; @@ -281,76 +281,76 @@ struct NormalStub { template struct NormalMeta { // No-op! 
- void operator()(Tensor& self, double mean, double std, c10::optional gen) { + void operator()(Tensor& self, double mean, double std, std::optional gen) { } }; // inplace -Tensor& normal_(Tensor& self, double mean, double std, c10::optional gen) { +Tensor& normal_(Tensor& self, double mean, double std, std::optional gen) { return at::native::templates::normal_impl_(self, mean, std, std::move(gen)); } -Tensor& normal_meta_(Tensor& self, double mean, double std, c10::optional gen) { +Tensor& normal_meta_(Tensor& self, double mean, double std, std::optional gen) { return at::native::templates::normal_impl_(self, mean, std, std::move(gen)); } // out tensor float -Tensor& normal_out(const Tensor& mean, double std, c10::optional gen, Tensor& output) { +Tensor& normal_out(const Tensor& mean, double std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, std::move(gen)); } -Tensor& normal_out_meta(const Tensor& mean, double std, c10::optional gen, Tensor& output) { +Tensor& normal_out_meta(const Tensor& mean, double std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, std::move(gen)); } // out float tensor -Tensor& normal_out(double mean, const Tensor& std, c10::optional gen, Tensor& output) { +Tensor& normal_out(double mean, const Tensor& std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, std::move(gen)); } -Tensor& normal_out_meta(double mean, const Tensor& std, c10::optional gen, Tensor& output) { +Tensor& normal_out_meta(double mean, const Tensor& std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, std::move(gen)); } // out tensor tensor -Tensor& normal_out(const Tensor& mean, const Tensor& std, c10::optional gen, Tensor& output) { +Tensor& normal_out(const Tensor& mean, const Tensor& std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, std::move(gen)); } -Tensor& normal_out_meta(const Tensor& mean, const Tensor& std, c10::optional gen, Tensor& output) { +Tensor& normal_out_meta(const Tensor& mean, const Tensor& std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, std::move(gen)); } // functional tensor float -Tensor normal(const Tensor& mean, double std, c10::optional gen) { +Tensor normal(const Tensor& mean, double std, std::optional gen) { return at::native::templates::normal_impl(mean, std, std::move(gen)); } -Tensor normal_meta(const Tensor& mean, double std, c10::optional gen) { +Tensor normal_meta(const Tensor& mean, double std, std::optional gen) { return at::native::templates::normal_impl(mean, std, std::move(gen)); } // functional float tensor -Tensor normal(double mean, const Tensor& std, c10::optional gen) { +Tensor normal(double mean, const Tensor& std, std::optional gen) { return at::native::templates::normal_impl(mean, std, std::move(gen)); } -Tensor normal_meta(double mean, const Tensor& std, c10::optional gen) { +Tensor normal_meta(double mean, const Tensor& std, std::optional gen) { return at::native::templates::normal_impl(mean, std, std::move(gen)); } // functional tensor tensor -Tensor normal(const Tensor& mean, const Tensor& std, c10::optional gen) { +Tensor normal(const Tensor& mean, const Tensor& std, std::optional gen) { return at::native::templates::normal_impl(mean, std, std::move(gen)); } -Tensor normal_meta(const Tensor& mean, const Tensor& std, 
c10::optional gen) { +Tensor normal_meta(const Tensor& mean, const Tensor& std, std::optional gen) { return at::native::templates::normal_impl(mean, std, std::move(gen)); } // functional variant, only used by the functionalization pass. -Tensor normal_functional(const Tensor& self, double mean, double std, c10::optional generator) { +Tensor normal_functional(const Tensor& self, double mean, double std, std::optional generator) { return self.clone().normal_(mean, std, std::move(generator)); } @@ -358,44 +358,44 @@ Tensor normal_functional(const Tensor& self, double mean, double std, c10::optio template struct RandomStub { - void operator()(TensorIteratorBase& iter, c10::optional gen) { + void operator()(TensorIteratorBase& iter, std::optional gen) { random_stub(iter.device_type(), iter, gen); } }; -Tensor& random_(Tensor& self, c10::optional gen) { +Tensor& random_(Tensor& self, std::optional gen) { return at::native::templates::random_impl(self, std::move(gen)); } template struct RandomFromToStub { - void operator()(TensorIteratorBase& iter, uint64_t range, int64_t from, c10::optional gen) { + void operator()(TensorIteratorBase& iter, uint64_t range, int64_t from, std::optional gen) { random_from_to_stub(iter.device_type(), iter, range, from, gen); } - void operator()(TensorIteratorBase& iter, c10::optional gen) { + void operator()(TensorIteratorBase& iter, std::optional gen) { random_full_64_bits_range_stub(iter.device_type(), iter, gen); } }; -Tensor& random_(Tensor& self, int64_t from, optional to, c10::optional gen) { +Tensor& random_(Tensor& self, int64_t from, optional to, std::optional gen) { return at::native::templates::random_from_to_impl(self, from, to, std::move(gen)); } -Tensor& random_(Tensor& self, int64_t to, c10::optional gen) { +Tensor& random_(Tensor& self, int64_t to, std::optional gen) { return random_(self, 0, to, std::move(gen)); } -Tensor& random_meta_(Tensor& self, c10::optional gen) { +Tensor& random_meta_(Tensor& self, std::optional gen) { // No error checking yay return self; } -Tensor& random_meta_(Tensor& self, int64_t from, optional to, c10::optional gen) { +Tensor& random_meta_(Tensor& self, int64_t from, optional to, std::optional gen) { // No error checking yay return self; } -Tensor& random_meta_(Tensor& self, int64_t to, c10::optional gen) { +Tensor& random_meta_(Tensor& self, int64_t to, std::optional gen) { // No error checking yay return self; } @@ -437,7 +437,7 @@ Tensor _dirichlet_grad_cpu(const Tensor& x, const Tensor& alpha, const Tensor& t * This section is a counterpart to Distributions.cu */ -Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, c10::optional gen) { +Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, std::optional gen) { Tensor ret = at::zeros(count.sizes(), count.options()); auto iter = TensorIteratorConfig() .add_output(ret) @@ -462,7 +462,7 @@ Tensor _s_binomial_cpu(const Tensor& count, const Tensor& prob, c10::optional gen) { +Tensor _s_poisson_cpu(const Tensor& lambda, std::optional gen) { Tensor ret = at::zeros(lambda.sizes(), lambda.options()); auto iter = TensorIteratorConfig() .add_output(ret) @@ -479,7 +479,7 @@ Tensor _s_poisson_cpu(const Tensor& lambda, c10::optional gen) { return ret; } -Tensor _s_gamma_cpu(const Tensor& alpha, c10::optional gen) { +Tensor _s_gamma_cpu(const Tensor& alpha, std::optional gen) { Tensor ret = at::zeros(alpha.sizes(), alpha.options()); auto iter = TensorIteratorConfig() .add_output(ret) @@ -509,7 +509,7 @@ Tensor _s_gamma_cpu(const Tensor& alpha, c10::optional 
gen) { return ret; } -Tensor _s_dirichlet_cpu(const Tensor& alpha, c10::optional gen) { +Tensor _s_dirichlet_cpu(const Tensor& alpha, std::optional gen) { Tensor ret = at::zeros(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES(ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::zeros(alpha.sizes(), alpha.options().dtype(ScalarType::Double)); @@ -562,7 +562,7 @@ constexpr int64_t FLOAT32_MAX_CONSECUTIVE_INT = 1 << (FLT_MANT_DIG); Tensor& multinomial_out(const Tensor& self, int64_t n_sample, bool with_replacement, - c10::optional gen, + std::optional gen, Tensor& result) { TORCH_CHECK( result.device() == self.device(), @@ -647,7 +647,7 @@ Tensor multinomial( const Tensor& self, int64_t n_sample, bool with_replacement, - c10::optional gen) { + std::optional gen) { Tensor result = at::empty({0}, self.options().dtype(kLong)); native::multinomial_out(self, n_sample, with_replacement, std::move(gen), result); return result; diff --git a/aten/src/ATen/native/Dropout.cpp b/aten/src/ATen/native/Dropout.cpp index 7014ec65d1f5a..8a5d4a702a0ca 100644 --- a/aten/src/ATen/native/Dropout.cpp +++ b/aten/src/ATen/native/Dropout.cpp @@ -102,7 +102,7 @@ ALIAS_SPECIALIZATION(_feature_alpha_dropout, true, true ) } // anonymous namespace std::tuple -native_dropout_cpu(const Tensor& input, double p, c10::optional train) { +native_dropout_cpu(const Tensor& input, double p, std::optional train) { if (input.numel() == 0) { return std::make_tuple(input, at::empty_like(input, input.options())); } diff --git a/aten/src/ATen/native/Embedding.cpp b/aten/src/ATen/native/Embedding.cpp index 705b08ab39f06..b0c4644e579c2 100644 --- a/aten/src/ATen/native/Embedding.cpp +++ b/aten/src/ATen/native/Embedding.cpp @@ -88,7 +88,7 @@ Tensor embedding_sparse_backward( Tensor indices = indices_; Tensor grad = grad_; if (padding_idx != -1) { - c10::List> c({indices != padding_idx}); + c10::List> c({indices != padding_idx}); indices = indices.index(c); grad = grad.index(c); } diff --git a/aten/src/ATen/native/EmbeddingBag.cpp b/aten/src/ATen/native/EmbeddingBag.cpp index 8b6c90dae2375..216fad05dc07f 100644 --- a/aten/src/ATen/native/EmbeddingBag.cpp +++ b/aten/src/ATen/native/EmbeddingBag.cpp @@ -103,7 +103,7 @@ bool is_fast_path_index_select_scale(const Tensor& src, const Tensor& scale, Ten } template -bool is_fast_path(const Tensor& src, const c10::optional& scale, Tensor& output, index_t padding_idx) { +bool is_fast_path(const Tensor& src, const std::optional& scale, Tensor& output, index_t padding_idx) { return (scale.has_value() && scale.value().defined()) ? is_fast_path_index_select_scale(src, scale.value(), output, padding_idx) : is_fast_path_index_select(src, output, padding_idx); @@ -891,7 +891,7 @@ void check_arguments( const Tensor& indices, const Tensor& offsets, const int64_t mode, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, bool include_last_offset) { auto indices_arg = TensorArg(indices, "indices", 1); checkScalarTypes("embedding_bag", indices_arg, {kLong, kInt}); @@ -985,7 +985,7 @@ void make_offset2bag_out( const Tensor& indices, const Tensor& offsets, const int64_t mode, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, const int64_t padding_idx) { // To save compute, if we are going to go down the fast path case for the 'sum' // mode, we skip calculating offset2bag, since it is not going to be used. 
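The EmbeddingBag hunks above and below all follow the "hacky wrapper removal for optional tensor" pattern referenced in their comments: an optional tensor parameter is borrowed through `at::borrow_from_optional_tensor` and from then on handled as a possibly undefined `Tensor`. As a point of reference only (this block is not part of the patch), here is a minimal sketch of that calling convention using the post-codemod `std::optional` spelling; the helper name `scale_rows` and its weighting logic are invented for illustration:

```cpp
#include <ATen/ATen.h>
#include <optional>

// Hypothetical helper: applies optional per-sample weights the way these
// kernels accept them (an empty optional and an undefined tensor both mean
// "no weights supplied").
at::Tensor scale_rows(const at::Tensor& src,
                      const std::optional<at::Tensor>& per_sample_weights) {
  // Borrow the tensor out of the optional without copying; an empty optional
  // yields a default-constructed (undefined) Tensor.
  c10::MaybeOwned<at::Tensor> weights_maybe_owned =
      at::borrow_from_optional_tensor(per_sample_weights);
  const at::Tensor& weights = *weights_maybe_owned;
  if (!weights.defined()) {
    return src;
  }
  // Broadcast 1-D weights over the rows of a 2-D src.
  return src * weights.unsqueeze(1);
}
```

Using `c10::MaybeOwned` here keeps the common "no weights" and "weights passed by reference" paths copy-free; only the spelling of the optional parameter type changes in this patch, not this unwrapping pattern.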
@@ -1040,7 +1040,7 @@ static Tensor make_offset2bag( const Tensor& indices, const Tensor& offsets, const int64_t mode, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, const int64_t padding_idx) { Tensor offset2bag = at::empty({0}, offsets.options()); make_offset2bag_out(offset2bag, output, weight, indices, offsets, mode, per_sample_weights, padding_idx); @@ -1144,7 +1144,7 @@ void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag, Tensor& bag_size, Tensor* max_indices, const Tensor &weight, const Tensor &indices, const Tensor &offsets, const int64_t mode, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, bool include_last_offset, int64_t padding_idx, _EmbeddingBagKernelCache* fbgemm_kernel_cache) { if (mode == MODE_MEAN || mode == MODE_SUM) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_no_grad_cpu_out", @@ -1241,8 +1241,8 @@ static std::tuple _embedding_bag_cpu_impl( std::tuple embedding_bag(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, - const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, - bool include_last_offset, c10::optional padding_idx_opt) { + const int64_t mode, bool sparse, const std::optional& per_sample_weights_opt, + bool include_last_offset, std::optional padding_idx_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); const Tensor& per_sample_weights = *per_sample_weights_maybe_owned; @@ -1273,7 +1273,7 @@ embedding_bag(const Tensor &weight, const Tensor &indices, std::tuple embedding_bag(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, - const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, + const int64_t mode, bool sparse, const std::optional& per_sample_weights_opt, bool include_last_offset) { return at::native::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights_opt, include_last_offset, c10::nullopt); @@ -1284,7 +1284,7 @@ embedding_bag(const Tensor &weight, const Tensor &indices, std::tuple _embedding_bag_forward_only_cpu(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, - const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, bool include_last_offset, + const int64_t mode, bool sparse, const std::optional& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); @@ -1307,7 +1307,7 @@ _embedding_bag_forward_only_cpu(const Tensor &weight, const Tensor &indices, std::tuple _embedding_bag_cpu(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, - const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, bool include_last_offset, + const int64_t mode, bool sparse, const std::optional& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); @@ -1337,9 +1337,9 @@ void _embedding_bag_cpu_out( const bool /* scale_grad_by_freq 
*/, const int64_t mode, const bool /* sparse */, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, const bool include_last_offset, - const c10::optional& padding_idx, + const std::optional& padding_idx, _EmbeddingBagKernelCache* fbgemm_kernel_cache) { auto [indicesMaybeOwned, offsetsMaybeOwned] = promoteIndicesAndOffsets(indices_, offsets_); const auto& indices = *indicesMaybeOwned; @@ -1393,7 +1393,7 @@ Tensor _embedding_bag_backward(const Tensor &grad, const Tensor &indices_, const Tensor &max_indices_, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, - bool sparse, const c10::optional& per_sample_weights_opt, + bool sparse, const std::optional& per_sample_weights_opt, int64_t padding_idx) { return at::native::_embedding_bag_backward_symint( grad, indices_, offsets_, offset2bag, bag_size_, max_indices_, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_opt, padding_idx); @@ -1408,7 +1408,7 @@ Tensor _embedding_bag_backward_symint(const Tensor &grad, const Tensor &indices_ const Tensor &max_indices_, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, - bool sparse, const c10::optional& per_sample_weights_opt, + bool sparse, const std::optional& per_sample_weights_opt, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); @@ -1610,7 +1610,7 @@ Tensor _embedding_bag_dense_backward_cpu(const Tensor &grad_, const Tensor &indi const Tensor &offset2bag__, const Tensor &bag_size_, const Tensor& max_indices_, int64_t num_weights, - bool scale_grad_by_freq, int64_t mode, const c10::optional& per_sample_weights__opt, + bool scale_grad_by_freq, int64_t mode, const std::optional& per_sample_weights__opt, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights__maybe_owned = at::borrow_from_optional_tensor(per_sample_weights__opt); @@ -1765,7 +1765,7 @@ Tensor _embedding_bag_per_sample_weights_backward_cpu( Tensor _embedding_bag_sparse_backward_symint( const Tensor &grad_, const Tensor &indices, const Tensor &offsets, const Tensor &offset2bag, const Tensor &bag_size_, SymInt num_weights, - bool scale_grad_by_freq, int64_t mode, const c10::optional& per_sample_weights_opt, + bool scale_grad_by_freq, int64_t mode, const std::optional& per_sample_weights_opt, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); diff --git a/aten/src/ATen/native/EmbeddingBag.h b/aten/src/ATen/native/EmbeddingBag.h index c2e61f280bf59..796127f0441ee 100644 --- a/aten/src/ATen/native/EmbeddingBag.h +++ b/aten/src/ATen/native/EmbeddingBag.h @@ -13,7 +13,7 @@ void check_arguments( const Tensor& indices, const Tensor& offsets, const int64_t mode, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, bool include_last_offset); void make_bag_size_out( @@ -40,7 +40,7 @@ void make_offset2bag_out( const Tensor& indices, const Tensor& offsets, const int64_t mode, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, const int64_t padding_idx = -1); #ifdef USE_FBGEMM @@ -64,7 +64,7 @@ struct _CallbackAndBlockSize { _CallbackAndBlockSize() = default; - explicit _CallbackAndBlockSize(c10::optional maybe_block_size) + explicit _CallbackAndBlockSize(std::optional maybe_block_size) : 
blockSize(maybe_block_size.value_or(-1)) , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr) {} @@ -75,7 +75,7 @@ struct _EmbeddingBagKernelCacheImpl : private StorageMixins... { _EmbeddingBagKernelCacheImpl() = default; // use each of the mixins to store corresponding kernel and block size - explicit _EmbeddingBagKernelCacheImpl(c10::optional maybe_block_size) + explicit _EmbeddingBagKernelCacheImpl(std::optional maybe_block_size) : StorageMixins(maybe_block_size)... {} @@ -107,7 +107,7 @@ using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl< _CallbackAndBlockSize>; #else struct _EmbeddingBagKernelCache { - explicit _EmbeddingBagKernelCache(c10::optional /* maybe_block_size */) {} + explicit _EmbeddingBagKernelCache(std::optional /* maybe_block_size */) {} }; #endif @@ -115,7 +115,7 @@ void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag, Tensor& bag_size, Tensor* max_indices, const Tensor &weight, const Tensor &indices, const Tensor &offsets, const int64_t mode = 0, - const c10::optional& per_sample_weights = c10::nullopt, + const std::optional& per_sample_weights = c10::nullopt, bool include_last_offset = false, int64_t padding_idx = -1, _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr); @@ -131,9 +131,9 @@ void _embedding_bag_cpu_out( const bool scale_grad_by_freq, const int64_t mode, const bool sparse, - const c10::optional& per_sample_weights, + const std::optional& per_sample_weights, const bool include_last_offset, - const c10::optional& padding_idx, + const std::optional& padding_idx, _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr); } // namespace at::native diff --git a/aten/src/ATen/native/ForeachUtils.h b/aten/src/ATen/native/ForeachUtils.h index f44ae1179de8f..0839dd9a1560c 100644 --- a/aten/src/ATen/native/ForeachUtils.h +++ b/aten/src/ATen/native/ForeachUtils.h @@ -258,7 +258,7 @@ inline bool can_use_fast_route( using DeviceDtypeKey = std::pair; using IndicesT = std::vector; using nested_optional_tensorvec_t = - std::vector>>; + std::vector>>; using TensorsAndIndicesT = std::pair; using FlatMap = std::unordered_map< DeviceDtypeKey, @@ -339,7 +339,7 @@ inline FlatMap _group_tensors_by_first_tensors_device_and_dtype( nested_optional_tensorvec_t nested_tensorvec; nested_tensorvec.reserve(num_lists); for (const auto& i : c10::irange(num_lists)) { - std::vector> tensors; + std::vector> tensors; if (!nested_tensorlist[i].empty()) { // NB: num_tensors is the max possible length for any of // the inner lists of tensor references. Reserving the max diff --git a/aten/src/ATen/native/FusedAdam.cpp b/aten/src/ATen/native/FusedAdam.cpp index b3be769b24f18..41ef04b02d548 100644 --- a/aten/src/ATen/native/FusedAdam.cpp +++ b/aten/src/ATen/native/FusedAdam.cpp @@ -30,8 +30,8 @@ void _fused_adam_kernel_cpu_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { const float* grad_scale_ptr = grad_scale.has_value() ? 
grad_scale->data_ptr() : nullptr; const float* found_inf_ptr = @@ -87,8 +87,8 @@ void _fused_adam_kernel_cpu_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { _fused_adam_kernel_cpu_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr.item(), beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); } @@ -106,8 +106,8 @@ void _fused_adamw_kernel_cpu_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { const float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr() : nullptr; const float* found_inf_ptr = @@ -163,8 +163,8 @@ void _fused_adamw_kernel_cpu_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { _fused_adamw_kernel_cpu_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr.item(), beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); } diff --git a/aten/src/ATen/native/FusedSGD.cpp b/aten/src/ATen/native/FusedSGD.cpp index 56e2e91759113..2fb1f5af9e02f 100644 --- a/aten/src/ATen/native/FusedSGD.cpp +++ b/aten/src/ATen/native/FusedSGD.cpp @@ -26,8 +26,8 @@ void _fused_sgd_kernel_cpu_( const bool nesterov, const bool maximize, const bool is_first_step, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { const float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr() : nullptr; const float* found_inf_ptr = @@ -71,8 +71,8 @@ void _fused_sgd_kernel_cpu_( const bool nesterov, const bool maximize, const bool is_first_step, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { _fused_sgd_kernel_cpu_( params, grads, momentum_buffer_list, weight_decay, momentum, lr.item(), dampening, nesterov, diff --git a/aten/src/ATen/native/Histogram.cpp b/aten/src/ATen/native/Histogram.cpp index d5258866f8a34..9954edef94607 100644 --- a/aten/src/ATen/native/Histogram.cpp +++ b/aten/src/ATen/native/Histogram.cpp @@ -71,7 +71,7 @@ namespace { /* Checks properties of input tensors input, bins, and weight. */ -void histogramdd_check_inputs(const Tensor& input, const TensorList& bins, const c10::optional& weight) { +void histogramdd_check_inputs(const Tensor& input, const TensorList& bins, const std::optional& weight) { TORCH_CHECK(input.dim() >= 2, "torch.histogramdd: input tensor should have at least 2 dimensions, but got ", input.dim()); @@ -158,7 +158,7 @@ void histogramdd_prepare_out(const Tensor& input, TensorList bins, * assumes that input has already been reshaped to (M, N). */ std::pair, std::vector> -select_outer_bin_edges(const Tensor& input, c10::optional> range) { +select_outer_bin_edges(const Tensor& input, std::optional> range) { TORCH_INTERNAL_ASSERT(input.dim() == 2, "expected input to have shape (M, N)"); const int64_t N = input.size(-1); @@ -244,7 +244,7 @@ static std::vector allocate_bin_edges_tensors(const Tensor& self) { /* Versions of histogramdd in which bins is a Tensor[] defining the sequences of bin edges. 
*/ static Tensor& histogramdd_out(const Tensor& self, TensorList bins, - const c10::optional& weight, bool density, + const std::optional& weight, bool density, Tensor& hist, TensorList& bin_edges) { histogramdd_check_inputs(self, bins, weight); histogramdd_prepare_out(self, bins, hist, bin_edges); @@ -258,7 +258,7 @@ static Tensor& histogramdd_out(const Tensor& self, TensorList bins, } Tensor _histogramdd(const Tensor& self, TensorList bins, - const c10::optional& weight, bool density) { + const std::optional& weight, bool density) { Tensor hist = at::empty({0}, self.options(), MemoryFormat::Contiguous); std::vector bin_edges_out = allocate_bin_edges_tensors(self); TensorList bin_edges_out_tl(bin_edges_out); @@ -271,8 +271,8 @@ Tensor _histogramdd(const Tensor& self, TensorList bins, * defining the number of bins in each dimension. */ static std::vector& histogramdd_bin_edges_out(const Tensor& self, IntArrayRef bin_ct, - c10::optional> range, - const c10::optional& weight, bool density, + std::optional> range, + const std::optional& weight, bool density, std::vector& bin_edges_out) { TensorList bin_edges_out_tl(bin_edges_out); @@ -296,15 +296,15 @@ static std::vector& histogramdd_bin_edges_out(const Tensor& self, IntArr } std::vector histogramdd_bin_edges(const Tensor& self, IntArrayRef bin_ct, - c10::optional> range, - const c10::optional& weight, bool density) { + std::optional> range, + const std::optional& weight, bool density) { std::vector bin_edges_out = allocate_bin_edges_tensors(self); return histogramdd_bin_edges_out(self, bin_ct, range, weight, density, bin_edges_out); } static Tensor& histogramdd_out(const Tensor& self, IntArrayRef bin_ct, - c10::optional> range, - const c10::optional& weight, bool density, + std::optional> range, + const std::optional& weight, bool density, Tensor& hist, TensorList& bin_edges) { std::vector bins = histogramdd_bin_edges(self, bin_ct, range, weight, density); @@ -320,8 +320,8 @@ static Tensor& histogramdd_out(const Tensor& self, IntArrayRef bin_ct, } Tensor _histogramdd(const Tensor& self, IntArrayRef bin_ct, - c10::optional> range, - const c10::optional& weight, bool density) { + std::optional> range, + const std::optional& weight, bool density) { Tensor hist = at::empty({0}, self.options(), MemoryFormat::Contiguous); std::vector bin_edges_out = allocate_bin_edges_tensors(self); TensorList bin_edges_out_tl(bin_edges_out); @@ -334,10 +334,10 @@ Tensor _histogramdd(const Tensor& self, IntArrayRef bin_ct, */ std::tuple histogram_out(const Tensor& self, const Tensor& bins, - const c10::optional& weight, bool density, + const std::optional& weight, bool density, Tensor& hist, Tensor& bin_edges) { Tensor reshaped_self = self.reshape({ self.numel(), 1 }); - c10::optional reshaped_weight = weight.has_value() + std::optional reshaped_weight = weight.has_value() ? 
weight.value().reshape({ weight.value().numel() }) : weight; TensorList bins_in = bins; TensorList bins_out = bin_edges; @@ -349,7 +349,7 @@ histogram_out(const Tensor& self, const Tensor& bins, std::tuple histogram(const Tensor& self, const Tensor& bins, - const c10::optional& weight, bool density) { + const std::optional& weight, bool density) { Tensor hist = at::empty({0}, self.options(), MemoryFormat::Contiguous); Tensor bin_edges = at::empty({0}, bins.options(), MemoryFormat::Contiguous); return histogram_out(self, bins, weight, density, hist, bin_edges); @@ -358,11 +358,11 @@ histogram(const Tensor& self, const Tensor& bins, /* Versions of histogram in which bins is an integer specifying the number of equal-width bins. */ std::tuple -histogram_out(const Tensor& self, int64_t bin_ct, c10::optional> range, - const c10::optional& weight, bool density, +histogram_out(const Tensor& self, int64_t bin_ct, std::optional> range, + const std::optional& weight, bool density, Tensor& hist, Tensor& bin_edges) { Tensor reshaped_self = self.reshape({ self.numel(), 1 }); - c10::optional reshaped_weight = weight.has_value() + std::optional reshaped_weight = weight.has_value() ? weight.value().reshape({ weight.value().numel() }) : weight; TensorList bins_in = bin_edges; TensorList bins_out = bin_edges; @@ -378,8 +378,8 @@ histogram_out(const Tensor& self, int64_t bin_ct, c10::optional -histogram(const Tensor& self, int64_t bin_ct, c10::optional> range, - const c10::optional& weight, bool density) { +histogram(const Tensor& self, int64_t bin_ct, std::optional> range, + const std::optional& weight, bool density) { Tensor hist = at::empty({0}, self.options(), MemoryFormat::Contiguous); Tensor bin_edges_out = at::empty({0}, self.options()); return histogram_out(self, bin_ct, range, weight, density, hist, bin_edges_out); @@ -403,7 +403,7 @@ Tensor& histogram_histc_out(const Tensor& self, int64_t bin_ct, histogramdd_check_inputs(reshaped, bins_in, {}); histogramdd_linear_stub(reshaped.device().type(), reshaped, - c10::optional(), false, hist, bin_edges, false); + std::optional(), false, hist, bin_edges, false); return hist; } @@ -414,16 +414,16 @@ Tensor histogram_histc(const Tensor& self, int64_t bin_ct, } std::tuple> histogramdd( - const Tensor &self, TensorList bins, c10::optional> /*range*/, - const c10::optional &weight, bool density) { + const Tensor &self, TensorList bins, std::optional> /*range*/, + const std::optional &weight, bool density) { auto hist = at::_histogramdd_from_bin_tensors(self, bins, weight, density); return std::tuple>{ std::move(hist), bins.vec()}; } std::tuple> histogramdd( - const Tensor &self, IntArrayRef bins, c10::optional> range, - const c10::optional &weight, bool density) { + const Tensor &self, IntArrayRef bins, std::optional> range, + const std::optional &weight, bool density) { auto bin_edges = at::_histogramdd_bin_edges(self, bins, range, weight, density); auto hist = at::_histogramdd_from_bin_cts(self, bins, range, weight, density); return std::tuple>{ @@ -431,8 +431,8 @@ std::tuple> histogramdd( } std::tuple> histogramdd( - const Tensor &self, int64_t bins, c10::optional> range, - const c10::optional &weight, bool density) { + const Tensor &self, int64_t bins, std::optional> range, + const std::optional &weight, bool density) { DimVector bins_v(self.size(-1), bins); return at::native::histogramdd(self, bins_v, range, weight, density); } diff --git a/aten/src/ATen/native/Histogram.h b/aten/src/ATen/native/Histogram.h index cd19fa4691ad0..fee7e06b87258 100644 --- 
a/aten/src/ATen/native/Histogram.h +++ b/aten/src/ATen/native/Histogram.h @@ -5,8 +5,8 @@ namespace at::native { -using histogramdd_fn = void(*)(const Tensor&, const c10::optional&, bool, Tensor&, const TensorList&); -using histogramdd_linear_fn = void(*)(const Tensor&, const c10::optional&, bool, Tensor&, const TensorList&, bool); +using histogramdd_fn = void(*)(const Tensor&, const std::optional&, bool, Tensor&, const TensorList&); +using histogramdd_linear_fn = void(*)(const Tensor&, const std::optional&, bool, Tensor&, const TensorList&, bool); using histogram_select_outer_bin_edges_fn = void(*)(const Tensor& input, const int64_t N, std::vector &leftmost_edges, std::vector &rightmost_edges); DECLARE_DISPATCH(histogramdd_fn, histogramdd_stub); diff --git a/aten/src/ATen/native/IndexingUtils.h b/aten/src/ATen/native/IndexingUtils.h index 72b39eb326a0c..fb382ccbc6f0a 100644 --- a/aten/src/ATen/native/IndexingUtils.h +++ b/aten/src/ATen/native/IndexingUtils.h @@ -65,8 +65,8 @@ static C10_UNUSED void checkIndexTensorTypes(IOptTensorListRef indices, bool all } } -inline torch::List> toListOfOptionalTensors(ArrayRef list) { - torch::List> result; +inline torch::List> toListOfOptionalTensors(ArrayRef list) { + torch::List> result; result.reserve(list.size()); for (const Tensor& a : list) { result.push_back(a); @@ -74,11 +74,11 @@ inline torch::List> toListOfOptionalTensors(ArrayRef> toListOfOptionalTensors(ArrayRef list) { - torch::List> result; +inline torch::List> toListOfOptionalTensors(ArrayRef list) { + torch::List> result; result.reserve(list.size()); for (const IValue& a : list) { - result.push_back(a.isTensor() ? c10::optional(a.toTensor()) : c10::optional()); + result.push_back(a.isTensor() ? std::optional(a.toTensor()) : c10::optional()); } return result; } diff --git a/aten/src/ATen/native/Linear.cpp b/aten/src/ATen/native/Linear.cpp index 9322776b03f5a..8a835410458ea 100644 --- a/aten/src/ATen/native/Linear.cpp +++ b/aten/src/ATen/native/Linear.cpp @@ -70,7 +70,7 @@ static inline Tensor _flatten_nd_linear(const Tensor& input, const Tensor& weigh } -Tensor linear(const Tensor& input, const Tensor& weight, const c10::optional& bias_opt) { +Tensor linear(const Tensor& input, const Tensor& weight, const std::optional& bias_opt) { // _matmul_impl checks this again later, but _flatten_nd_linear does not work on scalars inputs, // so let's try to catch this here already const auto input_dim = input.dim(); @@ -121,7 +121,7 @@ Tensor linear(const Tensor& input, const Tensor& weight, const c10::optional& bias_opt, Tensor& output) { +Tensor& linear_out(const Tensor& input, const Tensor& weight, const std::optional& bias_opt, Tensor& output) { TORCH_CHECK(!input.is_mkldnn(), "linear doesn't support out for MKLDNN tensors"); // See [Note: hacky wrapper removal for optional tensor] auto bias = bias_opt.has_value() @@ -707,7 +707,7 @@ Tensor _trilinear(const Tensor& i1_, const Tensor& i2_, const Tensor& i3_, return output; } -Tensor bilinear(const Tensor& input1, const Tensor& input2, const Tensor& weight, const c10::optional& bias_opt) { +Tensor bilinear(const Tensor& input1, const Tensor& input2, const Tensor& weight, const std::optional& bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp index 81f461f6c95b8..3389033ac9851 100644 --- 
a/aten/src/ATen/native/LinearAlgebra.cpp +++ b/aten/src/ATen/native/LinearAlgebra.cpp @@ -280,7 +280,7 @@ TORCH_META_FUNC(_linalg_slogdet)(const Tensor& A) { } template -void common_checks_baddbmm_bmm(Meta& meta, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha, bool is_bmm, const c10::optional& self_baddbmm = nullopt) { +void common_checks_baddbmm_bmm(Meta& meta, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha, bool is_bmm, const std::optional& self_baddbmm = nullopt) { TORCH_CHECK(batch1.dim() == 3, "batch1 must be a 3D tensor"); TORCH_CHECK(batch2.dim() == 3, "batch2 must be a 3D tensor"); @@ -635,7 +635,7 @@ namespace { Tensor linalg_matrix_power_impl( const Tensor& self, int64_t n, - c10::optional _out) { + std::optional _out) { NoTF32Guard disable_tf32; auto out = _out.value_or(Tensor()); @@ -929,7 +929,7 @@ Tensor matrix_chain_multiplication( } // Implements torch.linalg.multi_dot -Tensor multi_dot_impl(TensorList _tensors, c10::optional _out) { +Tensor multi_dot_impl(TensorList _tensors, std::optional _out) { const size_t n = _tensors.size(); TORCH_CHECK(n >= 2, "multi_dot(): expected at least 2 tensors but got ", n); diff --git a/aten/src/ATen/native/Loss.cpp b/aten/src/ATen/native/Loss.cpp index e21d9f6008e8e..a170e4a868aa7 100644 --- a/aten/src/ATen/native/Loss.cpp +++ b/aten/src/ATen/native/Loss.cpp @@ -250,7 +250,7 @@ Tensor kl_div(const Tensor& input, const Tensor& target, int64_t reduction, bool return apply_loss_reduction(output, reduction); } -Tensor binary_cross_entropy_cpu(const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction) { +Tensor binary_cross_entropy_cpu(const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -260,7 +260,7 @@ Tensor binary_cross_entropy_cpu(const Tensor& input, const Tensor& target, const input, target, weight, reduction, loss); } -Tensor& binary_cross_entropy_out_cpu(const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction, Tensor& loss) { +Tensor& binary_cross_entropy_out_cpu(const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction, Tensor& loss) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -307,7 +307,7 @@ Tensor& binary_cross_entropy_out_cpu(const Tensor& input, const Tensor& target, return loss; } -Tensor binary_cross_entropy_backward_cpu(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction) { +Tensor binary_cross_entropy_backward_cpu(const Tensor& grad, const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -317,7 +317,7 @@ Tensor binary_cross_entropy_backward_cpu(const Tensor& grad, const Tensor& input grad, input, target, weight, reduction, grad_input); } -Tensor& binary_cross_entropy_backward_out_cpu(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t 
reduction, Tensor& grad_input) { +Tensor& binary_cross_entropy_backward_out_cpu(const Tensor& grad, const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction, Tensor& grad_input) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -359,7 +359,7 @@ Tensor& binary_cross_entropy_backward_out_cpu(const Tensor& grad, const Tensor& return grad_input; } -Tensor binary_cross_entropy_with_logits(const Tensor& input, const Tensor& target, const c10::optional& weight_opt, const c10::optional& pos_weight_opt, int64_t reduction) { +Tensor binary_cross_entropy_with_logits(const Tensor& input, const Tensor& target, const std::optional& weight_opt, const c10::optional& pos_weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; diff --git a/aten/src/ATen/native/LossMulti.h b/aten/src/ATen/native/LossMulti.h index f21269620f253..27697815ad594 100644 --- a/aten/src/ATen/native/LossMulti.h +++ b/aten/src/ATen/native/LossMulti.h @@ -41,7 +41,7 @@ namespace { const int64_t& ndims, const Tensor& input, const Tensor& target, - const c10::optional& weight) { + const std::optional& weight) { TORCH_CHECK( (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0, "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ", diff --git a/aten/src/ATen/native/LossMultiMargin.cpp b/aten/src/ATen/native/LossMultiMargin.cpp index 5b2f5ae1863b7..e7620c7900c56 100644 --- a/aten/src/ATen/native/LossMultiMargin.cpp +++ b/aten/src/ATen/native/LossMultiMargin.cpp @@ -102,7 +102,7 @@ void multi_margin_loss_out_cpu_template( const Tensor& target, int p, const Scalar& margin, - const c10::optional& weight, + const std::optional& weight, int64_t reduction) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) int64_t nframe, dim; @@ -266,7 +266,7 @@ Tensor multi_margin_loss_cpu( const Tensor& target, const Scalar& p, const Scalar& margin, - const c10::optional& weight, + const std::optional& weight, int64_t reduction) { auto output = at::empty({0}, input.options()); multi_margin_loss_out_cpu_template( @@ -278,7 +278,7 @@ Tensor& multi_margin_loss_cpu_out(const Tensor& input, const Tensor& target, const Scalar& p, const Scalar& margin, - const c10::optional& weight, + const std::optional& weight, int64_t reduction, Tensor& output) { multi_margin_loss_out_cpu_template( @@ -291,7 +291,7 @@ Tensor multi_margin_loss_cpu_backward( const Tensor& input, const Tensor& target, const Scalar& p, - const Scalar& margin, const c10::optional& weight_opt, + const Scalar& margin, const std::optional& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -314,7 +314,7 @@ Tensor& multi_margin_loss_cpu_backward_out(const Tensor& grad_output, const Tensor& input, const Tensor& target, const Scalar& p, - const Scalar& margin, const c10::optional& weight_opt, + const Scalar& margin, const std::optional& weight_opt, int64_t reduction, Tensor& grad_input) { // See [Note: hacky wrapper removal for optional tensor] diff --git a/aten/src/ATen/native/LossNLL.cpp b/aten/src/ATen/native/LossNLL.cpp index 0e7de9c27252a..b7809ab21dd5d 100644 --- 
a/aten/src/ATen/native/LossNLL.cpp +++ b/aten/src/ATen/native/LossNLL.cpp @@ -624,7 +624,7 @@ static Tensor cross_entropy_loss_label_smoothing( Tensor cross_entropy_loss_symint( const Tensor& self, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) { @@ -658,7 +658,7 @@ Tensor cross_entropy_loss_symint( return ret; } -Tensor & nll_loss_out(const Tensor & self, const Tensor & target, const c10::optional& weight_opt, int64_t reduction, int64_t ignore_index, Tensor & output) { +Tensor & nll_loss_out(const Tensor & self, const Tensor & target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, Tensor & output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -667,7 +667,7 @@ Tensor & nll_loss_out(const Tensor & self, const Tensor & target, const c10::opt return std::get<0>(at::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index)); } -Tensor nll_loss_symint(const Tensor & self, const Tensor & target, const c10::optional& weight_opt, int64_t reduction, c10::SymInt ignore_index) { +Tensor nll_loss_symint(const Tensor & self, const Tensor & target, const std::optional& weight_opt, int64_t reduction, c10::SymInt ignore_index) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -676,7 +676,7 @@ Tensor nll_loss_symint(const Tensor & self, const Tensor & target, const c10::op } // Duplicate of above code for non-symbolic ints. Kept for BC purposes and to minimize breakages. 
-static Tensor nll_loss(const Tensor & self, const Tensor & target, const c10::optional& weight_opt, int64_t reduction, int64_t ignore_index) { +static Tensor nll_loss(const Tensor & self, const Tensor & target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -687,7 +687,7 @@ static Tensor nll_loss(const Tensor & self, const Tensor & target, const c10::op Tensor nll_loss_nd_symint( const Tensor& self, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction, c10::SymInt ignore_index) { if (self.dim() < 1) { diff --git a/aten/src/ATen/native/LossNLL2d.cpp b/aten/src/ATen/native/LossNLL2d.cpp index 94c667dcb1b2b..6f27884b8f24b 100644 --- a/aten/src/ATen/native/LossNLL2d.cpp +++ b/aten/src/ATen/native/LossNLL2d.cpp @@ -405,7 +405,7 @@ void nll_loss2d_backward_out_cpu_template( } // namespace std::tuple nll_loss2d_forward_out_cpu(const Tensor& self, - const Tensor& target, const c10::optional& weight_opt, + const Tensor& target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, Tensor& output, @@ -421,7 +421,7 @@ std::tuple nll_loss2d_forward_out_cpu(const Tensor& self, std::tuple nll_loss2d_forward_cpu( const Tensor& self, - const Tensor& target, const c10::optional& weight_opt, + const Tensor& target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index) { // See [Note: hacky wrapper removal for optional tensor] @@ -437,7 +437,7 @@ std::tuple nll_loss2d_forward_cpu( Tensor& nll_loss2d_backward_out_cpu(const Tensor& grad_output, const Tensor& self, - const Tensor& target, const c10::optional& weight_opt, + const Tensor& target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, @@ -461,7 +461,7 @@ Tensor& nll_loss2d_backward_out_cpu(const Tensor& grad_output, Tensor nll_loss2d_backward_cpu( const Tensor& grad_output, const Tensor& self, - const Tensor& target, const c10::optional& weight_opt, + const Tensor& target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight) { @@ -482,7 +482,7 @@ Tensor nll_loss2d_backward_cpu( return grad_input; } -Tensor & nll_loss2d_out(const Tensor & self, const Tensor & target, const c10::optional& weight_opt, int64_t reduction, int64_t ignore_index, Tensor & output) { +Tensor & nll_loss2d_out(const Tensor & self, const Tensor & target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, Tensor & output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -491,7 +491,7 @@ Tensor & nll_loss2d_out(const Tensor & self, const Tensor & target, const c10::o return std::get<0>(at::nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index)); } -Tensor nll_loss2d_symint(const Tensor & self, const Tensor & target, const c10::optional& weight_opt, int64_t reduction, c10::SymInt ignore_index) { +Tensor nll_loss2d_symint(const Tensor & self, const Tensor & target, const std::optional& weight_opt, int64_t reduction, c10::SymInt ignore_index) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const 
Tensor& weight = *weight_maybe_owned; @@ -500,7 +500,7 @@ Tensor nll_loss2d_symint(const Tensor & self, const Tensor & target, const c10:: } // Duplicate of above code for non-symbolic ints. Kept for BC purposes and to minimize breakages. -static Tensor nll_loss2d(const Tensor & self, const Tensor & target, const c10::optional& weight_opt, int64_t reduction, int64_t ignore_index) { +static Tensor nll_loss2d(const Tensor & self, const Tensor & target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; diff --git a/aten/src/ATen/native/MathBitsFallback.h b/aten/src/ATen/native/MathBitsFallback.h index 584d07aeca358..de2296634e045 100644 --- a/aten/src/ATen/native/MathBitsFallback.h +++ b/aten/src/ATen/native/MathBitsFallback.h @@ -56,7 +56,7 @@ struct MathOpFallback { const auto num_arguments = arguments.size(); const auto stack_start = stack->size() - num_arguments; - c10::optional is_write; + std::optional is_write; for (const auto i : c10::irange(num_arguments)) { // Three possible states: // 1. alias_info has no value --> out-of-place operation diff --git a/aten/src/ATen/native/Memory.cpp b/aten/src/ATen/native/Memory.cpp index 0d07054f72eda..fefe9ab5a8d2b 100644 --- a/aten/src/ATen/native/Memory.cpp +++ b/aten/src/ATen/native/Memory.cpp @@ -23,11 +23,11 @@ int64_t _debug_has_internal_overlap(const Tensor& self) { // pinned memory, always return false", but this makes life a little easier when // you haven't loaded the backend extension at all (which can happen, e.g., on a // CPU build of PyTorch and you try to check if something is CUDA pinned) -bool is_pinned_default(const Tensor& self, c10::optional device) { +bool is_pinned_default(const Tensor& self, std::optional device) { return false; } -Tensor pin_memory(const Tensor& self, c10::optional device) { +Tensor pin_memory(const Tensor& self, std::optional device) { // Kind of mad that I have to do two dynamic dispatches here, pretty // annoying if (self.is_pinned(device)) { diff --git a/aten/src/ATen/native/MetaTensor.cpp b/aten/src/ATen/native/MetaTensor.cpp index 972d13dc8fb51..518466df84ce4 100644 --- a/aten/src/ATen/native/MetaTensor.cpp +++ b/aten/src/ATen/native/MetaTensor.cpp @@ -13,11 +13,11 @@ namespace at::native { Tensor empty_meta_symint( SymIntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt ) { auto opt_size = asIntArrayRefSlowOpt(size); @@ -32,10 +32,10 @@ Tensor empty_meta_symint( static Tensor empty_strided_meta( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt ) { return empty_strided_meta_symint(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype_opt, layout_opt, device_opt, pin_memory_opt); } @@ -43,10 +43,10 @@ static Tensor empty_strided_meta( Tensor empty_strided_meta_symint( SymIntArrayRef size, SymIntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional 
pin_memory_opt + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt ) { return at::detail::empty_strided_symint_meta( size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt); diff --git a/aten/src/ATen/native/NNPACK.cpp b/aten/src/ATen/native/NNPACK.cpp index e43bfdd627965..89b2f3ffc493b 100644 --- a/aten/src/ATen/native/NNPACK.cpp +++ b/aten/src/ATen/native/NNPACK.cpp @@ -22,7 +22,7 @@ namespace at::native { at::Tensor _nnpack_spatial_convolution( const Tensor& input, - const Tensor& weight, const c10::optional& bias_opt, + const Tensor& weight, const std::optional& bias_opt, const IntArrayRef padding, const IntArrayRef stride) { throw std::runtime_error( @@ -137,7 +137,7 @@ static thread_local Workspace workspace; Tensor _nnpack_spatial_convolution( const Tensor& input, - const Tensor& weight, const c10::optional& bias_opt, + const Tensor& weight, const std::optional& bias_opt, const IntArrayRef padding, const IntArrayRef stride) { // See [Note: hacky wrapper removal for optional tensor] diff --git a/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp b/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp index 624e820c7ba66..f82354ace3b82 100644 --- a/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp +++ b/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp @@ -819,7 +819,7 @@ void slow_conv_transpose3d_acc_grad_parameters_cpu( Tensor& slow_conv_transpose3d_out_cpu(const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, @@ -846,7 +846,7 @@ Tensor& slow_conv_transpose3d_out_cpu(const Tensor& input, Tensor slow_conv_transpose3d_cpu( const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, diff --git a/aten/src/ATen/native/NaiveDilatedConvolution.cpp b/aten/src/ATen/native/NaiveDilatedConvolution.cpp index 571eb16fc50e0..acf040259b135 100644 --- a/aten/src/ATen/native/NaiveDilatedConvolution.cpp +++ b/aten/src/ATen/native/NaiveDilatedConvolution.cpp @@ -524,7 +524,7 @@ void slow_conv_dilated_all_cpu_template( Tensor slow_conv_dilated2d_cpu( const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { @@ -579,7 +579,7 @@ Tensor slow_conv_dilated2d_cpu( Tensor slow_conv_dilated3d_cpu( const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { diff --git a/aten/src/ATen/native/NamedTensor.cpp b/aten/src/ATen/native/NamedTensor.cpp index f0330481c31a9..709d63bae6368 100644 --- a/aten/src/ATen/native/NamedTensor.cpp +++ b/aten/src/ATen/native/NamedTensor.cpp @@ -387,13 +387,13 @@ Tensor scatter_add(const Tensor& self, Dimname dim, const Tensor& index, const T static Tensor& scatter_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { reportNYIDimnameOverload("scatter_add"); } -std::tuple sort_out(const Tensor& self, c10::optional stable, Dimname dim, bool keepdim, Tensor& values, Tensor& indices) { +std::tuple 
sort_out(const Tensor& self, std::optional stable, Dimname dim, bool keepdim, Tensor& values, Tensor& indices) { reportNYIDimnameOverload("sort"); } std::tuple sort_out(const Tensor& self, Dimname dim, bool keepdim, Tensor& values, Tensor& indices) { reportNYIDimnameOverload("sort"); } -std::tuple sort(const Tensor& self, c10::optional stable, Dimname dim, bool keepdim) { +std::tuple sort(const Tensor& self, std::optional stable, Dimname dim, bool keepdim) { reportNYIDimnameOverload("sort"); } std::tuple sort(const Tensor& self, Dimname dim, bool keepdim) { diff --git a/aten/src/ATen/native/NonSymbolicBC.h b/aten/src/ATen/native/NonSymbolicBC.h index 589822a4ee013..037156ac23b15 100644 --- a/aten/src/ATen/native/NonSymbolicBC.h +++ b/aten/src/ATen/native/NonSymbolicBC.h @@ -9,15 +9,15 @@ namespace at::native { // In those cases, we will duplicate the signature here with non-symbolic ints, and also duplicate the C++ implementation. TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape); TORCH_API at::Tensor narrow(const at::Tensor& self, int64_t dim, int64_t start, int64_t length); -TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype=c10::nullopt, c10::optional layout=c10::nullopt, c10::optional device=c10::nullopt, c10::optional pin_memory=c10::nullopt, c10::optional is_coalesced=c10::nullopt); -TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional& weight_opt, int64_t reduction, int64_t ignore_index); -TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional& weight_opt, int64_t reduction, int64_t ignore_index); +TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, std::optional dtype=c10::nullopt, c10::optional layout=c10::nullopt, c10::optional device=c10::nullopt, c10::optional pin_memory=c10::nullopt, c10::optional is_coalesced=c10::nullopt); +TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index); +TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const std::optional& weight_opt, int64_t reduction, int64_t ignore_index); // The below ops don't get a duplicated C++ implementation. // They are backward ops, which make them very unlikely to be called directly // by external code (at::native::trace_backward). // They get their own declaration for BC purposes however. 
-TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx=-1); -TORCH_API at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx=-1); +TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const std::optional & per_sample_weights, int64_t padding_idx=-1); +TORCH_API at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const std::optional & per_sample_weights, int64_t padding_idx=-1); TORCH_API at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim); TORCH_API at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes); TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index); diff --git a/aten/src/ATen/native/Normalization.cpp b/aten/src/ATen/native/Normalization.cpp index 0a2aa053b712a..ce1b23c2bdf6f 100644 --- a/aten/src/ATen/native/Normalization.cpp +++ b/aten/src/ATen/native/Normalization.cpp @@ -538,7 +538,7 @@ BatchNormBackend _select_batch_norm_backend( // XXX: The indices of backends need to be kept synchronized between this function and its _backward. 
// TODO: remove cudnn_enabled arg std::tuple _batch_norm_impl_index( - const Tensor& input, const c10::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, const c10::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, + const Tensor& input, const std::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, const c10::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, bool training, double momentum, double eps, bool cudnn_enabled) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -620,7 +620,7 @@ std::tuple _batch_norm_impl_index( std::tuple _batch_norm_impl_index_backward( int64_t impl_index, - const Tensor& input, const Tensor& grad_output, const c10::optional& weight_opt /* optional */, const c10::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, const c10::optional& save_mean_opt /* optional */, const c10::optional& save_var_transform_opt /* optional */, + const Tensor& input, const Tensor& grad_output, const std::optional& weight_opt /* optional */, const c10::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, const c10::optional& save_mean_opt /* optional */, const c10::optional& save_var_transform_opt /* optional */, bool train, double epsilon, std::array output_mask, const Tensor &reservedSpace) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -666,8 +666,8 @@ std::tuple _batch_norm_impl_index_backward( // TODO: remove cudnn_enabled arg Tensor batch_norm( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, - const c10::optional& running_mean_opt, const c10::optional& running_var_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, + const std::optional& running_mean_opt, const c10::optional& running_var_opt, bool training, double momentum, double eps, bool cudnn_enabled) { const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); @@ -702,7 +702,7 @@ Tensor batch_norm( } Tensor instance_norm( - const Tensor& input, const c10::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, const c10::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, + const Tensor& input, const std::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, const c10::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -740,7 +740,7 @@ Tensor instance_norm( } std::tuple batch_norm_update_stats_cpu( - const Tensor& self, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, double momentum) { + const Tensor& self, const std::optional& running_mean_opt, const c10::optional& running_var_opt, double momentum) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned running_mean_maybe_owned = 
at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; @@ -758,7 +758,7 @@ std::tuple batch_norm_update_stats_cpu( }); } -std::tuple batch_norm_cpu_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, +std::tuple batch_norm_cpu_out(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -801,7 +801,7 @@ std::tuple batch_norm_cpu_out(const Tensor& self, con return std::tuple(out, save_mean, save_var); } -std::tuple batch_norm_cpu(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, +std::tuple batch_norm_cpu(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -851,7 +851,7 @@ std::tuple batch_norm_cpu(const Tensor& self, const c10: } std::tuple _batch_norm_with_update_cpu( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, double momentum, double eps) { Tensor output, save_mean, save_var; std::tie(output, save_mean, save_var) = @@ -861,7 +861,7 @@ std::tuple _batch_norm_with_update_cpu( } std::tuple _batch_norm_with_update_cpu_out( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var, Tensor& reserve) { std::tie(out, save_mean, save_var) = @@ -871,8 +871,8 @@ std::tuple _batch_norm_with_update_cpu_out( std::tuple _batch_norm_no_update( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, - const c10::optional& running_mean_opt, const c10::optional& running_var_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, + const std::optional& running_mean_opt, const c10::optional& running_var_opt, double momentum, double eps) { const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); @@ -884,41 +884,41 @@ std::tuple _batch_norm_no_update( } std::tuple _batch_norm_legit_cpu( - const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) { return batch_norm_cpu(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps); } std::tuple _batch_norm_legit_no_stats_cpu( - const Tensor& self, const c10::optional& 
weight_opt, const c10::optional& bias_opt, + const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double eps) { return batch_norm_cpu(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps); } std::tuple _batch_norm_legit_no_training( - const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, const Tensor& running_mean, const Tensor& running_var, double momentum, double eps) { return at::_native_batch_norm_legit(self, weight_opt, bias_opt, const_cast(running_mean), const_cast(running_var), /*train=*/false, momentum, eps); } -std::tuple _batch_norm_legit_cpu_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) { +std::tuple _batch_norm_legit_cpu_out(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) { return batch_norm_cpu_out(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps, out, save_mean, save_var); } -std::tuple _batch_norm_legit_no_stats_cpu_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) { +std::tuple _batch_norm_legit_no_stats_cpu_out(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var) { return batch_norm_cpu_out(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps, out, save_mean, save_var); } std::tuple _new_batch_norm_backward_cpu( const Tensor& grad_output, const Tensor& input, const Tensor& weight, - const c10::optional& running_mean_opt, const c10::optional& running_var_opt, - const c10::optional& save_mean_opt, const c10::optional& save_var_opt, + const std::optional& running_mean_opt, const c10::optional& running_var_opt, + const std::optional& save_mean_opt, const c10::optional& save_var_opt, bool update, double eps, std::array grad_input_mask, const Tensor& reserve) { return batch_norm_backward_cpu(grad_output, input, weight, running_mean_opt, running_var_opt, save_mean_opt, save_var_opt, update, eps, grad_input_mask); } -std::tuple batch_norm_backward_cpu(const Tensor& grad_out, const Tensor& self, const c10::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, +std::tuple batch_norm_backward_cpu(const Tensor& grad_out, const Tensor& self, const std::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, bool train, double eps, std::array grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); diff --git a/aten/src/ATen/native/PadNd.cpp b/aten/src/ATen/native/PadNd.cpp index aecab68c2be0f..e7172fe5a2c12 100644 --- a/aten/src/ATen/native/PadNd.cpp +++ b/aten/src/ATen/native/PadNd.cpp @@ -188,7 +188,7 @@ Tensor _pad_circular_symint(const 
Tensor &self, c10::SymIntArrayRef padding) { return out; } -Tensor _pad_enum_symint(const Tensor &self, c10::SymIntArrayRef pad, int64_t mode_int, c10::optional value) { +Tensor _pad_enum_symint(const Tensor &self, c10::SymIntArrayRef pad, int64_t mode_int, std::optional value) { const auto input_dim = self.dim(); TORCH_CHECK(pad.size() % 2 == 0, "Padding length must be divisible by 2"); TORCH_CHECK(static_cast(pad.size()) <= input_dim * 2, @@ -228,7 +228,7 @@ Tensor _pad_enum_symint(const Tensor &self, c10::SymIntArrayRef pad, int64_t mod "Only 2D, 3D, 4D, 5D padding with non-constant padding are supported for now"); } -Tensor pad_symint(const Tensor &self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional value) { +Tensor pad_symint(const Tensor &self, c10::SymIntArrayRef pad, c10::string_view mode, std::optional value) { const auto mode_enum = [&] { if (mode == "reflect") { return at::padding_mode::reflect; diff --git a/aten/src/ATen/native/Pool.h b/aten/src/ATen/native/Pool.h index 07940729fda8c..df73299ea2308 100644 --- a/aten/src/ATen/native/Pool.h +++ b/aten/src/ATen/native/Pool.h @@ -19,9 +19,9 @@ DECLARE_DISPATCH(max_pool2d_backward_fn, max_pool2d_backward_kernel); // averge pooling has same signature for forward and backward using avg_pool2d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH, - int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, c10::optional divisor_override); + int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, std::optional divisor_override); using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH, - int dW, int dH, int padW, int padH, bool count_include_pad, c10::optional divisor_override); + int dW, int dH, int padW, int padH, bool count_include_pad, std::optional divisor_override); DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel); DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel); @@ -30,11 +30,11 @@ DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel); using avg_pool3d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH, int64_t kD, int64_t dW, int64_t dH, int64_t dD, int64_t padW, int64_t padH, int64_t padD, bool count_include_pad, - c10::optional divisor_override); + std::optional divisor_override); using avg_pool3d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH, int kD, int dW, int dH, int dD, int padW, int padH, int padD, bool count_include_pad, - c10::optional divisor_override); + std::optional divisor_override); DECLARE_DISPATCH(avg_pool3d_fn, avg_pool3d_kernel); DECLARE_DISPATCH(avg_pool3d_backward_fn, avg_pool3d_backward_kernel); diff --git a/aten/src/ATen/native/RNN.cpp b/aten/src/ATen/native/RNN.cpp index 97ce09ac8e51d..fccd3420d3f67 100644 --- a/aten/src/ATen/native/RNN.cpp +++ b/aten/src/ATen/native/RNN.cpp @@ -1163,7 +1163,7 @@ bool _use_cudnn_rnn_flatten_weight() { // NB: This a (composite) wrapper for _thnn_fused_lstm_cell_backward_impl. // It duplicates the outputs of this function so the non-composite version doesn't have to. 
// The point is so that we avoid triggering TensorImpl use count asserts in debug mode -std::tuple _thnn_fused_lstm_cell_backward( const c10::optional& grad_hy_opt, const c10::optional& grad_cy_opt, +std::tuple _thnn_fused_lstm_cell_backward( const std::optional& grad_hy_opt, const c10::optional& grad_cy_opt, const Tensor& cx, const Tensor& cy, const Tensor& workspace, bool has_bias) { TORCH_INTERNAL_ASSERT(!GradMode::is_enabled()); @@ -1523,7 +1523,7 @@ std::tuple lstm( std::tuple lstm_cell( const Tensor& input, TensorList hx, - const Tensor& w_ih, const Tensor& w_hh, const c10::optional& b_ih_opt, const c10::optional& b_hh_opt) { + const Tensor& w_ih, const Tensor& w_hh, const std::optional& b_ih_opt, const c10::optional& b_hh_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt); const Tensor& b_ih = *b_ih_maybe_owned; @@ -1539,9 +1539,9 @@ std::tuple lstm_cell( } std::tuple -_thnn_differentiable_lstm_cell_backward( const c10::optional& grad_hy_opt, const c10::optional& grad_cy_opt, +_thnn_differentiable_lstm_cell_backward( const std::optional& grad_hy_opt, const c10::optional& grad_cy_opt, const Tensor& input_gates, - const Tensor& hidden_gates, const c10::optional& input_bias_opt, const c10::optional& hidden_bias_opt, + const Tensor& hidden_gates, const std::optional& input_bias_opt, const c10::optional& hidden_bias_opt, const Tensor& cx, const Tensor& cy) { // See [Note: hacky wrapper removal for optional tensor] @@ -1597,7 +1597,7 @@ std::tuple _thnn_differentiable_gru_cell const Tensor& grad_hy, const Tensor& input_gates, const Tensor& hidden_gates, - const Tensor& hx, const c10::optional& input_bias_opt, const c10::optional& hidden_bias_opt){ + const Tensor& hx, const std::optional& input_bias_opt, const c10::optional& hidden_bias_opt){ // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt); const Tensor& input_bias = *input_bias_maybe_owned; @@ -1637,7 +1637,7 @@ std::tuple _thnn_differentiable_gru_cell Tensor gru_cell( const Tensor& input, const Tensor& hx, - const Tensor& w_ih, const Tensor& w_hh, const c10::optional& b_ih_opt, const c10::optional& b_hh_opt) { + const Tensor& w_ih, const Tensor& w_hh, const std::optional& b_ih_opt, const c10::optional& b_hh_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt); const Tensor& b_ih = *b_ih_maybe_owned; @@ -1651,7 +1651,7 @@ Tensor gru_cell( Tensor rnn_tanh_cell( const Tensor& input, const Tensor& hx, - const Tensor& w_ih, const Tensor& w_hh, const c10::optional& b_ih_opt, const c10::optional& b_hh_opt) { + const Tensor& w_ih, const Tensor& w_hh, const std::optional& b_ih_opt, const c10::optional& b_hh_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt); const Tensor& b_ih = *b_ih_maybe_owned; @@ -1665,7 +1665,7 @@ Tensor rnn_tanh_cell( Tensor rnn_relu_cell( const Tensor& input, const Tensor& hx, - const Tensor& w_ih, const Tensor& w_hh, const c10::optional& b_ih_opt, const c10::optional& b_hh_opt) { + const Tensor& w_ih, const Tensor& w_hh, const std::optional& b_ih_opt, const c10::optional& b_hh_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned b_ih_maybe_owned = at::borrow_from_optional_tensor(b_ih_opt); const Tensor& b_ih = *b_ih_maybe_owned; @@ 
-1693,7 +1693,7 @@ static std::tuple quantized_lstm_input( bool train, bool bidirectional, bool batch_first, - c10::optional dtype, + std::optional dtype, bool use_dynamic) { auto hx = hx_.vec(); std::vector params; @@ -1747,7 +1747,7 @@ static std::tuple quantized_lstm_input_legacy( bool train, bool bidirectional, bool batch_first, - c10::optional dtype, + std::optional dtype, bool use_dynamic) { TORCH_CHECK( false, @@ -1766,7 +1766,7 @@ static std::tuple quantized_lstm_data( double dropout_p, bool train, bool bidirectional, - c10::optional dtype, + std::optional dtype, bool use_dynamic) { auto hx = hx_.vec(); std::vector params; @@ -1813,7 +1813,7 @@ static std::tuple quantized_lstm_data_legacy( double dropout_p, bool train, bool bidirectional, - c10::optional dtype, + std::optional dtype, bool use_dynamic) { TORCH_CHECK( false, diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp index d29b177c13960..96f6d6f90c87d 100644 --- a/aten/src/ATen/native/ReduceOps.cpp +++ b/aten/src/ATen/native/ReduceOps.cpp @@ -148,7 +148,7 @@ static ScalarType infer_dtype_from_optional( } } -static IntArrayRef optional_to_arrayref(const c10::optional& opt) { +static IntArrayRef optional_to_arrayref(const std::optional& opt) { return opt.has_value() ? opt.value() : IntArrayRef{}; } @@ -217,7 +217,7 @@ TORCH_META_FUNC(any)(const Tensor& self) { static void check_argmax_argmin( const char* name, const Tensor& self, - const c10::optional& dim) { + const std::optional& dim) { if (dim.has_value()) { auto dim_ = maybe_wrap_dim(dim.value(), self.dim()); native::zero_numel_check_dims(self, dim_, name); @@ -229,13 +229,13 @@ static void check_argmax_argmin( } TORCH_META_FUNC(argmax) -(const Tensor& self, c10::optional dim, bool keepdim) { +(const Tensor& self, std::optional dim, bool keepdim) { check_argmax_argmin("argmax()", self, dim); resize_reduction(*this, self, optional_to_arrayref(dim), keepdim, kLong); } TORCH_META_FUNC(argmin) -(const Tensor& self, c10::optional dim, bool keepdim) { +(const Tensor& self, std::optional dim, bool keepdim) { check_argmax_argmin("argmin()", self, dim); resize_reduction(*this, self, optional_to_arrayref(dim), keepdim, kLong); } @@ -245,7 +245,7 @@ static void meta_func_cum_ops( const char* name, const Tensor& self, int64_t dim, - c10::optional dtype) { + std::optional dtype) { // Checking whether 'dim' is valid. 
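`check_argmax_argmin` above wraps and checks `dim` only when the `std::optional<int64_t>` is engaged; without a `dim`, argmax/argmin index into the flattened input. A standalone sketch of that control flow in plain C++ (no ATen types); `wrap_dim` and `check_reduction_dim` are invented names, and `wrap_dim` only approximates `maybe_wrap_dim` (the 0-d special case is omitted):

```
#include <cstdint>
#include <optional>
#include <stdexcept>

// Rough stand-in for maybe_wrap_dim: accept negative indices and range-check.
inline int64_t wrap_dim(int64_t dim, int64_t ndim) {
  if (dim < -ndim || dim >= ndim) {
    throw std::out_of_range("dimension out of range");
  }
  return dim < 0 ? dim + ndim : dim;
}

// Mirrors the control flow shown above: a supplied dim gets wrapped and
// checked, while an empty optional passes through untouched and the caller
// reduces over the flattened input.
inline std::optional<int64_t> check_reduction_dim(std::optional<int64_t> dim,
                                                  int64_t ndim) {
  if (!dim.has_value()) {
    return std::nullopt;
  }
  return wrap_dim(*dim, ndim);
}
```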
maybe_wrap_dim(dim, self.dim()); @@ -264,12 +264,12 @@ static void meta_func_cum_ops( } TORCH_META_FUNC(cumsum) -(const Tensor& self, int64_t dim, c10::optional dtype) { +(const Tensor& self, int64_t dim, std::optional dtype) { meta_func_cum_ops(*this, "cumsum", self, dim, dtype); } TORCH_META_FUNC(cumprod) -(const Tensor& self, int64_t dim, c10::optional dtype) { +(const Tensor& self, int64_t dim, std::optional dtype) { meta_func_cum_ops(*this, "cumprod", self, dim, dtype); } @@ -283,7 +283,7 @@ TORCH_META_FUNC2(prod, dim_int) (const Tensor& self, int64_t dim, bool keepdim, - c10::optional dtype) { + std::optional dtype) { auto out_dtype = infer_dtype_from_optional(self, dtype, maybe_get_output()); resize_reduction(*this, self, dim, keepdim, out_dtype); } @@ -315,7 +315,7 @@ TORCH_META_FUNC2(mean, dim) static ScalarType get_result_or_self_value_dtype( const Tensor& self, const Tensor& result, - const c10::optional& dtype) { + const std::optional& dtype) { if (result.defined()) { return result.scalar_type(); } else { @@ -350,7 +350,7 @@ TORCH_META_FUNC2(norm, ScalarOpt_dim_dtype) } TORCH_META_FUNC(aminmax) -(const Tensor& self, c10::optional dim_opt, bool keepdim) { +(const Tensor& self, std::optional dim_opt, bool keepdim) { DimVector shape; if (dim_opt.has_value()) { auto dim = maybe_wrap_dim(dim_opt.value(), self.ndimension()); @@ -407,7 +407,7 @@ DEFINE_DISPATCH(aminmax_allreduce_stub); TORCH_IMPL_FUNC(aminmax_out) (const Tensor& self, - c10::optional dim_opt, + std::optional dim_opt, bool keepdim, const Tensor& min, const Tensor& max) { @@ -491,7 +491,7 @@ void impl_func_cum_ops( TORCH_IMPL_FUNC(cumsum_out) (const Tensor& self, int64_t dim, - c10::optional dtype, + std::optional dtype, const Tensor& result) { impl_func_cum_ops(self, dim, result, cumsum_stub); } @@ -499,7 +499,7 @@ TORCH_IMPL_FUNC(cumsum_out) TORCH_IMPL_FUNC(cumprod_out) (const Tensor& self, int64_t dim, - c10::optional dtype, + std::optional dtype, const Tensor& result) { impl_func_cum_ops(self, dim, result, cumprod_stub); } @@ -869,7 +869,7 @@ Tensor cummaxmin_backward(const Tensor& grad, const Tensor& input, const Tensor& return result.scatter_add_(dim, indices, grad); } -static Tensor prepend_append_on_dim(const Tensor& self, const c10::optional& prepend, const c10::optional& append, int64_t dim) { +static Tensor prepend_append_on_dim(const Tensor& self, const std::optional& prepend, const c10::optional& append, int64_t dim) { // Helper for diff that handles prepending and appending when at least one is present TORCH_INTERNAL_ASSERT(prepend.has_value() || append.has_value(), "either prepend or append must be have value"); if (!prepend.has_value() && append.has_value()) { @@ -881,7 +881,7 @@ static Tensor prepend_append_on_dim(const Tensor& self, const c10::optional&other, int64_t dim) { +static inline void diff_check_compatible_shape(const Tensor& self, const std::optional&other, int64_t dim) { // Helper for diff that checks whether the shape of the tensor to prepend or append // is compatible with that of input if (other.has_value()) { @@ -902,7 +902,7 @@ static inline void diff_check_compatible_shape(const Tensor& self, const c10::op } } -static inline void diff_check(const Tensor& self, int64_t n, int64_t dim, const c10::optional&prepend, const c10::optional& append) { +static inline void diff_check(const Tensor& self, int64_t n, int64_t dim, const std::optional&prepend, const c10::optional& append) { // Helper for diff that checks whether its parameters are valid TORCH_CHECK( self.dim() >= 1, @@ -943,7 +943,7 
@@ static inline Tensor diff_helper(const Tensor& self, int64_t n, int64_t dim) { return result; } -Tensor diff(const Tensor& self, int64_t n, int64_t dim, const c10::optional& prepend, const c10::optional& append) { +Tensor diff(const Tensor& self, int64_t n, int64_t dim, const std::optional& prepend, const c10::optional& append) { diff_check(self, n, dim, prepend, append); if ((!prepend.has_value() && !append.has_value()) || n == 0) { return diff_helper(self, n, dim); @@ -987,7 +987,7 @@ static inline Tensor& diff_out_helper(const Tensor& self, int64_t n, int64_t dim return result; } -Tensor& diff_out(const Tensor& self, int64_t n, int64_t dim, const c10::optional& prepend, const c10::optional& append, Tensor& result) { +Tensor& diff_out(const Tensor& self, int64_t n, int64_t dim, const std::optional& prepend, const c10::optional& append, Tensor& result) { diff_check(self, n, dim, prepend, append); if ((!prepend.has_value() && !append.has_value()) || n == 0) { return diff_out_helper(self, n, dim, result); @@ -997,7 +997,7 @@ Tensor& diff_out(const Tensor& self, int64_t n, int64_t dim, const c10::optional } } -static void pre_check_gradient(const Tensor& self, c10::optional spacing_size, at::OptionalIntArrayRef dim, int64_t edge_order) { +static void pre_check_gradient(const Tensor& self, std::optional spacing_size, at::OptionalIntArrayRef dim, int64_t edge_order) { // Helper for gradient function to make sure input data satisfies prerequisites TORCH_CHECK(self.scalar_type() != ScalarType::Byte, "torch.gradient does not support uint8 input."); if (spacing_size.has_value() && !dim.has_value()) { @@ -1088,7 +1088,7 @@ static std::vector gradient_helper_float(const Tensor& self, ArrayRef gradient_dim_preprocess(const Tensor& self, c10::optional dim) { +static std::vector gradient_dim_preprocess(const Tensor& self, std::optional dim) { // if gradient dim is provided as an integer, then we need to compute gradient only on this direction. // Moreover, if it's not provided at all, then we are interested in gradient for all directions. // Finally, if dim is provided as vector of ints, then it is not expected to be called by this function. @@ -1103,16 +1103,16 @@ static std::vector gradient_dim_preprocess(const Tensor& self, c10::opt std::vector gradient(const Tensor& self, TensorList coordinates, IntArrayRef dim, int64_t edge_order) { pre_check_gradient(self, - c10::optional(coordinates.size()), + std::optional(coordinates.size()), at::OptionalIntArrayRef(dim), edge_order); return gradient_helper(self, coordinates, dim, edge_order); } -std::vector gradient(const Tensor& self, TensorList coordinates, c10::optional dim, int64_t edge_order) { +std::vector gradient(const Tensor& self, TensorList coordinates, std::optional dim, int64_t edge_order) { const auto processed_dim = gradient_dim_preprocess(self, dim); pre_check_gradient(self, - c10::optional(coordinates.size()), + std::optional(coordinates.size()), dim.has_value() ? 
at::OptionalIntArrayRef(processed_dim) : c10::nullopt, edge_order); return gradient_helper(self, coordinates, processed_dim, edge_order); @@ -1120,16 +1120,16 @@ std::vector gradient(const Tensor& self, TensorList coordinates, c10::op std::vector gradient(const Tensor& self, c10::ArrayRef spacing, IntArrayRef dim, int64_t edge_order) { pre_check_gradient(self, - c10::optional(spacing.size()), + std::optional(spacing.size()), at::OptionalIntArrayRef(dim), edge_order); return gradient_helper_float(self, spacing, dim, edge_order); } -std::vector gradient(const Tensor& self, ArrayRef spacing, c10::optional dim, int64_t edge_order) { +std::vector gradient(const Tensor& self, ArrayRef spacing, std::optional dim, int64_t edge_order) { const auto processed_dim = gradient_dim_preprocess(self, dim); pre_check_gradient(self, - c10::optional(spacing.size()), + std::optional(spacing.size()), dim.has_value() ? at::OptionalIntArrayRef(processed_dim) : c10::nullopt, edge_order); return gradient_helper_float(self, spacing, processed_dim, edge_order); @@ -1140,13 +1140,13 @@ std::vector gradient(const Tensor& self, const Scalar& unit_size, IntArr // be taken as unit size at every given dimension element of - dim. std::vector spacing(dim.size(), unit_size); pre_check_gradient(self, - c10::optional(spacing.size()), + std::optional(spacing.size()), at::OptionalIntArrayRef(dim), edge_order); return gradient_helper_float(self, spacing, dim, edge_order); } -std::vector gradient(const Tensor& self, const c10::optional& unit_size, c10::optional dim, int64_t edge_order) { +std::vector gradient(const Tensor& self, const std::optional& unit_size, c10::optional dim, int64_t edge_order) { const auto processed_dim = gradient_dim_preprocess(self, dim); // When unit_size not provided, it is always assumed to be equal to 1. // When dim has integer value it implies we are looking for gradient in the specific direction, however when @@ -1154,7 +1154,7 @@ std::vector gradient(const Tensor& self, const c10::optional& un std::vector spacing(dim.has_value() ? 1 : self.dim(), unit_size.has_value() ? unit_size.value() : 1.0) ; pre_check_gradient(self, - unit_size.has_value() ? c10::optional(spacing.size()) : c10::nullopt, + unit_size.has_value() ? std::optional(spacing.size()) : c10::nullopt, dim.has_value() ? 
at::OptionalIntArrayRef(processed_dim) : c10::nullopt, edge_order); return gradient_helper_float(self, spacing, processed_dim, edge_order); @@ -1163,7 +1163,7 @@ std::vector gradient(const Tensor& self, const c10::optional& un std::vector gradient(const Tensor& self, IntArrayRef dim, int64_t edge_order) { std::vector spacing(dim.size(), 1.0) ; pre_check_gradient(self, - c10::optional(spacing.size()), + std::optional(spacing.size()), at::OptionalIntArrayRef(dim), edge_order); return gradient_helper_float(self, spacing, dim, edge_order); @@ -1217,11 +1217,11 @@ TORCH_IMPL_FUNC(sum_out) } } -Tensor sum(const Tensor &self, c10::optional dtype) { +Tensor sum(const Tensor &self, std::optional dtype) { return at::sum(self, IntArrayRef{}, false, dtype); } -Tensor sum(const Tensor& self, DimnameList dim, bool keepdim, c10::optional dtype) { +Tensor sum(const Tensor& self, DimnameList dim, bool keepdim, std::optional dtype) { return at::sum(self, dimnames_to_positions(self, dim), keepdim, dtype); } @@ -1252,7 +1252,7 @@ Tensor& nansum_out(const Tensor& self, at::OptionalIntArrayRef dim, return result; } -Tensor nansum(const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional opt_dtype) { +Tensor nansum(const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, std::optional opt_dtype) { ScalarType dtype = get_dtype_from_self(self, opt_dtype, true); Tensor result = create_reduction_result(self, dim, keepdim, dtype); return at::native::nansum_out(self, dim, keepdim, dtype, result); @@ -1306,7 +1306,7 @@ static void impl_func_prod( const Tensor& self, IntArrayRef dims, bool keepdim, - c10::optional dtype, + std::optional dtype, const Tensor& result) { auto iter = meta::make_reduction_from_out_ty(self, result, dims, keepdim, result.scalar_type()); if (iter.numel() == 0) { @@ -1320,12 +1320,12 @@ TORCH_IMPL_FUNC(prod_out) (const Tensor& self, int64_t dim, bool keepdim, - c10::optional dtype, + std::optional dtype, const Tensor& result) { impl_func_prod(self, dim, keepdim, dtype, result); } -Tensor prod(const Tensor &self, c10::optional opt_dtype) { +Tensor prod(const Tensor &self, std::optional opt_dtype) { auto dtype = get_dtype_from_self(self, opt_dtype, true); auto shape = meta::get_reduction_shape(self, {}, false); Tensor result = at::empty(shape, self.options().dtype(dtype)); @@ -1333,7 +1333,7 @@ Tensor prod(const Tensor &self, c10::optional opt_dtype) { return result; } -Tensor prod(const Tensor& self, Dimname dim, bool keepdim, c10::optional dtype) { +Tensor prod(const Tensor& self, Dimname dim, bool keepdim, std::optional dtype) { return at::prod(self, dimname_to_position(self, dim), keepdim, dtype); } @@ -1346,7 +1346,7 @@ TORCH_IMPL_FUNC(mean_out) (const Tensor& self, OptionalIntArrayRef opt_dim, bool keepdim, - c10::optional opt_dtype, + std::optional opt_dtype, const Tensor& result) { ScalarType dtype = result.scalar_type(); // TODO: the TensorIterator reduction implementation of mean @@ -1407,7 +1407,7 @@ Tensor mean(const Tensor& self, DimnameList dim, bool keepdim, optional opt_dtype, Tensor& result) { + bool keepdim, std::optional opt_dtype, Tensor& result) { return at::mean_out(result, self, dimnames_to_positions(self, dim), keepdim, opt_dtype); } @@ -1416,7 +1416,7 @@ Tensor& nanmean_out( const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, - c10::optional opt_dtype, + std::optional opt_dtype, Tensor& result) { TORCH_CHECK( self.is_floating_point() || self.is_complex(), @@ -1703,7 +1703,7 @@ TORCH_IMPL_FUNC(amax_out) (const Tensor& self, IntArrayRef 
dim, bool keepdim, co template void argmax_argmin_impl( const Tensor& self, - c10::optional dim, + std::optional dim, bool keepdim, const Tensor& result, Stub& stub) { @@ -1737,7 +1737,7 @@ void argmax_argmin_impl( TORCH_IMPL_FUNC(argmax_out) (const Tensor& self, - c10::optional dim, + std::optional dim, bool keepdim, const Tensor& result) { argmax_argmin_impl(self, dim, keepdim, result, argmax_stub); @@ -1745,7 +1745,7 @@ TORCH_IMPL_FUNC(argmax_out) TORCH_IMPL_FUNC(argmin_out) (const Tensor& self, - c10::optional dim, + std::optional dim, bool keepdim, const Tensor& result) { argmax_argmin_impl(self, dim, keepdim, result, argmin_stub); @@ -1812,7 +1812,7 @@ namespace { static Tensor& std_var_out( const char* fname, Tensor& result, const Tensor& self, - at::OptionalIntArrayRef dim, const c10::optional& correction_opt, + at::OptionalIntArrayRef dim, const std::optional& correction_opt, bool keepdim, bool take_sqrt) { TORCH_CHECK(self.device().is_cpu() || self.device().is_cuda(), "std and var only supports tensors on a CPU or CUDA device, but got: ", @@ -1884,7 +1884,7 @@ static Tensor& std_var_out( static std::tuple std_var_mean_out( const char* fname, Tensor& result1, Tensor& result2, const Tensor& self, - at::OptionalIntArrayRef dim, const c10::optional& correction_opt, + at::OptionalIntArrayRef dim, const std::optional& correction_opt, bool keepdim, bool take_sqrt) { AT_ASSERT(result1.defined() && result2.defined()); TORCH_CHECK(self.device().is_cpu() || self.is_cuda(), @@ -1995,7 +1995,7 @@ static TensorOptions options_to_value_type(TensorOptions opts) { std::tuple var_mean( const Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, bool keepdim) { + const std::optional& correction, bool keepdim) { Tensor result1 = at::empty({0}, options_to_value_type(self.options())); Tensor result2 = at::empty({0}, self.options()); return std_var_mean_out( @@ -2004,7 +2004,7 @@ std::tuple var_mean( std::tuple std_mean( const Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, bool keepdim) { + const std::optional& correction, bool keepdim) { Tensor result1 = at::empty({0}, options_to_value_type(self.options())); Tensor result2 = at::empty({0}, self.options()); return std_var_mean_out( @@ -2047,26 +2047,26 @@ Tensor& std_out(const Tensor& self, at::OptionalIntArrayRef opt_dim, bool unbias } Tensor std(const Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, bool keepdim) { + const std::optional& correction, bool keepdim) { Tensor result = at::empty({0}, options_to_value_type(self.options())); return std_var_out("std", result, self, dim, correction, keepdim, true); } Tensor& std_out( const Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, bool keepdim, Tensor& result) { + const std::optional& correction, bool keepdim, Tensor& result) { return std_var_out("std", result, self, dim, correction, keepdim, true); } Tensor& var_out( const Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, bool keepdim, Tensor& result) { + const std::optional& correction, bool keepdim, Tensor& result) { return std_var_out("var", result, self, dim, correction, keepdim, false); } Tensor var( const Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, bool keepdim) { + const std::optional& correction, bool keepdim) { Tensor result = at::empty({0}, options_to_value_type(self.options())); return std_var_out("var", result, self, dim, correction, keepdim, false); } @@ -2096,32 
+2096,32 @@ std::tuple std_mean(const Tensor& self, DimnameList dim, bool unb return at::std_mean(self, dimnames_to_positions(self, dim), unbiased, keepdim); } -Tensor std(const Tensor& self, DimnameList dim, const c10::optional& correction, bool keepdim) { +Tensor std(const Tensor& self, DimnameList dim, const std::optional& correction, bool keepdim) { return at::std(self, dimnames_to_positions(self, dim), correction, keepdim); } -Tensor& std_out(const Tensor& self, DimnameList dim, const c10::optional& correction, +Tensor& std_out(const Tensor& self, DimnameList dim, const std::optional& correction, bool keepdim, Tensor& result) { return at::std_out(result, self, dimnames_to_positions(self, dim), correction, keepdim); } -Tensor var(const Tensor& self, DimnameList dim, const c10::optional& correction, bool keepdim) { +Tensor var(const Tensor& self, DimnameList dim, const std::optional& correction, bool keepdim) { return at::var(self, dimnames_to_positions(self, dim), correction, keepdim); } -Tensor& var_out(const Tensor& self, DimnameList dim, const c10::optional& correction, +Tensor& var_out(const Tensor& self, DimnameList dim, const std::optional& correction, bool keepdim, Tensor& result) { return at::var_out( result, self, dimnames_to_positions(self, dim), correction, keepdim); } std::tuple var_mean(const Tensor& self, DimnameList dim, - const c10::optional& correction, bool keepdim) { + const std::optional& correction, bool keepdim) { return at::var_mean(self, dimnames_to_positions(self, dim), correction, keepdim); } std::tuple std_mean(const Tensor& self, DimnameList dim, - const c10::optional& correction, bool keepdim) { + const std::optional& correction, bool keepdim) { return at::std_mean(self, dimnames_to_positions(self, dim), correction, keepdim); } @@ -2167,22 +2167,22 @@ Tensor logcumsumexp(const Tensor& self, Dimname dim) { Tensor& logcumsumexp_out(const Tensor& self, Dimname dim, Tensor& result) { return at::logcumsumexp_out(result, self, dimname_to_position(self, dim)); } -Tensor cumsum(const Tensor& self, Dimname dim, c10::optional dtype) { +Tensor cumsum(const Tensor& self, Dimname dim, std::optional dtype) { return at::cumsum(self, dimname_to_position(self, dim), dtype); } -Tensor& cumsum_(Tensor& self, Dimname dim, c10::optional dtype) { +Tensor& cumsum_(Tensor& self, Dimname dim, std::optional dtype) { return at::cumsum_out(self, self, dimname_to_position(self, dim), dtype); } -Tensor& cumsum_out(const Tensor& self, Dimname dim, c10::optional dtype, Tensor& result) { +Tensor& cumsum_out(const Tensor& self, Dimname dim, std::optional dtype, Tensor& result) { return at::cumsum_out(result, self, dimname_to_position(self, dim), dtype); } -Tensor cumprod(const Tensor& self, Dimname dim, c10::optional dtype) { +Tensor cumprod(const Tensor& self, Dimname dim, std::optional dtype) { return at::cumprod(self, dimname_to_position(self, dim), dtype); } -Tensor& cumprod_(Tensor& self, Dimname dim, c10::optional dtype) { +Tensor& cumprod_(Tensor& self, Dimname dim, std::optional dtype) { return at::cumprod_out(self, self, dimname_to_position(self, dim), dtype); } -Tensor& cumprod_out(const Tensor& self, Dimname dim, c10::optional dtype, Tensor& result) { +Tensor& cumprod_out(const Tensor& self, Dimname dim, std::optional dtype, Tensor& result) { return at::cumprod_out(result, self, dimname_to_position(self, dim), dtype); } std::tuple cummax(const Tensor& self, Dimname dim) { @@ -2303,15 +2303,15 @@ Tensor value_selecting_reduction_backward_symint(const Tensor& grad, int64_t dim 
return inplace_scatter_if_not_tensor_subclass(grad, indices); } -Tensor sum_csr(const Tensor &self, c10::optional dtype) { +Tensor sum_csr(const Tensor &self, std::optional dtype) { return self.values().sum(dtype); } -Tensor sum_coo(const Tensor &self, c10::optional dtype) { +Tensor sum_coo(const Tensor &self, std::optional dtype) { return self._values().sum(dtype); } -Tensor sum_sparse_coo(const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) { +Tensor sum_sparse_coo(const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, std::optional dtype) { Tensor result; if (dim.has_value()) { if (dtype.has_value()) { @@ -2341,7 +2341,7 @@ Tensor sum_sparse_compressed( const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, - c10::optional dtype) { + std::optional dtype) { // TODO: The signature of sum.dim_IntList and _sparse_csr_sum.dim_dtype is a little // bit different in the second parameters `dim`, which causes the conversion of `dim` // to call into `_sparse_csr_sum`. Align the signatures would be a better choice. diff --git a/aten/src/ATen/native/ReduceOps.h b/aten/src/ATen/native/ReduceOps.h index 604d6ae8a74ef..d834f17a6d774 100644 --- a/aten/src/ATen/native/ReduceOps.h +++ b/aten/src/ATen/native/ReduceOps.h @@ -33,7 +33,7 @@ using reduce_std_var_function = DECLARE_DISPATCH(reduce_std_var_function, std_var_stub); using reduce_norm_fn = - void (*)(Tensor&, const Tensor&, const c10::Scalar&, c10::optional); + void (*)(Tensor&, const Tensor&, const c10::Scalar&, std::optional); DECLARE_DISPATCH(reduce_norm_fn, norm_kernel); using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&); diff --git a/aten/src/ATen/native/ReduceOpsUtils.h b/aten/src/ATen/native/ReduceOpsUtils.h index 6989b00f6f3e6..505cf3bb3a778 100644 --- a/aten/src/ATen/native/ReduceOpsUtils.h +++ b/aten/src/ATen/native/ReduceOpsUtils.h @@ -77,7 +77,7 @@ inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &sel return false; } -inline c10::optional _allreduce_return_trivial( +inline std::optional _allreduce_return_trivial( const Tensor& self, const Scalar& ident) { // Return identity @@ -102,7 +102,7 @@ static inline void check_scalar_type_device_layout_equal(const Tensor& out, cons OPTION_TYPE_EQUALITY_CHECK(layout, out.options(), self.options()); } -static inline Tensor integer_upcast(const Tensor& self, c10::optional dtype) { +static inline Tensor integer_upcast(const Tensor& self, std::optional dtype) { ScalarType scalarType = self.scalar_type(); TORCH_CHECK(!isBarebonesUnsignedType(scalarType), "integer upcasting for uint16, uint32 and uint64 is not currently implemented"); ScalarType upcast_scalarType = dtype.value_or(at::isIntegralType(scalarType, /*includeBool=*/true) ? ScalarType::Long : scalarType); @@ -323,7 +323,7 @@ static C10_UNUSED void zero_numel_tensor_resize(Tensor& result, Tensor& result_i inline ScalarType get_dtype_from_self( const Tensor& self, - const c10::optional& dtype, + const std::optional& dtype, bool promote_integers) { if (dtype.has_value()) { return dtype.value(); @@ -335,7 +335,7 @@ inline ScalarType get_dtype_from_self( return src_type; } -inline ScalarType get_dtype_from_result(Tensor& result, c10::optional dtype) { +inline ScalarType get_dtype_from_result(Tensor& result, std::optional dtype) { TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. 
You likely tried to call an operator with an out argument but the out argument was an undefined tensor."); if (dtype.has_value()) { return dtype.value(); diff --git a/aten/src/ATen/native/Repeat.cpp b/aten/src/ATen/native/Repeat.cpp index dd87cead1f480..8bd253134b7a9 100644 --- a/aten/src/ATen/native/Repeat.cpp +++ b/aten/src/ATen/native/Repeat.cpp @@ -41,7 +41,7 @@ namespace at::native { Tensor repeat_interleave_cpu( const Tensor& repeat, - c10::optional output_size) { + std::optional output_size) { Tensor output; AT_DISPATCH_INDEX_TYPES(repeat.scalar_type(), "repeat_interleave_cpu", [&]() { output = repeat_interleave_common>( @@ -54,8 +54,8 @@ Tensor repeat_interleave_cpu( Tensor repeat_interleave_symint( const Tensor& self, const Tensor& repeats, - c10::optional dim, - c10::optional output_size) { + std::optional dim, + std::optional output_size) { Tensor input = self; // Store conj and neg bits @@ -101,8 +101,8 @@ Tensor repeat_interleave_symint( Tensor repeat_interleave_symint( const Tensor& self, c10::SymInt repeats, - c10::optional dim_opt, - c10::optional output_size) { + std::optional dim_opt, + std::optional output_size) { Tensor input = dim_opt ? self : self.flatten(); int64_t dim = c10::maybe_wrap_dim(dim_opt.value_or(0), self.dim()); TORCH_CHECK(repeats >= 0, "Repeats must be non-negative"); diff --git a/aten/src/ATen/native/Repeat.h b/aten/src/ATen/native/Repeat.h index e9a471d16f931..879a09bddd99b 100644 --- a/aten/src/ATen/native/Repeat.h +++ b/aten/src/ATen/native/Repeat.h @@ -17,7 +17,7 @@ template < void compute(const index_t*, const int64_t*, index_t*, int64_t, int64_t)> static inline Tensor repeat_interleave_common( const Tensor& repeats, - c10::optional output_size) { + std::optional output_size) { TORCH_CHECK( repeats.dim() == 1, "repeat_interleave only accept 1D vector as repeat"); TORCH_CHECK( diff --git a/aten/src/ATen/native/Resize.cpp b/aten/src/ATen/native/Resize.cpp index be88538ed7082..fd06627b70277 100644 --- a/aten/src/ATen/native/Resize.cpp +++ b/aten/src/ATen/native/Resize.cpp @@ -136,7 +136,7 @@ const Tensor& resize_as_sparse_(const Tensor& self, const Tensor& src); const Tensor& resize_as_( const Tensor& self, const Tensor& the_template, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { if (self.is_sparse() && the_template.is_sparse()) { TORCH_CHECK( !optional_memory_format.has_value(), @@ -243,7 +243,7 @@ template const Tensor& _resize_( const Tensor& self, ArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { auto* self_ = self.unsafeGetTensorImpl(); int64_t old_storage_nbytes = self_->unsafe_storage() ? 
self_->unsafe_storage().sym_nbytes().maybe_as_int().value_or(-1) : 0; // NOLINTNEXTLINE(bugprone-argument-comment) @@ -267,7 +267,7 @@ const Tensor& _resize_( const Tensor& resize_( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { if (self.has_names()) { return resize_named_tensor_(self, size, optional_memory_format); } @@ -277,7 +277,7 @@ const Tensor& resize_( const Tensor& resize__symint( const Tensor& self, c10::SymIntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { TORCH_INTERNAL_ASSERT(!self.has_names()) return _resize_(self, size, optional_memory_format); } diff --git a/aten/src/ATen/native/ResizeCommon.h b/aten/src/ATen/native/ResizeCommon.h index 02d1e95c42efe..cea2612a22127 100644 --- a/aten/src/ATen/native/ResizeCommon.h +++ b/aten/src/ATen/native/ResizeCommon.h @@ -32,7 +32,7 @@ inline T storage_size_for(ArrayRef size, ArrayRef stride) { inline const Tensor& resize_named_tensor_( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { TORCH_INTERNAL_ASSERT(self.has_names()); TORCH_CHECK( self.sizes() == size, diff --git a/aten/src/ATen/native/ScatterGatherChecks.h b/aten/src/ATen/native/ScatterGatherChecks.h index 829959c347035..4aad28eb1f73a 100644 --- a/aten/src/ATen/native/ScatterGatherChecks.h +++ b/aten/src/ATen/native/ScatterGatherChecks.h @@ -15,7 +15,7 @@ static void scatter_gather_dtype_check( const std::string& method_name, const Tensor& self, const Tensor& index, - const c10::optional& src_opt = c10::nullopt + const std::optional& src_opt = c10::nullopt ) { if (index.numel() != 0) { TORCH_CHECK( @@ -66,7 +66,7 @@ static C10_UNUSED void gather_shape_check(const Tensor& self, int64_t dim, // 3. 
index.dim() == self.dim() == src.dim() static C10_UNUSED void scatter_shape_check( const Tensor& self, int64_t dim, const Tensor& index, - const c10::optional& src_opt = c10::nullopt + const std::optional& src_opt = c10::nullopt ) { if (index.numel() == 0) return; TORCH_CHECK( diff --git a/aten/src/ATen/native/SegmentReduce.cpp b/aten/src/ATen/native/SegmentReduce.cpp index 3c7b539ee4b6d..0ab01bbe8c0bd 100644 --- a/aten/src/ATen/native/SegmentReduce.cpp +++ b/aten/src/ATen/native/SegmentReduce.cpp @@ -33,7 +33,7 @@ void _segment_reduce_lengths_cpu_kernel1( const Tensor& data, const T* lengths_data, int64_t axis, - const c10::optional& initial, + const std::optional& initial, Tensor& output, int64_t segment_count, int64_t lengths_stride_axis) { @@ -132,7 +132,7 @@ Tensor _segment_reduce_lengths_cpu_kernel( const Tensor& data, const Tensor& lengths, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { // data and lengths should be contiguous from the call to .contiguous in segment_reduce_kernel TORCH_CHECK(data.is_contiguous(), "Expected data to be contiguous."); TORCH_CHECK(lengths.is_contiguous(), "Expected lengths to be contiguous."); @@ -158,7 +158,7 @@ Tensor _segment_reduce_offsets_cpu_kernel( const Tensor& data, const Tensor& offsets, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { // data and lengths should be contiguous from the call to .contiguous in segment_reduce_kernel TORCH_CHECK(data.is_contiguous(), "Expected data to be contiguous."); TORCH_CHECK(offsets.is_contiguous(), "Expected offsets to be contiguous."); @@ -187,7 +187,7 @@ void _segment_reduce_cpu_lengths_backward_kernel1( ReductionType reduction, const T* lengths_data, int64_t axis, - const c10::optional& initial, + const std::optional& initial, Tensor& grad_input, int64_t segment_count, int64_t lengths_stride_axis) { @@ -323,7 +323,7 @@ Tensor _segment_reduce_cpu_lengths_backward_kernel( ReductionType reduction, const Tensor& lengths_contig, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { axis = lengths_contig.dim() - 1; int64_t segment_count = lengths_contig.size(axis); int64_t lengths_stride_axis = lengths_contig.stride(axis); @@ -356,7 +356,7 @@ Tensor _segment_reduce_cpu_offsets_backward_kernel( ReductionType reduction, const Tensor& offsets_contig, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { axis = offsets_contig.dim() - 1; int64_t segment_count = offsets_contig.size(axis) - 1; int64_t offsets_stride_axis = offsets_contig.stride(axis); @@ -386,12 +386,12 @@ Tensor _segment_reduce_cpu_offsets_backward_kernel( Tensor segment_reduce_kernel( const Tensor& data, c10::string_view reduce, - const c10::optional& lengths, - const c10::optional& indices, - const c10::optional& offsets, + const std::optional& lengths, + const std::optional& indices, + const std::optional& offsets, int64_t axis, bool unsafe, - const c10::optional& initial) { + const std::optional& initial) { axis = maybe_wrap_dim(axis, data.ndimension()); TORCH_CHECK(data.numel() >= 0); @@ -484,13 +484,13 @@ Tensor _segment_reduce_backward_kernel( const Tensor& output, const Tensor& data, c10::string_view reduce, - const c10::optional& lengths, - const c10::optional& offsets, + const std::optional& lengths, + const std::optional& offsets, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { axis = maybe_wrap_dim(axis, data.ndimension()); // check that one of lengths or offsets is defined - // 
codegen for derivatives.yaml passes an undefined Tensor for None rather than a c10::optional + // codegen for derivatives.yaml passes an undefined Tensor for None rather than a std::optional // so checking .has_value() doesn't work unlike in the forward pass auto lengths_has_value = lengths.has_value() && lengths.value().defined(); auto offsets_has_value = offsets.has_value() && offsets.value().defined(); diff --git a/aten/src/ATen/native/SegmentReduce.h b/aten/src/ATen/native/SegmentReduce.h index 0f14aff64f887..44429d0594bfc 100644 --- a/aten/src/ATen/native/SegmentReduce.h +++ b/aten/src/ATen/native/SegmentReduce.h @@ -15,7 +15,7 @@ using segment_reduce_lengths_fn = Tensor (*)( const Tensor&, const Tensor&, int64_t, - const c10::optional&); + const std::optional&); DECLARE_DISPATCH(segment_reduce_lengths_fn, _segment_reduce_lengths_stub); using segment_reduce_offsets_fn = Tensor (*)( @@ -23,7 +23,7 @@ using segment_reduce_offsets_fn = Tensor (*)( const Tensor&, const Tensor&, int64_t, - const c10::optional&); + const std::optional&); DECLARE_DISPATCH(segment_reduce_offsets_fn, _segment_reduce_offsets_stub); using segment_reduce_lengths_backward_fn = Tensor (*)( @@ -33,7 +33,7 @@ using segment_reduce_lengths_backward_fn = Tensor (*)( ReductionType, const Tensor&, int64_t, - const c10::optional&); + const std::optional&); DECLARE_DISPATCH(segment_reduce_lengths_backward_fn, _segment_reduce_lengths_backward_stub); using segment_reduce_offsets_backward_fn = Tensor (*)( @@ -43,7 +43,7 @@ using segment_reduce_offsets_backward_fn = Tensor (*)( ReductionType, const Tensor&, int64_t, - const c10::optional&); + const std::optional&); DECLARE_DISPATCH(segment_reduce_offsets_backward_fn, _segment_reduce_offsets_backward_stub); } // namespace native diff --git a/aten/src/ATen/native/SoftMax.cpp b/aten/src/ATen/native/SoftMax.cpp index bd321a0a88e7a..3188479b931f3 100644 --- a/aten/src/ATen/native/SoftMax.cpp +++ b/aten/src/ATen/native/SoftMax.cpp @@ -155,7 +155,7 @@ void host_softmax( const Tensor& input, const int64_t dim, bool* mask = nullptr, - const c10::optional mask_type_ = {}) { + const std::optional mask_type_ = {}) { if (MaskedSoftMax) { TORCH_CHECK(mask_type_.has_value(), "Mask Type should be defined"); @@ -449,7 +449,7 @@ static Tensor softmax(const Tensor& input_, const int64_t dim_) { return result; } -Tensor softmax(const Tensor& input_, const int64_t dim_, c10::optional dtype) { +Tensor softmax(const Tensor& input_, const int64_t dim_, std::optional dtype) { auto result = [&]() { NoNamesGuard guard; if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half && dtype == ScalarType::Float){ @@ -466,7 +466,7 @@ Tensor softmax(const Tensor& input_, const int64_t dim_, c10::optional dtype, + std::optional dtype, Tensor& output_) { Tensor output_temp; if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half && @@ -501,7 +501,7 @@ Tensor& softmax_out( } // special_softmax, alias for softmax -Tensor special_softmax(const Tensor& input_, const int64_t dim_, c10::optional dtype) { +Tensor special_softmax(const Tensor& input_, const int64_t dim_, std::optional dtype) { return at::softmax(input_, dim_, dtype); } @@ -514,7 +514,7 @@ static Tensor log_softmax(const Tensor& input_, const int64_t dim_) { return result; } -Tensor log_softmax(const Tensor& input_, const int64_t dim_, c10::optional dtype) { +Tensor log_softmax(const Tensor& input_, const int64_t dim_, std::optional dtype) { auto result = [&]() { NoNamesGuard guard; if (input_.is_cuda() && input_.scalar_type() == 
ScalarType::Half && dtype == ScalarType::Float){ @@ -531,7 +531,7 @@ Tensor log_softmax(const Tensor& input_, const int64_t dim_, c10::optional dtype, + std::optional dtype, Tensor& output_) { Tensor output_temp; if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half && @@ -565,7 +565,7 @@ Tensor& log_softmax_out( return output_; } -Tensor special_log_softmax(const Tensor& input, const int64_t dim, c10::optional dtype) { +Tensor special_log_softmax(const Tensor& input, const int64_t dim, std::optional dtype) { return at::log_softmax(input, dim, dtype); } @@ -587,7 +587,7 @@ Tensor log_softmax(const Tensor& self, Dimname dim, optional dtype) return at::log_softmax(self, dimname_to_position(self, dim), dtype); } -Tensor masked_softmax_cpu(const Tensor& input_, const Tensor& mask_, const c10::optional dim_, const c10::optional mask_type_) { +Tensor masked_softmax_cpu(const Tensor& input_, const Tensor& mask_, const std::optional dim_, const c10::optional mask_type_) { auto mask = mask_.contiguous(); auto mask_type = mask_type_; // Mask type might get transformed below @@ -652,7 +652,7 @@ Tensor masked_softmax_backward_cpu( const Tensor& grad_, const Tensor& output_, const Tensor& mask_, - const c10::optional dim_) { + const std::optional dim_) { TORCH_CHECK( grad_.sizes() == mask_.sizes(), "Mask shape should match grad shape"); TORCH_CHECK( diff --git a/aten/src/ATen/native/Sorting.cpp b/aten/src/ATen/native/Sorting.cpp index b31007408c7ae..f9980ffd7229d 100644 --- a/aten/src/ATen/native/Sorting.cpp +++ b/aten/src/ATen/native/Sorting.cpp @@ -71,7 +71,7 @@ TORCH_META_FUNC(topk) } TORCH_META_FUNC2(sort, stable) -(const Tensor& self, c10::optional stable, int64_t dim, bool descending) { +(const Tensor& self, std::optional stable, int64_t dim, bool descending) { maybe_wrap_dim(dim, self.dim()); // See issue: https://github.com/pytorch/pytorch/issues/65863 @@ -939,7 +939,7 @@ Tensor nanmedian_cpu(const Tensor& self) { TORCH_IMPL_FUNC(sort_stable_out) (const Tensor& self, - c10::optional stable, + std::optional stable, int64_t dim, bool descending, const Tensor& values, diff --git a/aten/src/ATen/native/SpectralOps.cpp b/aten/src/ATen/native/SpectralOps.cpp index 7ed068874e68a..5f9ff1b838220 100644 --- a/aten/src/ATen/native/SpectralOps.cpp +++ b/aten/src/ATen/native/SpectralOps.cpp @@ -114,7 +114,7 @@ Tensor promote_tensor_fft(const Tensor& t, bool require_complex=false) { // Convert NumPy compatible normalization mode string to enum values // NOTE: NumPy's normalization modes have direction-specific meanings. For example, // "forward" translates to `by_n` for a forward transform and `none` for backward. -fft_norm_mode norm_from_string(c10::optional norm, bool forward) { +fft_norm_mode norm_from_string(std::optional norm, bool forward) { if (!norm || *norm == "backward") { return forward ? 
fft_norm_mode::none : fft_norm_mode::by_n; } @@ -197,8 +197,8 @@ Tensor fft_c2c_maybe_out( // Complex to real FFT Tensor fft_c2r(c10::string_view function_name, - Tensor out, Tensor input, c10::optional n_opt, - int64_t unwrapped_dim, c10::optional norm_str, + Tensor out, Tensor input, std::optional n_opt, + int64_t unwrapped_dim, std::optional norm_str, bool forward) { TORCH_CHECK(!out.defined() || out.is_floating_point(), function_name, " expects a floating point output tensor, but got ", out.scalar_type()); @@ -221,8 +221,8 @@ Tensor fft_c2r(c10::string_view function_name, // Real to complex FFT Tensor fft_r2c(c10::string_view function_name, - Tensor out, Tensor input, c10::optional n_opt, - int64_t unwrapped_dim, c10::optional norm_str, + Tensor out, Tensor input, std::optional n_opt, + int64_t unwrapped_dim, std::optional norm_str, bool forward, bool onesided) { TORCH_CHECK(!input.is_complex(), function_name, " expects a real input tensor, but got ", input.scalar_type()); @@ -256,8 +256,8 @@ Tensor fft_r2c(c10::string_view function_name, // Complex to complex FFT Tensor fft_c2c(c10::string_view function_name, - Tensor out, Tensor input, c10::optional n_opt, - int64_t unwrapped_dim, c10::optional norm_str, + Tensor out, Tensor input, std::optional n_opt, + int64_t unwrapped_dim, std::optional norm_str, bool forward) { TORCH_CHECK(input.is_complex(), function_name, " expects a complex input tensor, but got ", input.scalar_type()); @@ -346,7 +346,7 @@ ShapeAndDims canonicalize_fft_shape_and_dim_args( Tensor fftn_c2c( c10::string_view function_name, Tensor out, const Tensor& input, SymIntArrayRef shape, - IntArrayRef dim, c10::optional norm_str, bool forward) { + IntArrayRef dim, std::optional norm_str, bool forward) { TORCH_CHECK(input.is_complex(), function_name, " expects a complex input tensor, but got", input.scalar_type()); Tensor x = resize_fft_input(input, dim, shape); const auto norm = static_cast(norm_from_string(norm_str, forward)); @@ -357,15 +357,15 @@ Tensor fftn_c2c( } // namespace (anonymous) // torch.fft.fft, analogous to NumPy's numpy.fft.fft -Tensor fft_fft_symint(const Tensor& self, c10::optional n, int64_t dim, - c10::optional norm) { +Tensor fft_fft_symint(const Tensor& self, std::optional n, int64_t dim, + std::optional norm) { return self.is_complex() ? fft_c2c("fft", {}, self, n, dim, norm, /*forward=*/true) : fft_r2c("fft", {}, self, n, dim, norm, /*forward=*/true, /*onesided=*/false); } -Tensor& fft_fft_symint_out(const Tensor& self, c10::optional n, - int64_t dim, c10::optional norm, Tensor& out) { +Tensor& fft_fft_symint_out(const Tensor& self, std::optional n, + int64_t dim, std::optional norm, Tensor& out) { if (self.is_complex()) { fft_c2c("fft", out, self, n, dim, norm, /*forward=*/true); } else { @@ -374,15 +374,15 @@ Tensor& fft_fft_symint_out(const Tensor& self, c10::optional n, return out; } -Tensor fft_ifft_symint(const Tensor& self, c10::optional n, int64_t dim, - c10::optional norm) { +Tensor fft_ifft_symint(const Tensor& self, std::optional n, int64_t dim, + std::optional norm) { return self.is_complex() ? 
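
For context on the norm handling being touched above, here is a standalone rendering of norm_from_string, using std::string in place of c10::string_view so it compiles without ATen. Only the "backward" branch is visible in this hunk; the others are filled in from the NumPy convention described in the surrounding comment, so treat this as a sketch rather than the exact source:

```
#include <optional>
#include <stdexcept>
#include <string>

enum class fft_norm_mode { none, by_root_n, by_n };

fft_norm_mode norm_from_string(const std::optional<std::string>& norm, bool forward) {
  if (!norm || *norm == "backward") {
    // Default/"backward": scale the inverse transform by 1/n, the forward one not at all.
    return forward ? fft_norm_mode::none : fft_norm_mode::by_n;
  }
  if (*norm == "forward") {
    // Direction-specific meaning, as the comment above notes: 1/n moves to the forward transform.
    return forward ? fft_norm_mode::by_n : fft_norm_mode::none;
  }
  if (*norm == "ortho") {
    return fft_norm_mode::by_root_n;  // 1/sqrt(n) in both directions
  }
  throw std::invalid_argument("Invalid normalization mode: " + *norm);
}
```
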
fft_c2c("ifft", {}, self, n, dim, norm, /*forward=*/false) : fft_r2c("ifft", {}, self, n, dim, norm, /*forward=*/false, /*onesided=*/false); } -Tensor& fft_ifft_symint_out(const Tensor& self, c10::optional n, - int64_t dim, c10::optional norm, Tensor& out) { +Tensor& fft_ifft_symint_out(const Tensor& self, std::optional n, + int64_t dim, std::optional norm, Tensor& out) { if (self.is_complex()) { fft_c2c("ifft", out, self, n, dim, norm, /*forward=*/false); } else { @@ -391,53 +391,53 @@ Tensor& fft_ifft_symint_out(const Tensor& self, c10::optional n, return out; } -Tensor fft_rfft_symint(const Tensor& self, c10::optional n, int64_t dim, - c10::optional norm) { +Tensor fft_rfft_symint(const Tensor& self, std::optional n, int64_t dim, + std::optional norm) { return fft_r2c("rfft", {}, self, n, dim, norm, /*forward=*/true, /*onesided=*/true); } -Tensor& fft_rfft_symint_out(const Tensor& self, c10::optional n, - int64_t dim, c10::optional norm, Tensor& out) { +Tensor& fft_rfft_symint_out(const Tensor& self, std::optional n, + int64_t dim, std::optional norm, Tensor& out) { fft_r2c("rfft", out, self, n, dim, norm, /*forward=*/true, /*onesided=*/true); return out; } -Tensor fft_irfft_symint(const Tensor& self, c10::optional n, int64_t dim, - c10::optional norm) { +Tensor fft_irfft_symint(const Tensor& self, std::optional n, int64_t dim, + std::optional norm) { return fft_c2r("irfft", {}, self, n, dim, norm, /*forward=*/false); } -Tensor& fft_irfft_symint_out(const Tensor& self, c10::optional n, - int64_t dim, c10::optional norm, Tensor& out) { +Tensor& fft_irfft_symint_out(const Tensor& self, std::optional n, + int64_t dim, std::optional norm, Tensor& out) { fft_c2r("irfft", out, self, n, dim, norm, /*forward=*/false); return out; } -Tensor fft_hfft_symint(const Tensor& self, c10::optional n, int64_t dim, - c10::optional norm) { +Tensor fft_hfft_symint(const Tensor& self, std::optional n, int64_t dim, + std::optional norm) { return fft_c2r("hfft", {}, self, n, dim, norm, /*forward=*/true); } -Tensor& fft_hfft_symint_out(const Tensor& self, c10::optional n, - int64_t dim, c10::optional norm, Tensor& out) { +Tensor& fft_hfft_symint_out(const Tensor& self, std::optional n, + int64_t dim, std::optional norm, Tensor& out) { fft_c2r("hfft", out, self, n, dim, norm, /*forward=*/true); return out; } -Tensor fft_ihfft_symint(const Tensor& self, c10::optional n, int64_t dim, - c10::optional norm) { +Tensor fft_ihfft_symint(const Tensor& self, std::optional n, int64_t dim, + std::optional norm) { return fft_r2c("ihfft", {}, self, n, dim, norm, /*forward=*/false, /*onesided=*/true); } -Tensor& fft_ihfft_symint_out(const Tensor& self, c10::optional n, - int64_t dim, c10::optional norm, Tensor& out) { +Tensor& fft_ihfft_symint_out(const Tensor& self, std::optional n, + int64_t dim, std::optional norm, Tensor& out) { fft_r2c("ihfft", out, self, n, dim, norm, /*forward=*/false, /*onesided=*/true); return out; } Tensor fft_fftn_symint(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm) { + std::optional norm) { auto desc = canonicalize_fft_shape_and_dim_args(self, s, dim); // TODO: For real input, perform rfftn then mirror with conjugate symmetry Tensor input = promote_tensor_fft(self, /*require_complex=*/true); @@ -447,7 +447,7 @@ Tensor fft_fftn_symint(const Tensor& self, at::OptionalSymIntArrayRef s, Tensor& fft_fftn_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm, Tensor& out) { + std::optional 
norm, Tensor& out) { auto desc = canonicalize_fft_shape_and_dim_args(self, s, dim); // TODO: For real input, perform rfftn then mirror with conjugate symmetry Tensor input = promote_tensor_fft(self, /*require_complex=*/true); @@ -457,7 +457,7 @@ Tensor& fft_fftn_symint_out(const Tensor& self, Tensor fft_ifftn_symint(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm) { + std::optional norm) { auto desc = canonicalize_fft_shape_and_dim_args(self, s, dim); Tensor input = promote_tensor_fft(self, /*require_complex=*/true); return fftn_c2c("ifftn", {}, input, desc.shape, desc.dim, norm, /*forward=*/false); @@ -466,7 +466,7 @@ Tensor fft_ifftn_symint(const Tensor& self, at::OptionalSymIntArrayRef s, Tensor& fft_ifftn_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm, Tensor& out) { + std::optional norm, Tensor& out) { auto desc = canonicalize_fft_shape_and_dim_args(self, s, dim); Tensor input = promote_tensor_fft(self, /*require_complex=*/true); fftn_c2c("ifftn", out, input, desc.shape, desc.dim, norm, /*forward=*/false); @@ -476,7 +476,7 @@ Tensor& fft_ifftn_symint_out(const Tensor& self, static Tensor fft_rfftn_impl(Tensor out, const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - const c10::optional& norm_str) { + const std::optional& norm_str) { TORCH_CHECK(!self.is_complex(), "rfftn expects a real-valued input tensor, but got ", self.scalar_type()); auto desc = canonicalize_fft_shape_and_dim_args(self, s, dim); TORCH_CHECK(!desc.shape.empty(), "rfftn must transform at least one axis"); @@ -489,14 +489,14 @@ static Tensor fft_rfftn_impl(Tensor out, const Tensor& self, Tensor fft_rfftn_symint(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm_str) { + std::optional norm_str) { return fft_rfftn_impl({}, self, s, dim, norm_str); } Tensor& fft_rfftn_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm_str, Tensor& out) { + std::optional norm_str, Tensor& out) { fft_rfftn_impl(out, self, s, dim, norm_str); return out; } @@ -528,7 +528,7 @@ static ShapeAndDims canonicalize_fft_c2r_shape_and_dim_args( static Tensor fft_irfftn_impl(Tensor out, const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - const c10::optional& norm_str) { + const std::optional& norm_str) { SymInt last_dim_size = 0; auto desc = canonicalize_fft_c2r_shape_and_dim_args( "irfftn", self, s, dim, last_dim_size); @@ -542,14 +542,14 @@ static Tensor fft_irfftn_impl(Tensor out, const Tensor& self, Tensor fft_irfftn_symint(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm_str) { + std::optional norm_str) { return fft_irfftn_impl({}, self, s, dim, norm_str); } Tensor& fft_irfftn_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm_str, Tensor& out) { + std::optional norm_str, Tensor& out) { fft_irfftn_impl(out, self, s, dim, norm_str); return out; } @@ -558,7 +558,7 @@ static Tensor fft_hfftn_impl( const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm_str, + std::optional norm_str, const Tensor& out) { constexpr c10::string_view fname = "hfftn"; SymInt last_dim_size = 0; @@ -586,14 +586,14 @@ Tensor fft_hfftn_symint( const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm) { 
+ std::optional norm) { return fft_hfftn_impl(self, s, dim, norm, {}); } const Tensor& fft_hfftn_symint_out( const Tensor& self, at::OptionalSymIntArrayRef s, - at::OptionalIntArrayRef dim, c10::optional norm, + at::OptionalIntArrayRef dim, std::optional norm, const Tensor& out) { fft_hfftn_impl(self, s, dim, norm, out); return out; @@ -603,7 +603,7 @@ static Tensor fft_ihfftn_impl( const Tensor& self, const at::OptionalSymIntArrayRef& s, const at::OptionalIntArrayRef& dim, - const c10::optional& norm_str, + const std::optional& norm_str, const Tensor& out) { constexpr c10::string_view fname = "ihfftn"; auto desc = canonicalize_fft_shape_and_dim_args(self, s, dim); @@ -628,7 +628,7 @@ Tensor fft_ihfftn_symint( const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm) { + std::optional norm) { return fft_ihfftn_impl(self, s, dim, norm, {}); } @@ -636,71 +636,71 @@ const Tensor& fft_ihfftn_symint_out( const Tensor& self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, - c10::optional norm, + std::optional norm, const Tensor& out) { fft_ihfftn_impl(self, s, dim, norm, out); return out; } Tensor fft_fft2_symint(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm) { + IntArrayRef dim, std::optional norm) { return native::fft_fftn_symint(self, s, dim, std::move(norm)); } Tensor& fft_fft2_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm, Tensor& out) { + IntArrayRef dim, std::optional norm, Tensor& out) { return native::fft_fftn_symint_out(self, s, dim, std::move(norm), out); } Tensor fft_ifft2_symint(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm) { + IntArrayRef dim, std::optional norm) { return native::fft_ifftn_symint(self, s, dim, std::move(norm)); } Tensor& fft_ifft2_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm, Tensor& out) { + IntArrayRef dim, std::optional norm, Tensor& out) { return native::fft_ifftn_symint_out(self, s, dim, std::move(norm), out); } Tensor fft_rfft2_symint(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm) { + IntArrayRef dim, std::optional norm) { return native::fft_rfftn_symint(self, s, dim, std::move(norm)); } Tensor& fft_rfft2_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm, Tensor& out) { + IntArrayRef dim, std::optional norm, Tensor& out) { return native::fft_rfftn_symint_out(self, s, dim, std::move(norm), out); } Tensor fft_irfft2_symint(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm) { + IntArrayRef dim, std::optional norm) { return native::fft_irfftn_symint(self, s, dim, std::move(norm)); } Tensor& fft_irfft2_symint_out(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm, Tensor& out) { + IntArrayRef dim, std::optional norm, Tensor& out) { return native::fft_irfftn_symint_out(self, s, dim, std::move(norm), out); } const Tensor& fft_hfft2_symint_out( const Tensor& self, at::OptionalSymIntArrayRef s, IntArrayRef dim, - c10::optional norm, const Tensor& out) { + std::optional norm, const Tensor& out) { return native::fft_hfftn_symint_out(self, s, dim, std::move(norm), out); } Tensor fft_hfft2_symint(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm) { + IntArrayRef dim, std::optional norm) { return native::fft_hfftn_symint(self, s, 
dim, std::move(norm)); } const Tensor& fft_ihfft2_symint_out( const Tensor& self, at::OptionalSymIntArrayRef s, IntArrayRef dim, - c10::optional norm, const Tensor& out) { + std::optional norm, const Tensor& out) { return native::fft_ihfftn_symint_out(self, s, dim, std::move(norm), out); } Tensor fft_ihfft2_symint(const Tensor& self, at::OptionalSymIntArrayRef s, - IntArrayRef dim, c10::optional norm) { + IntArrayRef dim, std::optional norm) { return native::fft_ihfftn_symint(self, s, dim, std::move(norm)); } @@ -716,10 +716,10 @@ Tensor& fft_fftfreq_out(int64_t n, double d, Tensor& out) { } Tensor fft_fftfreq(int64_t n, double d, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -737,10 +737,10 @@ Tensor& fft_rfftfreq_out(int64_t n, double d, Tensor& out) { } Tensor fft_rfftfreq(int64_t n, double d, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -824,7 +824,7 @@ static Stream& write_opt(Stream& SS, const optional& value) { * signals and complex windows. */ Tensor stft(const Tensor& self, const int64_t n_fft, const optional hop_lengthOpt, - const optional win_lengthOpt, const c10::optional& window_opt, + const optional win_lengthOpt, const std::optional& window_opt, const bool center, c10::string_view mode, const bool normalized, const optional onesidedOpt, const optional return_complexOpt) { // See [Note: hacky wrapper removal for optional tensor] @@ -980,7 +980,7 @@ Tensor stft(const Tensor& self, const int64_t n_fft, const optional hop Tensor stft( const Tensor& self, const int64_t n_fft, const optional hop_lengthOpt, - const optional win_lengthOpt, const c10::optional& window_opt, + const optional win_lengthOpt, const std::optional& window_opt, const bool normalized, const optional onesidedOpt, const optional return_complexOpt) { return at::stft( @@ -1011,8 +1011,8 @@ static Tensor as_complex(const Tensor& self) { * signals and complex windows. 
*/ Tensor istft(const Tensor& self, const int64_t n_fft, const optional hop_lengthOpt, - const optional win_lengthOpt, const c10::optional& window_opt, - const bool center, const bool normalized, const c10::optional onesidedOpt, + const optional win_lengthOpt, const std::optional& window_opt, + const bool center, const bool normalized, const std::optional onesidedOpt, const optional lengthOpt, const bool return_complex) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned window_maybe_owned = at::borrow_from_optional_tensor(window_opt); diff --git a/aten/src/ATen/native/SummaryOps.cpp b/aten/src/ATen/native/SummaryOps.cpp index 4c158f81a47e9..1866f4353b535 100644 --- a/aten/src/ATen/native/SummaryOps.cpp +++ b/aten/src/ATen/native/SummaryOps.cpp @@ -68,7 +68,7 @@ Tensor _bincount_cpu_template( } // namespace Tensor -_bincount_cpu(const Tensor& self, const c10::optional& weights_opt, int64_t minlength) { +_bincount_cpu(const Tensor& self, const std::optional& weights_opt, int64_t minlength) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt); const Tensor& weights = *weights_maybe_owned; diff --git a/aten/src/ATen/native/TensorAdvancedIndexing.cpp b/aten/src/ATen/native/TensorAdvancedIndexing.cpp index f1e385d8eeac8..395af8e5ef139 100644 --- a/aten/src/ATen/native/TensorAdvancedIndexing.cpp +++ b/aten/src/ATen/native/TensorAdvancedIndexing.cpp @@ -190,8 +190,8 @@ void scatter_meta_impl( const Tensor& self, int64_t dim, const Tensor& index, - const c10::optional& src = nullopt, - const c10::optional reduce = nullopt) { + const std::optional& src = nullopt, + const std::optional reduce = nullopt) { int64_t wrapped_dim = at::maybe_wrap_dim(dim, self.dim()); at::native::scatter_gather_dtype_check("scatter", self, index, src); at::native::scatter_shape_check(self, wrapped_dim, index, src); @@ -629,7 +629,7 @@ TORCH_IMPL_FUNC(index_out) index_stub(device_type(), *this, sizes, strides); } -Tensor quantized_index(const Tensor & self, const torch::List>& indices) { +Tensor quantized_index(const Tensor & self, const torch::List>& indices) { TORCH_INTERNAL_ASSERT( self.qscheme() == c10::kPerTensorAffine || self.qscheme() == c10::kPerTensorSymmetric, @@ -643,7 +643,7 @@ Tensor quantized_index(const Tensor & self, const torch::List>& indices) { +Tensor _unsafe_index(const Tensor& self, const torch::List>& indices) { // Disallow boolean indexing since it leads to dynamic output shapes for (auto i : c10::irange(indices.size())) { auto index = indices.get(i); @@ -702,15 +702,15 @@ Tensor put(const Tensor & self, const Tensor& index, const Tensor & source, cons return self.clone(at::MemoryFormat::Preserve).put_(index, source, accumulate); } -Tensor index_put(const Tensor & self, const torch::List>& indices, const Tensor & value, bool accumulate) { +Tensor index_put(const Tensor & self, const torch::List>& indices, const Tensor & value, bool accumulate) { return self.clone(at::MemoryFormat::Preserve).index_put_(indices, value, accumulate); } -Tensor _unsafe_index_put(const Tensor& self, const torch::List>& indices, const Tensor& value, bool accumulate) { +Tensor _unsafe_index_put(const Tensor& self, const torch::List>& indices, const Tensor& value, bool accumulate) { return at::index_put(self, indices, value, accumulate); } -Tensor & _index_put_impl_(Tensor & self, const torch::List>& indices, const Tensor & value, const bool accumulate, const bool unsafe) { +Tensor & _index_put_impl_(Tensor & 
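
The "[Note: hacky wrapper removal for optional tensor]" idiom used by _bincount_cpu and the stft/istft signatures above works the same way after the rename. A sketch of the idiom in isolation (the function here is invented for illustration and assumes libtorch headers):

```
#include <ATen/ATen.h>
#include <optional>

at::Tensor weighted_sum(const at::Tensor& self,
                        const std::optional<at::Tensor>& weights_opt) {
  // Borrow the optional; an absent value materializes as an undefined Tensor.
  c10::MaybeOwned<at::Tensor> weights_maybe_owned =
      at::borrow_from_optional_tensor(weights_opt);
  const at::Tensor& weights = *weights_maybe_owned;
  return weights.defined() ? (self * weights).sum() : self.sum();
}
```
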
self, const torch::List>& indices, const Tensor & value, const bool accumulate, const bool unsafe) { TORCH_CHECK_INDEX(indices.size() <= (size_t)self.dim(), "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); if (at::has_internal_overlap(self) == MemOverlap::Yes) { TORCH_WARN( @@ -730,7 +730,7 @@ Tensor & _index_put_impl_(Tensor & self, const torch::List } at::assert_no_overlap(self, value); // NOLINTNEXTLINE(performance-implicit-conversion-in-loop) - for (const c10::optional& index: indices) { + for (const std::optional& index: indices) { if (index.has_value()) { at::assert_no_overlap(self, *index); } @@ -788,7 +788,7 @@ Tensor take(const Tensor& self, const Tensor& index) { return out; } -Tensor & index_put_(Tensor & self, const torch::List>& indices, const Tensor & value, const bool accumulate) { +Tensor & index_put_(Tensor & self, const torch::List>& indices, const Tensor & value, const bool accumulate) { return at::_index_put_impl_(self, indices, value, accumulate, /*unsafe=*/false); } @@ -798,7 +798,7 @@ TORCH_IMPL_FUNC(index_copy_out) // See Note [Enabling Deterministic Operations] if (result.is_cuda() && globalContext().deterministicAlgorithms()){ - torch::List> indices; + torch::List> indices; indices.reserve(dim + 1); for (const auto i: c10::irange(dim)) { (void)i; @@ -1624,7 +1624,7 @@ static void _scatter_via_index_put( const Tensor& mut_out, bool accumulate) { if (self.dim() == 1) { - torch::List> indices; + torch::List> indices; indices.reserve(1); indices.push_back(index); mut_out.index_put_(indices, src, accumulate); @@ -1698,7 +1698,7 @@ static void _scatter_via_index_put( src.strides() ).flatten(); - torch::List> indices; + torch::List> indices; indices.reserve(1); indices.push_back(index_flat); @@ -1719,7 +1719,7 @@ void scatter_impl( const Tensor& out, ReduceStub& reduce_stub, FillStub& fill_stub, - const c10::optional reduce = nullopt, + const std::optional reduce = nullopt, bool reduce_includes_self = true) { dim = at::maybe_wrap_dim(dim, self.dim()); @@ -2123,7 +2123,7 @@ static inline void checkDevice(CheckedFrom c, at::ArrayRef tensors, Devi } // anonymous namespace -Tensor take_along_dim(const Tensor& self, const Tensor& indices, c10::optional opt_dim) { +Tensor take_along_dim(const Tensor& self, const Tensor& indices, std::optional opt_dim) { checkDevice("torch.take_along_dim():", {self, indices}, self.device()); if (opt_dim.has_value()) { auto [self_broadcasted, indices_broadcasted, dim] = @@ -2135,7 +2135,7 @@ Tensor take_along_dim(const Tensor& self, const Tensor& indices, c10::optional opt_dim, Tensor& result) { +Tensor& take_along_dim_out(const Tensor& self, const Tensor& indices, std::optional opt_dim, Tensor& result) { checkDevice("torch.take_along_dim():", {self, indices, result}, self.device()); if (opt_dim.has_value()) { auto [self_broadcasted, indices_broadcasted, dim] = @@ -2241,7 +2241,7 @@ Tensor count_nonzero_cpu(const Tensor& self, IntArrayRef dims){ } -Tensor count_nonzero(const Tensor& self, c10::optional dim) { +Tensor count_nonzero(const Tensor& self, std::optional dim) { if (dim) { return at::count_nonzero(self, IntArrayRef{*dim}); } diff --git a/aten/src/ATen/native/TensorAdvancedIndexing.h b/aten/src/ATen/native/TensorAdvancedIndexing.h index c1464092a8e28..7b02b4201ffaa 100644 --- a/aten/src/ATen/native/TensorAdvancedIndexing.h +++ b/aten/src/ATen/native/TensorAdvancedIndexing.h @@ -13,8 +13,8 @@ struct TensorIterator; namespace at::native { -using index_put_with_sort_fn = void(*)(Tensor &, const 
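
The torch::List<std::optional<Tensor>> arguments rewritten throughout TensorAdvancedIndexing.cpp are the C++ spelling of Python advanced indexing, where a disengaged slot means "take this dimension whole". A small call-site sketch (not from the patch), written against the underlying c10::List and assuming libtorch headers:

```
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::arange(12).reshape({3, 4});
  at::Tensor col = at::arange(0, 4, 2);  // {0, 2}

  c10::List<c10::optional<at::Tensor>> indices;
  indices.push_back(std::nullopt);  // dimension 0 is kept whole, like x[:, col] in Python
  indices.push_back(col);

  at::Tensor picked = at::index(x, indices);  // shape {3, 2}
  return 0;
}
```
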
c10::List> &, const Tensor &, bool accumulate, bool unsafe); -using index_put_with_sort_quantized_fn = void(*)(Tensor& self, const c10::List>& indices, const Tensor& value, double scale, int zero_point, bool unsafe); +using index_put_with_sort_fn = void(*)(Tensor &, const c10::List> &, const Tensor &, bool accumulate, bool unsafe); +using index_put_with_sort_quantized_fn = void(*)(Tensor& self, const c10::List>& indices, const Tensor& value, double scale, int zero_point, bool unsafe); using gather_fn = void (*)(const Tensor & result, const Tensor & self, int64_t dim, const Tensor & index); using scatter_fn = void(*)(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src); using scatter_fill_fn = void(*)(const Tensor& self, int64_t dim, const Tensor& index, const Scalar& src); @@ -36,7 +36,7 @@ DECLARE_DISPATCH(scatter_reduce_fn, scatter_reduce_stub); DECLARE_DISPATCH(scatter_scalar_reduce_fn, scatter_scalar_reduce_stub); DECLARE_DISPATCH(scatter_reduce_two_fn, scatter_reduce_two_stub); -TORCH_API Tensor& index_out(Tensor& result, const Tensor & self, const c10::List>& indices); +TORCH_API Tensor& index_out(Tensor& result, const Tensor & self, const c10::List>& indices); using scatter_add_expanded_index_fn = void(*)(const Tensor&, const Tensor&, const Tensor&); using scatter_reduce_expanded_index_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const ReductionType& reduce, bool); diff --git a/aten/src/ATen/native/TensorAdvancedIndexingUtils.h b/aten/src/ATen/native/TensorAdvancedIndexingUtils.h index 7b9d1446a087b..e46be1f878f72 100644 --- a/aten/src/ATen/native/TensorAdvancedIndexingUtils.h +++ b/aten/src/ATen/native/TensorAdvancedIndexingUtils.h @@ -21,7 +21,7 @@ static std::string shapes_as_str(TensorList tensors) { } } // anonymous namespace -static std::tuple canDispatchToMaskedFill(const Tensor& self, const torch::List>& indices, +static std::tuple canDispatchToMaskedFill(const Tensor& self, const torch::List>& indices, const Tensor& value){ if (!(value.numel() ==1 && value.device().is_cpu())){ return std::make_tuple(false,Tensor()); @@ -29,7 +29,7 @@ const Tensor& value){ int64_t num_ind = 0; Tensor mask; auto self_device = self.device(); - for (const c10::optional& i: indices) { + for (const std::optional& i: indices) { if (!i.has_value() || !(*i).defined()){ num_ind++; } else { diff --git a/aten/src/ATen/native/TensorCompare.cpp b/aten/src/ATen/native/TensorCompare.cpp index e9599b4898fcd..cbb79dfabc7eb 100644 --- a/aten/src/ATen/native/TensorCompare.cpp +++ b/aten/src/ATen/native/TensorCompare.cpp @@ -491,7 +491,7 @@ static void isin_sorting( if (assume_unique) { out.copy_(mask.slice(0, 0, elements.numel()).view_as(out)); } else { - out.copy_(at::index(mask, {c10::optional(unique_order)})); + out.copy_(at::index(mask, {std::optional(unique_order)})); } } @@ -746,27 +746,27 @@ TORCH_IMPL_FUNC(clamp_min_Tensor_out) } // Implements the "clip" alias for clamp -Tensor& clip_out(const Tensor& self, const c10::optional& min, const c10::optional& max, Tensor& result) { +Tensor& clip_out(const Tensor& self, const std::optional& min, const c10::optional& max, Tensor& result) { return at::clamp_outf(self, min, max, result); } -Tensor& clip_out(const Tensor& self, const c10::optional& min, const c10::optional& max, Tensor& result) { +Tensor& clip_out(const Tensor& self, const std::optional& min, const c10::optional& max, Tensor& result) { return at::clamp_outf(self, min, max, result); } -Tensor clip(const Tensor& self, const c10::optional& min, const c10::optional& 
max) { +Tensor clip(const Tensor& self, const std::optional& min, const c10::optional& max) { return at::clamp(self, min, max); } -Tensor clip(const Tensor& self, const c10::optional& min, const c10::optional& max) { +Tensor clip(const Tensor& self, const std::optional& min, const c10::optional& max) { return at::clamp(self, min, max); } -Tensor& clip_(Tensor& self, const c10::optional& min, const c10::optional& max) { +Tensor& clip_(Tensor& self, const std::optional& min, const c10::optional& max) { return at::clamp_(self, min, max); } -Tensor& clip_(Tensor& self, const c10::optional& min, const c10::optional& max) { +Tensor& clip_(Tensor& self, const std::optional& min, const c10::optional& max) { return at::clamp_(self, min, max); } diff --git a/aten/src/ATen/native/TensorConversions.cpp b/aten/src/ATen/native/TensorConversions.cpp index c70da8334a5e9..dfb0fe4eb0a05 100644 --- a/aten/src/ATen/native/TensorConversions.cpp +++ b/aten/src/ATen/native/TensorConversions.cpp @@ -229,12 +229,12 @@ static inline optional ensure_has_index(optional device) { Tensor _to_copy( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, bool non_blocking, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { TORCH_CHECK(!layout.has_value() || self.layout() == layout.value(), "to(options) doesn't support converting to a different layout, " "but got self.layout being ", self.layout(), @@ -387,7 +387,7 @@ Tensor _to_copy( } template -static inline bool is_null_or_equal_to(const c10::optional& test, const T& value) { +static inline bool is_null_or_equal_to(const std::optional& test, const T& value) { if (!test.has_value()) { return true; } @@ -399,11 +399,11 @@ static inline bool is_null_or_equal_to(const c10::optional& test, const T& va // well. 
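
is_null_or_equal_to, touched just above, is a small helper whose intent is easy to state: a disengaged optional means "no constraint", so it compares equal to anything. Its tail is cut off in this hunk; the std-only sketch below completes it in the obvious way:

```
#include <cassert>
#include <optional>

template <typename T>
bool is_null_or_equal_to(const std::optional<T>& test, const T& value) {
  if (!test.has_value()) {
    return true;  // nothing was requested, so the current value is acceptable
  }
  return test.value() == value;
}

int main() {
  assert(is_null_or_equal_to<int>(std::nullopt, 7));  // no constraint matches anything
  assert(is_null_or_equal_to<int>(7, 7));
  assert(!is_null_or_equal_to<int>(3, 7));
  return 0;
}
```
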
bool to_will_alias( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, + std::optional dtype, + std::optional layout, + std::optional device, bool copy, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { auto memory_format = optional_memory_format.value_or(MemoryFormat::Preserve); return is_null_or_equal_to(dtype, self.dtype().toScalarType()) && @@ -416,13 +416,13 @@ bool to_will_alias( static inline Tensor to_impl( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, bool non_blocking, bool copy, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { // fast path if (to_will_alias(self, dtype, layout, device, copy, optional_memory_format)) { @@ -471,13 +471,13 @@ Tensor _autocast_to_full_precision(const Tensor& self, bool cuda_enabled, bool c Tensor to( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, bool non_blocking, bool copy, - c10::optional optional_memory_format + std::optional optional_memory_format ) { return to_impl( self, @@ -490,7 +490,7 @@ Tensor to( optional_memory_format); } -Tensor to(const Tensor& self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional optional_memory_format) { +Tensor to(const Tensor& self, Device device, ScalarType dtype, bool non_blocking, bool copy, std::optional optional_memory_format) { return to_impl( self, dtype, @@ -502,7 +502,7 @@ Tensor to(const Tensor& self, Device device, ScalarType dtype, bool non_blocking optional_memory_format); } -Tensor to(const Tensor& self, ScalarType dtype, bool non_blocking, bool copy, c10::optional optional_memory_format) { +Tensor to(const Tensor& self, ScalarType dtype, bool non_blocking, bool copy, std::optional optional_memory_format) { return to_impl( self, dtype, @@ -514,7 +514,7 @@ Tensor to(const Tensor& self, ScalarType dtype, bool non_blocking, bool copy, c1 optional_memory_format); } -Tensor to(const Tensor& self, const Tensor& other, bool non_blocking, bool copy, c10::optional optional_memory_format) { +Tensor to(const Tensor& self, const Tensor& other, bool non_blocking, bool copy, std::optional optional_memory_format) { auto options = other.options(); return to_impl( self, @@ -538,7 +538,7 @@ std::vector _to_cpu(TensorList tensors) { return cpu_tensors; } -Tensor to_dense_backward(const Tensor& grad, const Tensor& input_, c10::optional masked_grad_) { +Tensor to_dense_backward(const Tensor& grad, const Tensor& input_, std::optional masked_grad_) { /* For historical reasons, to_dense backward implements masked semantics for sparse tensors, that is, gradients with respect to @@ -598,7 +598,7 @@ Tensor to_mkldnn_backward(const Tensor& grad, const Tensor& input_) { return grad.to_dense(input_.scalar_type()); } -Tensor to_dense(const Tensor& tensor, c10::optional dtype, c10::optional masked_grad) { +Tensor to_dense(const Tensor& tensor, std::optional dtype, c10::optional masked_grad) { if (tensor.layout() == c10::kSparse) { return tensor._to_dense(dtype, masked_grad); } @@ -621,7 +621,7 @@ Tensor to_dense(const Tensor& tensor, c10::optional dtype, c10: return tensor; } -Tensor sparse_to_dense(const Tensor& self, c10::optional dtype, c10::optional 
masked) { +Tensor sparse_to_dense(const Tensor& self, std::optional dtype, c10::optional masked) { TORCH_CHECK( !dtype.has_value(), "dtype argument is not supported by sparse_to_dense"); Tensor dst = at::zeros(self.sizes(), self.options().layout(kStrided)); @@ -630,8 +630,8 @@ Tensor sparse_to_dense(const Tensor& self, c10::optional dtype, c10: Tensor sparse_compressed_to_dense( const Tensor& self, - c10::optional dtype, - c10::optional masked_grad) { + std::optional dtype, + std::optional masked_grad) { TORCH_CHECK( !dtype.has_value(), "dtype argument is not supported by sparse_csr_to_dense"); @@ -954,7 +954,7 @@ void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self, } static inline -void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { +void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self, std::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { auto layout_from = self.layout(); auto layout_to = layout.value_or(kSparse); @@ -1036,7 +1036,7 @@ void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self, } template -static Tensor dense_to_sparse_compressed(const Tensor& self, const Tensor& self_mask, IntArrayRef blocksize, c10::optional dense_dim_opt) { +static Tensor dense_to_sparse_compressed(const Tensor& self, const Tensor& self_mask, IntArrayRef blocksize, std::optional dense_dim_opt) { static_assert(target_layout == Layout::SparseCsr || target_layout == Layout::SparseCsc || target_layout == Layout::SparseBsr || target_layout == Layout::SparseBsc, "invalid layout template parameter for dense_to_sparse_compressed"); @@ -1109,7 +1109,7 @@ static Tensor dense_to_sparse_compressed(const Tensor& self, const Tensor& self_ self.options().layout(target_layout)); } -Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { auto layout_to = layout.value_or(kSparse); TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "dense_to_sparse: unexpected same input and output layout"); TORCH_INTERNAL_ASSERT(self.layout() == mask.layout(), @@ -1137,35 +1137,35 @@ Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, c10::op return Tensor{}; } -Tensor dense_to_sparse_csr(const Tensor& self, c10::optional dense_dim_opt) { +Tensor dense_to_sparse_csr(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsr; _to_sparse_check_arguments("dense_to_sparse_csr", self, layout_to, {}, dense_dim_opt); return dense_to_sparse_compressed(self, self != 0, {}, dense_dim_opt); } -Tensor dense_to_sparse_csc(const Tensor& self, c10::optional dense_dim_opt) { +Tensor dense_to_sparse_csc(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsc; _to_sparse_check_arguments("dense_to_sparse_csc", self, layout_to, {}, dense_dim_opt); return dense_to_sparse_compressed(self, self != 0, {}, dense_dim_opt); } -Tensor dense_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor dense_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsr; _to_sparse_check_arguments("dense_to_sparse_bsr", self, layout_to, blocksize, 
dense_dim_opt); return dense_to_sparse_compressed(self, self != 0, blocksize, dense_dim_opt); } -Tensor dense_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor dense_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsc; _to_sparse_check_arguments("dense_to_sparse_bsc", self, layout_to, blocksize, dense_dim_opt); return dense_to_sparse_compressed(self, self != 0, blocksize, dense_dim_opt); } -Tensor dense_to_sparse(const Tensor& self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor dense_to_sparse(const Tensor& self, std::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { auto layout_to = layout.value_or(kSparse); TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "dense_to_sparse: unexpected same input and output layout"); _to_sparse_check_arguments("dense_to_sparse", self, layout, blocksize, dense_dim_opt); @@ -1234,7 +1234,7 @@ Tensor dense_to_sparse(const Tensor& self, int64_t sparse_dim) { static Tensor sparse_compressed_to_flipped( const Tensor& self, - c10::optional blocksize, + std::optional blocksize, const std::string& name) { const auto layout = self.layout(); // NOTE: errors on non-compressed sparse layouts. @@ -1435,7 +1435,7 @@ static Tensor sparse_compressed_to_flipped( self.options().layout(flipped_layout)); } -Tensor sparse_compressed_to_sparse_csr(const Tensor& self, c10::optional dense_dim_opt) { +Tensor sparse_compressed_to_sparse_csr(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsr; TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_compressed_to_sparse_csr: unexpected same input and output layout"); _to_sparse_check_arguments("sparse_compressed_to_sparse_csr", self, layout_to, {}, dense_dim_opt); @@ -1448,7 +1448,7 @@ Tensor sparse_compressed_to_sparse_csr(const Tensor& self, c10::optional dense_dim_opt) { +Tensor sparse_compressed_to_sparse_csc(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsc; TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_compressed_to_sparse_csc: unexpected same input and output layout"); _to_sparse_check_arguments("sparse_compressed_to_sparse_csc", self, layout_to, {}, dense_dim_opt); @@ -1461,7 +1461,7 @@ Tensor sparse_compressed_to_sparse_csc(const Tensor& self, c10::optional dense_dim_opt) { +Tensor coo_to_sparse_csr(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsr; _to_sparse_check_arguments("coo_to_sparse_csr", self, layout_to, {}, dense_dim_opt); @@ -1480,7 +1480,7 @@ Tensor coo_to_sparse_csr(const Tensor& self, c10::optional dense_dim_op coalesced_self.device()); } -Tensor coo_to_sparse_csc(const Tensor& self, c10::optional dense_dim_opt) { +Tensor coo_to_sparse_csc(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsc; _to_sparse_check_arguments("coo_to_sparse_csc", self, layout_to, {}, dense_dim_opt); @@ -1495,14 +1495,14 @@ Tensor coo_to_sparse_csc(const Tensor& self, c10::optional dense_dim_op transposed_csr.device()); } -Tensor coo_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor coo_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsr; _to_sparse_check_arguments("coo_to_sparse_bsr", self, layout_to, blocksize, dense_dim_opt); return self.to_sparse_csr(dense_dim_opt).to_sparse_bsr(blocksize); } -Tensor 
coo_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor coo_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsc; _to_sparse_check_arguments("coo_to_sparse_bsc", self, layout_to, blocksize, dense_dim_opt); @@ -1814,7 +1814,7 @@ Tensor _compressed_to_block_compressed_cpu(const Tensor& self, IntArrayRef block self.options().layout(target_layout)); } -Tensor sparse_compressed_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor sparse_compressed_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsr; TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_compressed_to_sparse_bsr: unexpected same input and output layout"); _to_sparse_check_arguments("sparse_compressed_to_sparse_bsr", self, layout_to, blocksize, dense_dim_opt); @@ -1836,7 +1836,7 @@ Tensor sparse_compressed_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize return Tensor{}; } -Tensor sparse_compressed_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor sparse_compressed_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsc; TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_compressed_to_sparse_bsc: unexpected same input and output layout"); _to_sparse_check_arguments("sparse_compressed_to_sparse_bsc", self, layout_to, blocksize, dense_dim_opt); @@ -1909,7 +1909,7 @@ Tensor sparse_compressed_to_sparse(const Tensor& self, const int64_t sparse_dim) return at::native::_sparse_coo_tensor_unsafe(indices, values, self.sizes())._coalesced_(coalesced); } -Tensor sparse_compressed_to_sparse(const Tensor& self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor sparse_compressed_to_sparse(const Tensor& self, std::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { auto layout_to = layout.value_or(kSparse); TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_compressed_to_sparse: unexpected same input and output layout"); _to_sparse_check_arguments("sparse_compressed_to_sparse", self, layout_to, blocksize, dense_dim_opt); @@ -1936,7 +1936,7 @@ Tensor sparse_compressed_to_sparse(const Tensor& self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor sparse_coo_to_sparse(const Tensor& self, std::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { auto layout_to = layout.value_or(kSparse); TORCH_INTERNAL_ASSERT(self.layout() != layout_to, "sparse_coo_to_sparse: unexpected same input and output layout"); _to_sparse_check_arguments("sparse_coo_to_sparse", self, layout_to, blocksize, dense_dim_opt); @@ -1969,7 +1969,7 @@ Tensor to_sparse(const Tensor& self, const int64_t sparse_dim) { return self._to_sparse(sparse_dim); } -Tensor to_sparse(const Tensor& self, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor to_sparse(const Tensor& self, std::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt) { auto layout_to = layout.value_or(kSparse); if (self.layout() == layout_to) { _to_sparse_check_arguments("to_sparse", self, layout, blocksize, dense_dim_opt); @@ -1978,7 +1978,7 @@ Tensor to_sparse(const Tensor& self, c10::optional layout, Optional return self._to_sparse(layout, blocksize, dense_dim_opt); } -Tensor 
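
The sparse conversion entry points above all take an optional dense_dim (and, for block layouts, a blocksize). A minimal usage sketch through the public Tensor methods (not from the patch; assumes libtorch headers, with a 2x2 blocksize chosen only so it divides the toy shape):

```
#include <ATen/ATen.h>

int main() {
  at::Tensor dense = at::eye(4);
  at::Tensor csr = dense.to_sparse_csr();        // dense_dim defaults to std::nullopt
  at::Tensor bsr = dense.to_sparse_bsr({2, 2});  // blocksize must divide the sparse dims
  at::Tensor back = bsr.to_dense();              // round-trips to the strided layout
  return 0;
}
```
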
to_sparse_csr(const Tensor& self, c10::optional dense_dim_opt) { +Tensor to_sparse_csr(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsr; if (self.layout() == layout_to) { _to_sparse_check_arguments("to_sparse_csr", self, layout_to, {}, dense_dim_opt); @@ -1987,7 +1987,7 @@ Tensor to_sparse_csr(const Tensor& self, c10::optional dense_dim_opt) { return self._to_sparse_csr(dense_dim_opt); } -Tensor to_sparse_csc(const Tensor& self, c10::optional dense_dim_opt) { +Tensor to_sparse_csc(const Tensor& self, std::optional dense_dim_opt) { auto layout_to = kSparseCsc; if (self.layout() == layout_to) { _to_sparse_check_arguments("to_sparse_csc", self, layout_to, {}, dense_dim_opt); @@ -1996,7 +1996,7 @@ Tensor to_sparse_csc(const Tensor& self, c10::optional dense_dim_opt) { return self._to_sparse_csc(dense_dim_opt); } -Tensor to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsr; if (self.layout() == layout_to) { _to_sparse_check_arguments("to_sparse_bsr", self, layout_to, blocksize, dense_dim_opt); @@ -2005,7 +2005,7 @@ Tensor to_sparse_bsr(const Tensor& self, IntArrayRef blocksize, c10::optional dense_dim_opt) { +Tensor to_sparse_bsc(const Tensor& self, IntArrayRef blocksize, std::optional dense_dim_opt) { auto layout_to = kSparseBsc; if (self.layout() == layout_to) { _to_sparse_check_arguments("to_sparse_bsc", self, layout_to, blocksize, dense_dim_opt); @@ -2026,7 +2026,7 @@ Tensor to_meta(const Tensor& tensor) { } return out; } -c10::optional to_meta(const c10::optional& tensor) { +std::optional to_meta(const c10::optional& tensor) { if (tensor.has_value()) { return to_meta(*tensor); } diff --git a/aten/src/ATen/native/TensorConversions.h b/aten/src/ATen/native/TensorConversions.h index fa0d58f3c1299..0e2fd30c288ce 100644 --- a/aten/src/ATen/native/TensorConversions.h +++ b/aten/src/ATen/native/TensorConversions.h @@ -11,16 +11,16 @@ namespace at { namespace native { bool to_will_alias( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, + std::optional dtype, + std::optional layout, + std::optional device, bool copy, - c10::optional optional_memory_format); + std::optional optional_memory_format); Tensor to_meta(const Tensor& tensor); -c10::optional to_meta(const c10::optional& tensor); +std::optional to_meta(const c10::optional& tensor); std::vector to_meta(at::ITensorListRef t_list); -Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, c10::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt); +Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::optional layout, OptionalIntArrayRef blocksize, c10::optional dense_dim_opt); } // namespace native } // namespace at diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp index c8fddc3756353..195a792600f9b 100644 --- a/aten/src/ATen/native/TensorFactories.cpp +++ b/aten/src/ATen/native/TensorFactories.cpp @@ -133,18 +133,18 @@ DEFINE_DISPATCH(polar_stub); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ arange ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor arange(const Scalar& end, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::arange(/*start=*/0, end, dtype, 
layout, device, pin_memory); } Tensor arange(const Scalar& start, const Scalar& end, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::arange( start, end, /*step=*/1, dtype, layout, device, pin_memory); } @@ -153,10 +153,10 @@ Tensor arange( const Scalar& start, const Scalar& end, const Scalar& step, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -252,8 +252,8 @@ Tensor polar(const Tensor& abs, const Tensor& angle) { } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ empty ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Tensor empty_cpu(IntArrayRef size, c10::optional dtype_opt, c10::optional layout_opt, - c10::optional device_opt, c10::optional pin_memory_opt, c10::optional memory_format_opt) { +Tensor empty_cpu(IntArrayRef size, std::optional dtype_opt, c10::optional layout_opt, + std::optional device_opt, c10::optional pin_memory_opt, c10::optional memory_format_opt) { Tensor result = at::detail::empty_cpu(size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt); // See Note [Enabling Deterministic Operations] if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) { @@ -264,11 +264,11 @@ Tensor empty_cpu(IntArrayRef size, c10::optional dtype_opt, c10::opt Tensor empty_names( IntArrayRef size, - c10::optional names, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional names, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -285,8 +285,8 @@ Tensor empty_names( return result; } -Tensor empty_permuted_symint(SymIntArrayRef size, IntArrayRef physical_layout, c10::optional dtype_opt, - c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt +Tensor empty_permuted_symint(SymIntArrayRef size, IntArrayRef physical_layout, std::optional dtype_opt, + std::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt ) { // size is logical; aka, the output size you'll get from the operation overall // @@ -324,8 +324,8 @@ Tensor empty_permuted_symint(SymIntArrayRef size, IntArrayRef physical_layout, c return phys_tensor.as_strided_symint(size, strides); } -Tensor empty_strided_cpu(IntArrayRef size, IntArrayRef stride, c10::optional dtype_opt, - c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { +Tensor empty_strided_cpu(IntArrayRef size, IntArrayRef stride, std::optional dtype_opt, + std::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { Tensor result = at::detail::empty_strided_cpu(size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt); // See Note [Enabling Deterministic Operations] if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) { @@ -335,7 +335,7 @@ 
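
Most factory changes in TensorFactories.cpp follow the "[Note: hacky wrapper removal for TensorOptions]" recipe referenced above: four std::optional arguments are folded back into a single TensorOptions. A sketch of that recipe in isolation (the factory name is invented; assumes libtorch headers):

```
#include <ATen/ATen.h>
#include <optional>

at::Tensor my_factory(at::IntArrayRef size,
                      std::optional<at::ScalarType> dtype,
                      std::optional<at::Layout> layout,
                      std::optional<at::Device> device,
                      std::optional<bool> pin_memory) {
  // Each setter accepts an optional; a disengaged value leaves the default in place.
  at::TensorOptions options = at::TensorOptions()
                                  .dtype(dtype)
                                  .layout(layout)
                                  .device(device)
                                  .pinned_memory(pin_memory);
  return at::empty(size, options);
}
```
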
Tensor empty_strided_cpu(IntArrayRef size, IntArrayRef stride, c10::optional optional_memory_format, + std::optional optional_memory_format, Tensor& result) { // Preferably, this argument would not be accepted by _out, but the code // generator requires the out and non-out overloads to match exactly @@ -377,11 +377,11 @@ C10_DIAGNOSTIC_POP() Tensor empty_like( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options_ = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -430,11 +430,11 @@ Tensor empty_like( Tensor empty_like_quantized( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options_ = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -458,7 +458,7 @@ Tensor empty_like_quantized( // TODO: To support all features of MemoryFormat::Preserve we need to add // _empty_affine_quantized_strided function and use it similarly to - // Tensor clone(const Tensor& src, c10::optional optional_memory_format) + // Tensor clone(const Tensor& src, std::optional optional_memory_format) // if (self.is_non_overlapping_and_dense()) -> _empty_affine_quantized_strided if (memory_format == MemoryFormat::Preserve) { memory_format = self.suggest_memory_format(); @@ -508,10 +508,10 @@ Tensor empty_like_quantized( Tensor new_empty_symint( const Tensor& self, SymIntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt ) { auto dtype = dtype_opt.has_value() ? dtype_opt : optTypeMetaToScalarType(self.options().dtype_opt()); auto layout = layout_opt.has_value() ? 
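
empty_like and the other *_like factories above additionally carry an optional memory format. A short call-site sketch of the unpacked overload (not from the patch; assumes libtorch headers), where std::nullopt for a component means "inherit it from self":

```
#include <ATen/ATen.h>
#include <optional>

int main() {
  at::Tensor x = at::randn({2, 3, 4, 5}).contiguous(at::MemoryFormat::ChannelsLast);
  at::Tensor y = at::empty_like(x,
                                /*dtype=*/at::kDouble,
                                /*layout=*/std::nullopt,
                                /*device=*/std::nullopt,
                                /*pin_memory=*/std::nullopt,
                                /*memory_format=*/at::MemoryFormat::Preserve);
  // y gets the overridden double dtype but inherits x's channels-last strides.
  return 0;
}
```
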
layout_opt : self.options().layout_opt(); @@ -524,10 +524,10 @@ Tensor new_empty_strided_symint( const Tensor& self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory ) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -538,19 +538,19 @@ Tensor new_empty_strided_symint( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eye ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor eye(int64_t n, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // the default value of `m` equals to `n` return at::eye(n, n, dtype, layout, device, pin_memory); } Tensor eye(int64_t n, int64_t m, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -614,10 +614,10 @@ TensorOptions infer_full_options( } // anonymous namespace Tensor full(IntArrayRef size, const Scalar& fill_value, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -639,11 +639,11 @@ Tensor& full_out(IntArrayRef size, const Scalar& fill_value, Tensor& result) { Tensor full_like( const Tensor& self, const Scalar& fill_value, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -655,10 +655,10 @@ Tensor new_full( const Tensor& self, IntArrayRef size, const Scalar& fill_value, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory ) { Tensor r = self.new_empty(size, TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory)); @@ -693,10 +693,10 @@ Tensor linspace( const Scalar& start, const Scalar& end, int64_t steps, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -710,10 +710,10 @@ Tensor linspace( const Tensor& start, const Tensor& end, int64_t steps, - c10::optional dtype, - c10::optional layout, - c10::optional device, 
- c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(start.dim() == 0 && end.dim() == 0, "linspace only supports 0-dimensional start and end tensors, " "but got start with ", start.dim(), " dimension(s) and end with ", end.dim()," dimension(s)."); return at::linspace(start.item(), end.item(), steps, dtype, layout, device, pin_memory); @@ -723,10 +723,10 @@ Tensor linspace( const Tensor& start, const Scalar& end, int64_t steps, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(start.dim() == 0, "linspace only supports 0-dimensional start and end tensors, " "but got start with ", start.dim(), " dimension(s)."); return at::linspace(start.item(), end, steps, dtype, layout, device, pin_memory); @@ -736,10 +736,10 @@ Tensor linspace( const Scalar& start, const Tensor& end, int64_t steps, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(end.dim() == 0, "linspace only supports 0-dimensional start and end tensors, " "but got end with ", end.dim()," dimension(s)."); return at::linspace(start, end.item(), steps, dtype, layout, device, pin_memory); @@ -752,10 +752,10 @@ Tensor logspace( const Scalar& end, int64_t steps, double base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -770,10 +770,10 @@ Tensor logspace( const Tensor& end, int64_t steps, double base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(start.dim() == 0 && end.dim() == 0, "logspace only supports 0-dimensional start and end tensors, " "but got start with ", start.dim(), " dimension(s) and end with ", end.dim()," dimension(s)."); return at::logspace(start.item(), end.item(), steps, base, dtype, layout, device, pin_memory); @@ -784,10 +784,10 @@ Tensor logspace( const Scalar& end, int64_t steps, double base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(start.dim() == 0, "logspace only supports 0-dimensional start and end tensors, " "but got start with ", start.dim(), " dimension(s)."); return at::logspace(start.item(), end, steps, base, dtype, layout, device, pin_memory); @@ -798,10 +798,10 @@ Tensor logspace( const Tensor& end, int64_t steps, double base, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(end.dim() == 0, "logspace only supports 0-dimensional start and end tensors, " "but got end with ", end.dim()," dimension(s)."); return at::logspace(start, end.item(), steps, base, dtype, layout, device, 
pin_memory); @@ -810,10 +810,10 @@ Tensor logspace( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor ones(IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::full(size, /*fill_value=*/1., dtype, layout, device, pin_memory); } @@ -823,11 +823,11 @@ Tensor& ones_out(IntArrayRef size, Tensor& result) { Tensor ones_like( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { auto result = at::empty_like(self, dtype, layout, device, pin_memory, optional_memory_format); return result.fill_(1.); } @@ -835,10 +835,10 @@ Tensor ones_like( Tensor new_ones( const Tensor& self, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] Tensor r = self.new_empty(size, TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory)); r.fill_(1.); @@ -848,10 +848,10 @@ Tensor new_ones( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ scalar_tensor ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor scalar_tensor(const Scalar& s, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -874,18 +874,18 @@ Tensor scalar_tensor(const Scalar& s, // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ rand ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor rand(IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { - return native::rand(size, static_cast>(c10::nullopt), dtype, layout, device, pin_memory); -} - -Tensor rand(IntArrayRef size, c10::optional generator, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { + return native::rand(size, static_cast>(c10::nullopt), dtype, layout, device, pin_memory); +} + +Tensor rand(IntArrayRef size, std::optional generator, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -897,18 +897,18 @@ Tensor& rand_out(IntArrayRef size, Tensor& result) { return native::rand_out(size, c10::nullopt, result); } -Tensor& rand_out(IntArrayRef size, c10::optional generator, Tensor& result) { +Tensor& rand_out(IntArrayRef size, std::optional generator, Tensor& result) { result.resize_(size); return result.uniform_(0, 1, std::move(generator)); } Tensor rand_like( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional 
optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -919,21 +919,21 @@ Tensor rand_like( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor randint(int64_t high, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::randint(high, size, c10::nullopt /* generator*/, dtype, layout, device, pin_memory); } Tensor randint( int64_t high, IntArrayRef size, - c10::optional generator, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional generator, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::randint(0, high, size, std::move(generator), dtype, layout, device, pin_memory); } @@ -941,10 +941,10 @@ Tensor randint( int64_t low, int64_t high, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::randint(low, high, size, c10::nullopt, dtype, layout, device, pin_memory); } @@ -952,11 +952,11 @@ Tensor randint( int64_t low, int64_t high, IntArrayRef size, - c10::optional generator, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional generator, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -970,7 +970,7 @@ Tensor& randint_out(int64_t high, IntArrayRef size, Tensor& result) { Tensor& randint_out(int64_t high, IntArrayRef size, - c10::optional generator, + std::optional generator, Tensor& result) { result.resize_(size); return result.random_(0, high, std::move(generator)); @@ -983,7 +983,7 @@ Tensor& randint_out(int64_t low, int64_t high, IntArrayRef size, Tensor& result) Tensor& randint_out(int64_t low, int64_t high, IntArrayRef size, - c10::optional generator, + std::optional generator, Tensor& result) { result.resize_(size); return result.random_(low, high, std::move(generator)); @@ -992,11 +992,11 @@ Tensor& randint_out(int64_t low, Tensor randint_like( const Tensor& self, int64_t high, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1008,11 +1008,11 @@ Tensor randint_like( const Tensor& self, int64_t low, int64_t high, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + 
std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1023,18 +1023,18 @@ Tensor randint_like( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randn ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor randn(IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { - return native::randn(size, static_cast>(c10::nullopt), dtype, layout, device, pin_memory); -} - -Tensor randn(IntArrayRef size, c10::optional generator, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { + return native::randn(size, static_cast>(c10::nullopt), dtype, layout, device, pin_memory); +} + +Tensor randn(IntArrayRef size, std::optional generator, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1046,17 +1046,17 @@ Tensor& randn_out(IntArrayRef size, Tensor& result) { return native::randn_out(size, c10::nullopt, result); } -Tensor& randn_out(IntArrayRef size, c10::optional generator, Tensor& result) { +Tensor& randn_out(IntArrayRef size, std::optional generator, Tensor& result) { result.resize_(size); return result.normal_(0, 1, std::move(generator)); } Tensor normal(double mean, double std, IntArrayRef size, - c10::optional generator, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional generator, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1065,18 +1065,18 @@ Tensor normal(double mean, double std, IntArrayRef size, } Tensor& normal_out(double mean, double std, - IntArrayRef size, c10::optional generator, Tensor& result) { + IntArrayRef size, std::optional generator, Tensor& result) { result.resize_(size); return result.normal_(mean, std, std::move(generator)); } Tensor randn_like( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1113,18 +1113,18 @@ void randperm_cpu(Tensor& result, int64_t n, CPUGeneratorImpl* generator) { } // namespace Tensor randperm(int64_t n, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::randperm(n, c10::nullopt, dtype, layout, device, pin_memory); } -Tensor randperm(int64_t n, c10::optional generator, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { +Tensor randperm(int64_t n, std::optional 
generator, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { if (!dtype.has_value()) { dtype = ScalarType::Long; } @@ -1140,7 +1140,7 @@ Tensor& randperm_out(int64_t n, Tensor& result) { return at::randperm_out(result, n, c10::nullopt); } -Tensor& randperm_out_cpu(int64_t n, c10::optional generator, Tensor& result) { +Tensor& randperm_out_cpu(int64_t n, std::optional generator, Tensor& result) { TORCH_CHECK(n >= 0, "n must be non-negative, got", n); TORCH_CHECK(!generator.has_value() || (generator.has_value() && result.device() == generator->device()), "Expected a '", result.device(), "' generator device but found '", generator->device(), "'"); check_supported_max_int_with_precision(n, result); @@ -1161,10 +1161,10 @@ Tensor range( const Scalar& start, const Scalar& end, const Scalar& step, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1175,18 +1175,18 @@ Tensor range( Tensor range( const Scalar& start, const Scalar& end, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return at::native::range(start, end, 1, dtype, layout, device, pin_memory); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor tril_indices_cpu( - int64_t row, int64_t col, int64_t offset, c10::optional dtype_opt, - c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { + int64_t row, int64_t col, int64_t offset, std::optional dtype_opt, + std::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { if (!dtype_opt.has_value()) { dtype_opt = ScalarType::Long; } @@ -1235,8 +1235,8 @@ Tensor tril_indices_cpu( } Tensor triu_indices_cpu( - int64_t row, int64_t col, int64_t offset, c10::optional dtype_opt, - c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { + int64_t row, int64_t col, int64_t offset, std::optional dtype_opt, + std::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { if (!dtype_opt.has_value()) { dtype_opt = ScalarType::Long; } @@ -1278,10 +1278,10 @@ Tensor triu_indices_cpu( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ zeros ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ static Tensor zeros_sparse_compressed_symint(c10::SymIntArrayRef size, - c10::optional dtype, + std::optional dtype, Layout layout, - c10::optional device, - c10::optional pin_memory) { + std::optional device, + std::optional pin_memory) { check_size_nonnegative(size); TORCH_CHECK(size.size() >= 2, "torch.zeros: Only batched sparse compressed (non-block) tensors are supported, but got size ", size); auto size_ = C10_AS_INTARRAYREF_SLOW(size); @@ -1312,10 +1312,10 @@ static Tensor zeros_sparse_compressed_symint(c10::SymIntArrayRef size, } Tensor zeros_symint(SymIntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { Layout layout_ = layout.value_or(Layout::Strided); if (at::sparse_csr::is_sparse_compressed(layout_)) { return 
zeros_sparse_compressed_symint(size, dtype, layout_, device, pin_memory); @@ -1327,10 +1327,10 @@ Tensor zeros_symint(SymIntArrayRef size, } Tensor _efficientzerotensor(IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { auto device_ = device_or_default(device); auto allocator = at::native::ZeroTensorAllocator(device_); auto dtype_ = dtype_or_default(dtype); @@ -1340,10 +1340,10 @@ Tensor _efficientzerotensor(IntArrayRef size, } Tensor _efficientzerotensor_meta_symint(SymIntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { auto device_ = device_or_default(device); auto allocator = at::native::ZeroTensorAllocator(device_); auto dtype_ = dtype_or_default(dtype); @@ -1372,11 +1372,11 @@ Tensor& zeros_out(IntArrayRef size, Tensor& result) { Tensor zeros_like( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] auto other_options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); // Prefer values passed in explicitly, but default to value from self. @@ -1423,10 +1423,10 @@ Tensor zeros_like( Tensor new_zeros( const Tensor& self, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory ) { Tensor r = self.new_empty(size, TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory)); r.zero_(); @@ -1436,10 +1436,10 @@ Tensor new_zeros( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ bartlett_window ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor bartlett_window(int64_t window_length, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::bartlett_window( window_length, /*periodic=*/true, dtype, layout, device, pin_memory); } @@ -1447,10 +1447,10 @@ Tensor bartlett_window(int64_t window_length, Tensor bartlett_window( int64_t window_length, bool periodic, - c10::optional dtype_opt, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype_opt, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] ScalarType dtype = c10::dtype_or_default(dtype_opt); TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1475,10 +1475,10 @@ Tensor bartlett_window( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ blackman_window ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor blackman_window(int64_t window_length, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::blackman_window( window_length, /*periodic=*/true, dtype, 
layout, device, pin_memory); } @@ -1486,10 +1486,10 @@ Tensor blackman_window(int64_t window_length, Tensor blackman_window( int64_t window_length, bool periodic, - c10::optional dtype_opt, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype_opt, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] ScalarType dtype = c10::dtype_or_default(dtype_opt); TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1515,10 +1515,10 @@ Tensor blackman_window( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ hamming_window ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor hamming_window(int64_t window_length, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::hamming_window( window_length, /*periodic=*/true, dtype, layout, device, pin_memory); } @@ -1526,10 +1526,10 @@ Tensor hamming_window(int64_t window_length, Tensor hamming_window( int64_t window_length, bool periodic, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::hamming_window( window_length, periodic, @@ -1544,10 +1544,10 @@ Tensor hamming_window( int64_t window_length, bool periodic, double alpha, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::hamming_window( window_length, periodic, alpha, /*beta=*/0.46, dtype, layout, device, pin_memory); } @@ -1557,10 +1557,10 @@ Tensor hamming_window( bool periodic, double alpha, double beta, - c10::optional dtype_opt, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype_opt, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] ScalarType dtype = c10::dtype_or_default(dtype_opt); TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1583,20 +1583,20 @@ Tensor hamming_window( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ hann_window ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor hann_window(int64_t window_length, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::hann_window(window_length, /*periodic=*/true, dtype, layout, device, pin_memory); } Tensor hann_window( int64_t window_length, bool periodic, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1608,10 +1608,10 @@ Tensor hann_window( // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ kaiser_window ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tensor kaiser_window(int64_t window_length, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { 
+ std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::kaiser_window( window_length, /*periodic=*/true, @@ -1623,10 +1623,10 @@ Tensor kaiser_window(int64_t window_length, } Tensor kaiser_window(int64_t window_length, bool periodic, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::kaiser_window(window_length, periodic, /*beta=*/12.0, dtype, layout, device, pin_memory); } @@ -1634,10 +1634,10 @@ Tensor kaiser_window( int64_t window_length, bool periodic, double beta, - c10::optional dtype_opt, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype_opt, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] ScalarType dtype = c10::dtype_or_default(dtype_opt); TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1667,7 +1667,7 @@ Tensor kaiser_window( // ~~~~~~~~~~~~~~~~~~~~~~~~~~ vandermonde_matrix ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Tensor vander(const Tensor& x, c10::optional N, bool increasing) { +Tensor vander(const Tensor& x, std::optional N, bool increasing) { TORCH_CHECK(x.dim() == 1, "x must be a one-dimensional tensor."); // Acquires n, defaulting to size if not provided @@ -1717,11 +1717,11 @@ Tensor tensor_complex_backend(ArrayRef values, const TensorOptions& options) return at::detail::tensor_complex_backend(values, options); } -Tensor from_file(c10::string_view filename, c10::optional shared, c10::optional size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { +Tensor from_file(c10::string_view filename, std::optional shared, c10::optional size, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1745,7 +1745,7 @@ Tensor from_file(c10::string_view filename, c10::optional shared, c10::opt // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ clone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Tensor clone(const Tensor& src, c10::optional optional_memory_format) { +Tensor clone(const Tensor& src, std::optional optional_memory_format) { auto memory_format = optional_memory_format.value_or(MemoryFormat::Preserve); Tensor self; @@ -1777,10 +1777,10 @@ Tensor full( IntArrayRef size, const Scalar& fill_value, optional names, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1795,10 +1795,10 @@ Tensor full( Tensor ones( IntArrayRef size, optional names, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] return native::full( @@ -1808,31 +1808,31 @@ Tensor ones( Tensor zeros( IntArrayRef size, optional names, - c10::optional 
dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::full(size, /*fill_value=*/0., names, dtype, layout, device, pin_memory); } Tensor randn( IntArrayRef size, optional names, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::randn(size, c10::nullopt, names, dtype, layout, device, pin_memory); } Tensor randn( IntArrayRef size, - c10::optional generator, + std::optional generator, optional names, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -1843,21 +1843,21 @@ Tensor randn( Tensor rand( IntArrayRef size, optional names, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return native::rand(size, c10::nullopt, names, dtype, layout, device, pin_memory); } Tensor rand( IntArrayRef size, - c10::optional generator, + std::optional generator, optional names, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); diff --git a/aten/src/ATen/native/TensorFactories.h b/aten/src/ATen/native/TensorFactories.h index f9b2893d768a9..58cbbfc4df334 100644 --- a/aten/src/ATen/native/TensorFactories.h +++ b/aten/src/ATen/native/TensorFactories.h @@ -63,7 +63,7 @@ inline int64_t get_tril_size(int64_t row, int64_t col, int64_t offset) { } inline void check_args( - int64_t row, int64_t col, c10::optional layout_opt) { + int64_t row, int64_t col, std::optional layout_opt) { TORCH_CHECK(row >= 0, "row must be non-negative, got", row); TORCH_CHECK(col >= 0, "col must be non-negative, got", col); if (layout_opt.has_value()) { diff --git a/aten/src/ATen/native/TensorShape.cpp b/aten/src/ATen/native/TensorShape.cpp index 366f67a3be447..c4b8b12b67307 100644 --- a/aten/src/ATen/native/TensorShape.cpp +++ b/aten/src/ATen/native/TensorShape.cpp @@ -228,7 +228,7 @@ inline void cat_check_no_zero_dim(const MaterializedITensorListRef& tensors) { } inline c10::MemoryFormat cat_compute_output_memory_format(const MaterializedITensorListRef& inputs) { - c10::optional format = c10::nullopt; + std::optional format = c10::nullopt; for (const Tensor& t : inputs) { auto f = t.suggest_memory_format(); if (f == c10::MemoryFormat::Contiguous) { @@ -2511,8 +2511,8 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in Tensor slice( const Tensor& self, int64_t dim, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, int64_t step) { int64_t ndim = self.dim(); if (ndim == 0) { @@ -2568,8 +2568,8 @@ Tensor slice_inverse_symint( const Tensor& self, const Tensor& base, int64_t /* dim */, - 
c10::optional /* start */, - c10::optional /* end */, + std::optional /* start */, + std::optional /* end */, SymInt /* step */) { // assume self has enough to storage to be viewed with base's metadata return self.as_strided_symint(base.sym_sizes(), base.sym_strides(), base.sym_storage_offset()); @@ -3519,7 +3519,7 @@ static inline void handle_unflatten_exception(const std::runtime_error &e, const Tensor &self, int64_t dim, SymIntArrayRef sizes, - c10::optional names) { + std::optional names) { if (!strstr(e.what(), "is invalid for input of size")) { TORCH_CHECK(false, "unflatten got an unexpected error:\n", e.what()); } @@ -3536,7 +3536,7 @@ static inline void handle_unflatten_exception(const std::runtime_error &e, } } -static Tensor unflatten_impl(const Tensor& self, int64_t dim, SymIntArrayRef sizes, c10::optional names) { +static Tensor unflatten_impl(const Tensor& self, int64_t dim, SymIntArrayRef sizes, std::optional names) { dim = maybe_wrap_dim(dim, self.dim()); TORCH_CHECK(!sizes.empty(), "unflatten: sizes must be non-empty"); @@ -4013,7 +4013,7 @@ at::Tensor clone_preserve_strides(const at::Tensor& self) { } -at::Tensor slice_scatter(const at::Tensor& self, const at::Tensor& src, int64_t dim, c10::optional start, c10::optional end, int64_t step) { +at::Tensor slice_scatter(const at::Tensor& self, const at::Tensor& src, int64_t dim, std::optional start, c10::optional end, int64_t step) { // See Note [*_scatter ops preserve strides] auto output = clone_preserve_strides(self); auto slice = output.slice(dim, start, end, step); @@ -4036,7 +4036,7 @@ at::Tensor diagonal_scatter(const at::Tensor& self, const at::Tensor& src, int64 slice.copy_(src); return output; } -at::Tensor as_strided_scatter_symint(const at::Tensor& self, const at::Tensor& src, at::SymIntArrayRef size, at::SymIntArrayRef stride, c10::optional storage_offset) { +at::Tensor as_strided_scatter_symint(const at::Tensor& self, const at::Tensor& src, at::SymIntArrayRef size, at::SymIntArrayRef stride, std::optional storage_offset) { // See Note [as_strided_scatter backward support] TORCH_INTERNAL_ASSERT(!self.requires_grad() || self.is_contiguous(), "as_strided_scatter is currently only supported for contiguous inputs"); // See Note [*_scatter ops preserve strides] diff --git a/aten/src/ATen/native/TensorTransformations.cpp b/aten/src/ATen/native/TensorTransformations.cpp index 5a7c3a6de965f..b13f28d56a86a 100644 --- a/aten/src/ATen/native/TensorTransformations.cpp +++ b/aten/src/ATen/native/TensorTransformations.cpp @@ -230,7 +230,7 @@ std::vector atleast_3d(TensorList tensors) { return result; } -Tensor chalf(const Tensor& self, c10::optional memory_format) { +Tensor chalf(const Tensor& self, std::optional memory_format) { return self.to(kComplexHalf, false, false, memory_format); } diff --git a/aten/src/ATen/native/TestOps.cpp b/aten/src/ATen/native/TestOps.cpp index e2fce123035ba..f9fa0839a51ae 100644 --- a/aten/src/ATen/native/TestOps.cpp +++ b/aten/src/ATen/native/TestOps.cpp @@ -49,7 +49,7 @@ Tensor _test_optional_intlist( /// Else, return a new tensor containing the elementwise sums. 
Tensor _test_optional_floatlist( const Tensor& values, - c10::optional> addends) { + std::optional> addends) { if (!addends) { return values; } diff --git a/aten/src/ATen/native/UnaryOps.cpp b/aten/src/ATen/native/UnaryOps.cpp index 6c22d2583f130..3520620280fee 100644 --- a/aten/src/ATen/native/UnaryOps.cpp +++ b/aten/src/ATen/native/UnaryOps.cpp @@ -772,23 +772,23 @@ Tensor square(const Tensor& self) { return at::pow(self, 2); } Tensor& square_(Tensor& self) { return self.pow_(2); } Tensor& logit_out(const Tensor& self, - c10::optional eps, + std::optional eps, Tensor& result) { return unary_op_impl_float_out( result, self, logit_stub, Scalar(eps ? eps.value() : -1.0)); } -Tensor logit(const Tensor& self, c10::optional eps) { +Tensor logit(const Tensor& self, std::optional eps) { return unary_op_impl_float( self, logit_stub, Scalar(eps ? eps.value() : -1.0)); } -Tensor& logit_(Tensor& self, c10::optional eps) { +Tensor& logit_(Tensor& self, std::optional eps) { return at::logit_out(self, self, eps); } -Tensor& special_logit_out(const Tensor& self, c10::optional eps, Tensor& result) { +Tensor& special_logit_out(const Tensor& self, std::optional eps, Tensor& result) { return at::logit_out(result, self, eps); } -Tensor special_logit(const Tensor& self, c10::optional eps) { +Tensor special_logit(const Tensor& self, std::optional eps) { return self.logit(eps); } @@ -801,9 +801,9 @@ Tensor special_expit(const Tensor& self) { } Tensor& nan_to_num_out(const Tensor& self, - c10::optional nan, - c10::optional pos_inf, - c10::optional neg_inf, + std::optional nan, + std::optional pos_inf, + std::optional neg_inf, Tensor& result) { TORCH_CHECK( self.scalar_type() == result.scalar_type(), @@ -825,18 +825,18 @@ Tensor& nan_to_num_out(const Tensor& self, Tensor nan_to_num( const Tensor& self, - c10::optional nan, - c10::optional pos_inf, - c10::optional neg_inf) { + std::optional nan, + std::optional pos_inf, + std::optional neg_inf) { auto result = at::empty_like(self); return at::nan_to_num_out(result, self, nan, pos_inf, neg_inf); } Tensor& nan_to_num_( Tensor& self, - c10::optional nan, - c10::optional pos_inf, - c10::optional neg_inf) { + std::optional nan, + std::optional pos_inf, + std::optional neg_inf) { return at::nan_to_num_out(self, self, nan, pos_inf, neg_inf); } diff --git a/aten/src/ATen/native/UnaryOps.h b/aten/src/ATen/native/UnaryOps.h index 91d4d84d4630c..3d99fdc40d048 100644 --- a/aten/src/ATen/native/UnaryOps.h +++ b/aten/src/ATen/native/UnaryOps.h @@ -93,30 +93,30 @@ DECLARE_DISPATCH(unary_fn, special_scaled_modified_bessel_k1_stub); DECLARE_DISPATCH(unary_fn, special_spherical_bessel_j0_stub); // NB: these are actually defined in Distribution -DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, c10::optional), bernoulli_tensor_stub); -DECLARE_DISPATCH(void(*)(const TensorBase&, const double, c10::optional), bernoulli_scalar_stub); -DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional), cauchy_stub); -DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional), exponential_stub); -DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional), geometric_stub); -DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional), log_normal_stub); -DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional), uniform_stub); -DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, c10::optional), normal_stub); 
-DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, c10::optional), random_from_to_stub); -DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional), random_full_64_bits_range_stub); -DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional), random_stub); +DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, std::optional), bernoulli_tensor_stub); +DECLARE_DISPATCH(void(*)(const TensorBase&, const double, std::optional), bernoulli_scalar_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, std::optional), cauchy_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, std::optional), exponential_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, std::optional), geometric_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, std::optional), log_normal_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, std::optional), uniform_stub); +DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, std::optional), normal_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, std::optional), random_from_to_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, std::optional), random_full_64_bits_range_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, std::optional), random_stub); DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t, const double), kaiser_window_stub); DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t), polygamma_stub); DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const Scalar& a, const Scalar& b), clamp_stub); DECLARE_DISPATCH( - void (*)(Tensor&, const Tensor&, int64_t, c10::optional), + void (*)(Tensor&, const Tensor&, int64_t, std::optional), multinomial_with_replacement_stub); DECLARE_DISPATCH( void (*)( TensorIteratorBase&, - c10::optional, - c10::optional, - c10::optional), + std::optional, + std::optional, + std::optional), nan_to_num_stub); DECLARE_DISPATCH(void (*)(TensorIteratorBase&, int64_t), round_decimals_stub); diff --git a/aten/src/ATen/native/Unique.cpp b/aten/src/ATen/native/Unique.cpp index 801af5d5e79fe..5c0deff804a33 100644 --- a/aten/src/ATen/native/Unique.cpp +++ b/aten/src/ATen/native/Unique.cpp @@ -484,7 +484,7 @@ unique_dim_consecutive_cpu(const Tensor& self, const int64_t dim, const bool ret } std::tuple -unique_consecutive_cpu(const Tensor& self, const bool return_inverse, const bool return_counts, c10::optional dim) { +unique_consecutive_cpu(const Tensor& self, const bool return_inverse, const bool return_counts, std::optional dim) { if (!dim.has_value() || (dim.value() == 0 && self.dim() == 1)) { return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] { return unique_consecutive_cpu_template(self, return_inverse, return_counts); diff --git a/aten/src/ATen/native/UpSample.cpp b/aten/src/ATen/native/UpSample.cpp index 2403d11e4604e..e0e3f82ac32fc 100644 --- a/aten/src/ATen/native/UpSample.cpp +++ b/aten/src/ATen/native/UpSample.cpp @@ -10,7 +10,7 @@ namespace at::native::upsample { TORCH_API c10::SmallVector compute_output_size( c10::IntArrayRef input_size, // Full input tensor size. 
     at::OptionalIntArrayRef output_size,
-    c10::optional<c10::ArrayRef<double>> scale_factors) {
+    std::optional<c10::ArrayRef<double>> scale_factors) {
   const auto spatial_dimensions = static_cast<int64_t>(input_size.size()) - 2;
   if (output_size) {
     TORCH_CHECK(!scale_factors, "Must specify exactly one of output_size and scale_factors");
diff --git a/aten/src/ATen/native/UpSample.h b/aten/src/ATen/native/UpSample.h
index 8dadc7cee3ae4..e2b3c36b5d775 100644
--- a/aten/src/ATen/native/UpSample.h
+++ b/aten/src/ATen/native/UpSample.h
@@ -55,9 +55,9 @@ namespace upsample {
 TORCH_API c10::SmallVector<int64_t, 3> compute_output_size(
     c10::IntArrayRef input_size,  // Full input tensor size.
     at::OptionalIntArrayRef output_size,
-    c10::optional<c10::ArrayRef<double>> scale_factors);
+    std::optional<c10::ArrayRef<double>> scale_factors);
 
-inline c10::optional<double> get_scale_value(c10::optional<c10::ArrayRef<double>> scales, int idx) {
+inline std::optional<double> get_scale_value(std::optional<c10::ArrayRef<double>> scales, int idx) {
   if (!scales) {
     return c10::nullopt;
   }
@@ -66,7 +66,7 @@ inline c10::optional get_scale_value(c10::optional
 
 } // namespace upsample
 
-using scale_t = c10::optional<double>;
+using scale_t = std::optional<double>;
 using upsampling_nearest1d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_w);
 using _upsampling_nearest_exact1d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_w);
 using upsampling_nearest2d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_h, scale_t scales_w);
@@ -252,7 +252,7 @@ static inline void upsample_2d_shape_check(
 
 template <typename scalar_t>
 static inline scalar_t compute_scales_value(
-    const c10::optional<double> scale,
+    const std::optional<double> scale,
     int64_t input_size,
     int64_t output_size) {
       // see Note [compute_scales_value]
@@ -267,7 +267,7 @@ static inline scalar_t area_pixel_compute_scale(
     int64_t input_size,
     int64_t output_size,
     bool align_corners,
-    const c10::optional<double> scale) {
+    const std::optional<double> scale) {
   // see Note [area_pixel_compute_scale]
   if(align_corners) {
     if(output_size > 1) {
@@ -335,7 +335,7 @@ static inline int64_t nearest_idx(
     int64_t output_index,
     int64_t input_size,
     int64_t output_size,
-    c10::optional<double> scales) {
+    std::optional<double> scales) {
   // This method specificly treats cases: output_size == input_size or
   // output_size == 2 * input_size, that we would like to get rid of
   // We keep this method for BC and consider as deprecated.
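For context on the hunks above: `scale_t` is just an optional scale factor, and `get_scale_value` pulls one entry out of an optional list of per-dimension scales. A minimal self-contained sketch of that pattern, assuming `double`-valued scale factors as in the `scale_t` alias; `pick_scale` and `effective_scale` are illustrative names, not the upstream helpers:

```cpp
// Illustrative sketch only -- assumes std::optional<double> scale factors,
// mirroring the `using scale_t = std::optional<double>;` alias above.
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

using scale_t = std::optional<double>;

// Hypothetical helper: fetch the idx-th scale if a scale list was provided.
scale_t pick_scale(const std::optional<std::vector<double>>& scales, int idx) {
  if (!scales) {
    return std::nullopt;  // caller passed no explicit scale factors
  }
  return (*scales)[idx];
}

// Hypothetical helper: fall back to the output/input size ratio
// when no explicit scale factor was given.
double effective_scale(scale_t scale, int64_t input_size, int64_t output_size) {
  if (scale.has_value() && *scale > 0.0) {
    return *scale;
  }
  return static_cast<double>(output_size) / static_cast<double>(input_size);
}

int main() {
  std::optional<std::vector<double>> factors = std::vector<double>{2.0, 1.5};
  std::printf("%f\n", effective_scale(pick_scale(factors, 0), 8, 16));       // 2.0 (explicit)
  std::printf("%f\n", effective_scale(pick_scale(std::nullopt, 0), 8, 16));  // 2.0 (from sizes)
  return 0;
}
```

Because `std::optional` is a drop-in alias target for `c10::optional`, callers of these helpers need no changes beyond the spelling swap shown in the diff.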
@@ -356,13 +356,13 @@ static inline int64_t nearest_exact_idx( int64_t output_index, int64_t input_size, int64_t output_size, - c10::optional scales) { + std::optional scales) { float scale = compute_scales_value(scales, input_size, output_size); return nearest_neighbor_exact_compute_source_index(scale, output_index, input_size); } // Define a typedef to dispatch to nearest_idx or nearest_exact_idx -typedef int64_t (*nearest_idx_fn_t)(int64_t, int64_t, int64_t, c10::optional); +typedef int64_t (*nearest_idx_fn_t)(int64_t, int64_t, int64_t, std::optional); template static scalar_t upsample_get_value_bounded( diff --git a/aten/src/ATen/native/UpSampleBicubic2d.cpp b/aten/src/ATen/native/UpSampleBicubic2d.cpp index f5e523c4a9114..8f5046534103b 100644 --- a/aten/src/ATen/native/UpSampleBicubic2d.cpp +++ b/aten/src/ATen/native/UpSampleBicubic2d.cpp @@ -23,7 +23,7 @@ namespace at::meta { TORCH_META_FUNC(upsample_bicubic2d) ( - const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w + const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional scales_h, c10::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size); @@ -41,8 +41,8 @@ TORCH_META_FUNC(upsample_bicubic2d_backward) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input_size, output_size); @@ -62,7 +62,7 @@ TORCH_META_FUNC(upsample_bicubic2d_backward) ( } TORCH_META_FUNC(_upsample_bicubic2d_aa) ( - const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w + const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional scales_h, c10::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size); @@ -80,8 +80,8 @@ TORCH_META_FUNC(_upsample_bicubic2d_aa_backward) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input_size, output_size); @@ -115,8 +115,8 @@ static void upsample_bicubic2d_backward_out_frame( int64_t nbatch, int64_t channels, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { channels = channels * nbatch; auto input_slice_size = input_height * input_width; auto output_slice_size = output_height * output_width; @@ -185,8 +185,8 @@ static void upsample_bicubic2d_backward_kernel( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { int64_t output_height = output_size[0]; int64_t output_width = output_size[1]; @@ -227,8 +227,8 @@ TORCH_IMPL_FUNC(upsample_bicubic2d_out_cpu) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { upsample_bicubic2d_kernel(kCPU, output, input, align_corners, scales_h, scales_w); @@ -239,8 +239,8 @@ TORCH_IMPL_FUNC(upsample_bicubic2d_backward_out_cpu) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - 
c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input ) { grad_input.zero_(); @@ -251,8 +251,8 @@ TORCH_IMPL_FUNC(_upsample_bicubic2d_aa_out_cpu) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { _upsample_bicubic2d_aa_kernel(kCPU, output, input, align_corners, scales_h, scales_w); @@ -263,8 +263,8 @@ TORCH_IMPL_FUNC(_upsample_bicubic2d_aa_backward_out_cpu) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input ) { grad_input.zero_(); @@ -280,7 +280,7 @@ Tensor upsample_bicubic2d( const Tensor& input, at::OptionalIntArrayRef output_size, bool align_corners, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); @@ -291,7 +291,7 @@ Tensor _upsample_bicubic2d_aa( const Tensor& input, at::OptionalIntArrayRef output_size, bool align_corners, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); diff --git a/aten/src/ATen/native/UpSampleBilinear2d.cpp b/aten/src/ATen/native/UpSampleBilinear2d.cpp index 202f33ab7970e..2cc8b56678c74 100644 --- a/aten/src/ATen/native/UpSampleBilinear2d.cpp +++ b/aten/src/ATen/native/UpSampleBilinear2d.cpp @@ -24,7 +24,7 @@ namespace at::meta { TORCH_META_FUNC(upsample_bilinear2d) ( - const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w + const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional scales_h, c10::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size); @@ -42,8 +42,8 @@ TORCH_META_FUNC(upsample_bilinear2d_backward) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input_size, output_size); @@ -63,7 +63,7 @@ TORCH_META_FUNC(upsample_bilinear2d_backward) ( } TORCH_META_FUNC(_upsample_bilinear2d_aa) ( - const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w + const Tensor& input, IntArrayRef output_size, bool align_corners, std::optional scales_h, c10::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size); @@ -81,8 +81,8 @@ TORCH_META_FUNC(_upsample_bilinear2d_aa_backward) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input_size, output_size); @@ -109,8 +109,8 @@ TORCH_IMPL_FUNC(upsample_bilinear2d_out_cpu) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { upsample_bilinear2d_kernel(kCPU, output, 
input, align_corners, scales_h, scales_w); @@ -121,8 +121,8 @@ TORCH_IMPL_FUNC(upsample_bilinear2d_backward_out_cpu) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input ) { grad_input.zero_(); @@ -134,8 +134,8 @@ TORCH_IMPL_FUNC(_upsample_bilinear2d_aa_out_cpu) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { _upsample_bilinear2d_aa_kernel(kCPU, output, input, align_corners, scales_h, scales_w); @@ -146,8 +146,8 @@ TORCH_IMPL_FUNC(_upsample_bilinear2d_aa_backward_out_cpu) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input ) { grad_input.zero_(); @@ -161,7 +161,7 @@ Tensor upsample_bilinear2d( const Tensor& input, at::OptionalIntArrayRef output_size, bool align_corners, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); @@ -172,7 +172,7 @@ Tensor _upsample_bilinear2d_aa( const Tensor& input, at::OptionalIntArrayRef output_size, bool align_corners, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); diff --git a/aten/src/ATen/native/UpSampleLinear1d.cpp b/aten/src/ATen/native/UpSampleLinear1d.cpp index 7d80d5c2dc2b8..affbcaa4f06d9 100644 --- a/aten/src/ATen/native/UpSampleLinear1d.cpp +++ b/aten/src/ATen/native/UpSampleLinear1d.cpp @@ -23,7 +23,7 @@ TORCH_META_FUNC(upsample_linear1d) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales + std::optional scales ) { auto full_output_size = native::upsample_1d_common_check(input.sizes(), output_size); @@ -41,7 +41,7 @@ TORCH_META_FUNC(upsample_linear1d_backward) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales + std::optional scales ) { auto full_output_size = native::upsample_1d_common_check(input_size, output_size); @@ -65,7 +65,7 @@ TORCH_IMPL_FUNC(upsample_linear1d_out_cpu) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales, + std::optional scales, const Tensor& output ) { upsample_linear1d_kernel(kCPU, output, input, align_corners, scales); @@ -76,7 +76,7 @@ TORCH_IMPL_FUNC(upsample_linear1d_backward_out_cpu) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales, + std::optional scales, const Tensor& grad_input ) { grad_input.zero_(); @@ -92,7 +92,7 @@ Tensor upsample_linear1d( const Tensor& input, at::OptionalIntArrayRef output_size, bool align_corners, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_w = get_scale_value(scale_factors, 0); return at::upsample_linear1d(input, osize, align_corners, scale_w); diff --git a/aten/src/ATen/native/UpSampleNearest1d.cpp b/aten/src/ATen/native/UpSampleNearest1d.cpp index 94441d6c3df97..7555d421d4afd 100644 --- 
a/aten/src/ATen/native/UpSampleNearest1d.cpp +++ b/aten/src/ATen/native/UpSampleNearest1d.cpp @@ -21,7 +21,7 @@ namespace at::meta { TORCH_META_FUNC(upsample_nearest1d) ( - const Tensor& input, IntArrayRef output_size, c10::optional scales + const Tensor& input, IntArrayRef output_size, std::optional scales ) { auto full_output_size = native::upsample_1d_common_check(input.sizes(), output_size); @@ -35,7 +35,7 @@ TORCH_META_FUNC(upsample_nearest1d) ( } TORCH_META_FUNC(_upsample_nearest_exact1d) ( - const Tensor& input, IntArrayRef output_size, c10::optional scales + const Tensor& input, IntArrayRef output_size, std::optional scales ) { auto full_output_size = native::upsample_1d_common_check(input.sizes(), output_size); @@ -49,7 +49,7 @@ TORCH_META_FUNC(_upsample_nearest_exact1d) ( } TORCH_META_FUNC(upsample_nearest1d_backward) ( - const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional scales + const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, std::optional scales ) { auto full_output_size = native::upsample_1d_common_check(input_size, output_size); @@ -61,7 +61,7 @@ TORCH_META_FUNC(upsample_nearest1d_backward) ( } TORCH_META_FUNC(_upsample_nearest_exact1d_backward) ( - const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional scales + const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, std::optional scales ) { auto full_output_size = native::upsample_1d_common_check(input_size, output_size); @@ -80,7 +80,7 @@ namespace at::native { TORCH_IMPL_FUNC(upsample_nearest1d_out_cpu) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales, + std::optional scales, const Tensor& output ) { upsample_nearest1d_kernel(kCPU, output, input, scales); @@ -89,7 +89,7 @@ TORCH_IMPL_FUNC(upsample_nearest1d_out_cpu) ( TORCH_IMPL_FUNC(_upsample_nearest_exact1d_out_cpu) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales, + std::optional scales, const Tensor& output ) { _upsample_nearest_exact1d_kernel(kCPU, output, input, scales); @@ -99,7 +99,7 @@ TORCH_IMPL_FUNC(upsample_nearest1d_backward_out_cpu) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales, + std::optional scales, const Tensor& grad_input ) { grad_input.zero_(); @@ -110,7 +110,7 @@ TORCH_IMPL_FUNC(_upsample_nearest_exact1d_backward_out_cpu) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales, + std::optional scales, const Tensor& grad_input ) { grad_input.zero_(); @@ -125,7 +125,7 @@ using at::native::upsample::get_scale_value; Tensor upsample_nearest1d( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_w = get_scale_value(scale_factors, 0); return at::upsample_nearest1d(input, osize, scale_w); @@ -134,7 +134,7 @@ Tensor upsample_nearest1d( Tensor _upsample_nearest_exact1d( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_w = get_scale_value(scale_factors, 0); return at::_upsample_nearest_exact1d(input, osize, scale_w); diff --git a/aten/src/ATen/native/UpSampleNearest2d.cpp b/aten/src/ATen/native/UpSampleNearest2d.cpp index 592108291cf76..0ee2db0597023 100644 --- 
a/aten/src/ATen/native/UpSampleNearest2d.cpp +++ b/aten/src/ATen/native/UpSampleNearest2d.cpp @@ -22,7 +22,7 @@ namespace at::meta { TORCH_META_FUNC(upsample_nearest2d) ( - const Tensor& input, IntArrayRef output_size, c10::optional scales_h, c10::optional scales_w + const Tensor& input, IntArrayRef output_size, std::optional scales_h, c10::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size); @@ -36,7 +36,7 @@ TORCH_META_FUNC(upsample_nearest2d) ( } TORCH_META_FUNC(_upsample_nearest_exact2d) ( - const Tensor& input, IntArrayRef output_size, c10::optional scales_h, c10::optional scales_w + const Tensor& input, IntArrayRef output_size, std::optional scales_h, c10::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input.sizes(), output_size); @@ -53,8 +53,8 @@ TORCH_META_FUNC(upsample_nearest2d_backward) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input_size, output_size); @@ -77,8 +77,8 @@ TORCH_META_FUNC(_upsample_nearest_exact2d_backward) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_2d_common_check(input_size, output_size); @@ -104,8 +104,8 @@ namespace at::native { TORCH_IMPL_FUNC(upsample_nearest2d_out_cpu) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { upsample_nearest2d_kernel(kCPU, output, input, scales_h, scales_w); @@ -114,8 +114,8 @@ TORCH_IMPL_FUNC(upsample_nearest2d_out_cpu) ( TORCH_IMPL_FUNC(_upsample_nearest_exact2d_out_cpu) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { _upsample_nearest_exact2d_kernel(kCPU, output, input, scales_h, scales_w); @@ -125,8 +125,8 @@ TORCH_IMPL_FUNC(upsample_nearest2d_backward_out_cpu) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { grad_input.zero_(); upsample_nearest2d_backward_kernel(kCPU, grad_input, grad_output, scales_h, scales_w); @@ -136,8 +136,8 @@ TORCH_IMPL_FUNC(_upsample_nearest_exact2d_backward_out_cpu) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { grad_input.zero_(); _upsample_nearest_exact2d_backward_kernel(kCPU, grad_input, grad_output, scales_h, scales_w); @@ -149,7 +149,7 @@ using at::native::upsample::get_scale_value; Tensor upsample_nearest2d( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); @@ -159,7 +159,7 @@ Tensor upsample_nearest2d( Tensor _upsample_nearest_exact2d( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> 
scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); diff --git a/aten/src/ATen/native/UpSampleNearest3d.cpp b/aten/src/ATen/native/UpSampleNearest3d.cpp index 0c4851b7be513..ac4dc1796252e 100644 --- a/aten/src/ATen/native/UpSampleNearest3d.cpp +++ b/aten/src/ATen/native/UpSampleNearest3d.cpp @@ -23,9 +23,9 @@ namespace at::meta { TORCH_META_FUNC(upsample_nearest3d) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_d, + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_3d_common_check(input.sizes(), output_size); @@ -41,9 +41,9 @@ TORCH_META_FUNC(upsample_nearest3d) ( TORCH_META_FUNC(_upsample_nearest_exact3d) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_d, + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_3d_common_check(input.sizes(), output_size); @@ -60,9 +60,9 @@ TORCH_META_FUNC(upsample_nearest3d_backward) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_d, + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_3d_common_check(input_size, output_size); @@ -85,9 +85,9 @@ TORCH_META_FUNC(_upsample_nearest_exact3d_backward) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_d, + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_3d_common_check(input_size, output_size); @@ -113,9 +113,9 @@ namespace at::native { TORCH_IMPL_FUNC(upsample_nearest3d_out_cpu) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { upsample_nearest3d_kernel(kCPU, output, input, scales_d, scales_h, scales_w); @@ -124,9 +124,9 @@ TORCH_IMPL_FUNC(upsample_nearest3d_out_cpu) ( TORCH_IMPL_FUNC(_upsample_nearest_exact3d_out_cpu) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { _upsample_nearest_exact3d_kernel(kCPU, output, input, scales_d, scales_h, scales_w); @@ -136,9 +136,9 @@ TORCH_IMPL_FUNC(upsample_nearest3d_backward_out_cpu) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { grad_input.zero_(); upsample_nearest3d_backward_kernel(kCPU, grad_input, grad_output, scales_d, scales_h, scales_w); @@ -148,9 +148,9 @@ TORCH_IMPL_FUNC(_upsample_nearest_exact3d_backward_out_cpu) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional 
scales_h, + std::optional scales_w, const Tensor& grad_input) { grad_input.zero_(); _upsample_nearest_exact3d_backward_kernel(kCPU, grad_input, grad_output, scales_d, scales_h, scales_w); @@ -164,7 +164,7 @@ using at::native::upsample::get_scale_value; Tensor upsample_nearest3d( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_d = get_scale_value(scale_factors, 0); auto scale_h = get_scale_value(scale_factors, 1); @@ -175,7 +175,7 @@ Tensor upsample_nearest3d( Tensor _upsample_nearest_exact3d( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_d = get_scale_value(scale_factors, 0); auto scale_h = get_scale_value(scale_factors, 1); diff --git a/aten/src/ATen/native/UpSampleTrilinear3d.cpp b/aten/src/ATen/native/UpSampleTrilinear3d.cpp index 24a915d5d9a42..9aa8f9c5cb73c 100644 --- a/aten/src/ATen/native/UpSampleTrilinear3d.cpp +++ b/aten/src/ATen/native/UpSampleTrilinear3d.cpp @@ -23,9 +23,9 @@ TORCH_META_FUNC(upsample_trilinear3d) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_d, + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_3d_common_check(input.sizes(), output_size); @@ -43,9 +43,9 @@ TORCH_META_FUNC(upsample_trilinear3d_backward) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w + std::optional scales_d, + std::optional scales_h, + std::optional scales_w ) { auto full_output_size = native::upsample_3d_common_check(input_size, output_size); @@ -71,9 +71,9 @@ TORCH_IMPL_FUNC(upsample_trilinear3d_out_cpu) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& output ) { upsample_trilinear3d_kernel(kCPU, output, input, align_corners, scales_d, scales_h, scales_w); @@ -84,9 +84,9 @@ TORCH_IMPL_FUNC(upsample_trilinear3d_backward_out_cpu) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input ) { grad_input.zero_(); @@ -102,7 +102,7 @@ Tensor upsample_trilinear3d( const Tensor& input, at::OptionalIntArrayRef output_size, bool align_corners, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_d = get_scale_value(scale_factors, 0); auto scale_h = get_scale_value(scale_factors, 1); diff --git a/aten/src/ATen/native/VariableMethodStubs.cpp b/aten/src/ATen/native/VariableMethodStubs.cpp index 477979d190be2..ed99aed399cb1 100644 --- a/aten/src/ATen/native/VariableMethodStubs.cpp +++ b/aten/src/ATen/native/VariableMethodStubs.cpp @@ -24,7 +24,7 @@ namespace at::native { -void _backward(const Tensor& self, TensorList inputs, const c10::optional& gradient_opt, c10::optional keep_graph, bool create_graph) { +void _backward(const Tensor& self, 
TensorList inputs, const std::optional& gradient_opt, std::optional keep_graph, bool create_graph) { return self._backward(inputs, gradient_opt, keep_graph, create_graph); } diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.h b/aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.h index 58a1a43fe67bc..a14fd4efc1b15 100644 --- a/aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.h +++ b/aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.h @@ -14,7 +14,7 @@ namespace sparse { struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase { PackedLinearWeight(std::unique_ptr> w, - c10::optional bias, + std::optional bias, std::vector col_offsets, std::vector w_scale, std::vector w_zp, @@ -31,7 +31,7 @@ struct TORCH_API PackedLinearWeight w_zp(std::move(w_zp)), q_scheme(q_scheme) {} std::unique_ptr> w; - c10::optional bias_; + std::optional bias_; std::vector col_offsets; std::vector w_scale; std::vector w_zp; @@ -68,13 +68,13 @@ struct TORCH_API PackedLinearWeight static c10::intrusive_ptr deserialize( const BCSRSerializationType& serialized); - c10::optional bias() override { + std::optional bias() override { return bias_; } static c10::intrusive_ptr prepack( const at::Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const int64_t out_features_block_size, const int64_t in_features_block_size); diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/packed_params.h b/aten/src/ATen/native/ao_sparse/quantized/cpu/packed_params.h index 1ca66bf536a77..db8ee9d619066 100644 --- a/aten/src/ATen/native/ao_sparse/quantized/cpu/packed_params.h +++ b/aten/src/ATen/native/ao_sparse/quantized/cpu/packed_params.h @@ -9,14 +9,14 @@ namespace sparse { // using LinearPackedSerializationType = - std::tuple, std::vector>; + std::tuple, std::vector>; #define SPARSE_LINEAR_PACKED_PARAM_SERIALIZATION_VERSION 2 using BCSRSerializationType = std::tuple< int64_t, // Serialization Version - c10::optional, // Bias + std::optional, // Bias int64_t, // Out Features (Row) Block Size int64_t, // In Features (Column) Block Size at::Tensor, // Weight Scales (single element vector if per-tensor) (float) @@ -60,9 +60,9 @@ struct LinearPackedParamsBase : public torch::jit::CustomClassHolder { virtual BCSRSerializationType serialize() = 0; - virtual c10::optional bias() = 0; + virtual std::optional bias() = 0; - virtual void set_bias(const c10::optional& bias) { + virtual void set_bias(const std::optional& bias) { throw std::runtime_error( "set_bias is not implemented for this packed " "parameter type"); diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp index 8f80d920e3652..f5032f4d425b8 100644 --- a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp +++ b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp @@ -53,7 +53,7 @@ void calc_col_offsets_transpose( c10::intrusive_ptr PackedLinearWeight:: prepack( const at::Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const int64_t out_features_block_size, const int64_t in_features_block_size) { TORCH_CHECK( @@ -110,7 +110,7 @@ c10::intrusive_ptr PackedLinearWeight:: /*col_offsets=*/col_offsets.data(), /*qtype=*/qtype); - c10::optional bias_contig; + std::optional bias_contig; if (bias.has_value()) { const at::Tensor& bias_vec = bias.value(); TORCH_CHECK(bias_vec.dim() == 1, "bias should be a vector (1D Tensor)"); @@ -139,7 +139,7 @@ c10::intrusive_ptr
PackedLinearWeight:: c10::intrusive_ptr PackedLinearWeightQnnp:: prepack( const at::Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const int64_t out_features_block_size, const int64_t in_features_block_size) { at::native::initQNNPACK(); @@ -150,7 +150,7 @@ c10::intrusive_ptr PackedLinearWeightQnnp:: // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) PackedLinearWeightQnnp::PackedLinearWeightQnnp( const at::Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const int64_t out_features_block_size, const int64_t in_features_block_size) : LinearPackedParamsBase(out_features_block_size, in_features_block_size), @@ -215,7 +215,7 @@ class QLinearPackWeightInt8 final { public: static c10::intrusive_ptr run( const at::Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const int64_t out_features_block_size, const int64_t in_features_block_size) { auto& ctx = at::globalContext(); diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/qnnpack_utils.h b/aten/src/ATen/native/ao_sparse/quantized/cpu/qnnpack_utils.h index 6ac89681899c5..b791cbe845756 100644 --- a/aten/src/ATen/native/ao_sparse/quantized/cpu/qnnpack_utils.h +++ b/aten/src/ATen/native/ao_sparse/quantized/cpu/qnnpack_utils.h @@ -16,9 +16,9 @@ namespace sparse { struct TORCH_API PackedLinearWeightQnnp : public LinearPackedParamsBase { - PackedLinearWeightQnnp(const at::Tensor& weight, const c10::optional& bias, const int64_t out_features_block_size /* block sparsity size across output_features */, const int64_t in_features_block_size /* block sparsity size across input_features */); + PackedLinearWeightQnnp(const at::Tensor& weight, const std::optional& bias, const int64_t out_features_block_size /* block sparsity size across output_features */, const int64_t in_features_block_size /* block sparsity size across input_features */); explicit PackedLinearWeightQnnp(const BCSRSerializationType& serialized); - c10::optional orig_bias_; + std::optional orig_bias_; // Separate copy of bias exist so that we can fill in zeros when // optional bias does not exist. This is to compy with qnnpack operator that // expects bias to be present. 
@@ -67,13 +67,13 @@ struct TORCH_API PackedLinearWeightQnnp static c10::intrusive_ptr deserialize( const BCSRSerializationType& serialized); - c10::optional bias() override { + std::optional bias() override { return orig_bias_; } static c10::intrusive_ptr prepack( const at::Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const int64_t out_features_block_size, const int64_t in_features_block_size); diff --git a/aten/src/ATen/native/cpu/AvgPoolKernel.cpp b/aten/src/ATen/native/cpu/AvgPoolKernel.cpp index 572d5af43f651..4bf03b12b1446 100644 --- a/aten/src/ATen/native/cpu/AvgPoolKernel.cpp +++ b/aten/src/ATen/native/cpu/AvgPoolKernel.cpp @@ -21,7 +21,7 @@ void cpu_avg_pool2d( int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { using acc_t = at::opmath_type; auto input = input_.contiguous(); @@ -108,7 +108,7 @@ void cpu_avg_pool2d_channels_last( int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(input_.ndimension() == 4, "2d average pooling with channels last format supports tensors with 4 dims"); auto memory_format = at::MemoryFormat::ChannelsLast; @@ -222,7 +222,7 @@ void cpu_avg_pool2d_channels_last( int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(input_.ndimension() == 4, "2d average pooling with channels last format supports tensors with 4 dims"); auto memory_format = at::MemoryFormat::ChannelsLast; @@ -354,7 +354,7 @@ void cpu_avg_pool2d_backward( int dW, int dH, int padW, int padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { auto grad_output = grad_output_.contiguous(); auto grad_input = grad_input_.contiguous(); @@ -422,7 +422,7 @@ void cpu_avg_pool2d_backward_channels_last( int dW, int dH, int padW, int padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { auto memory_format = at::MemoryFormat::ChannelsLast; auto grad_input = grad_input_.contiguous(memory_format); auto grad_output = grad_output_.contiguous(memory_format); @@ -501,7 +501,7 @@ void avg_pool2d_kernel_impl( int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { switch (input.suggest_memory_format()) { case at::MemoryFormat::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, input.scalar_type(), "avg_pool2d", [&] { @@ -527,7 +527,7 @@ void avg_pool2d_backward_kernel_impl( int dW, int dH, int padW, int padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { switch (grad_output.suggest_memory_format()) { case at::MemoryFormat::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, grad_output.scalar_type(), "avg_pool2d_backward", [&] { @@ -555,7 +555,7 @@ void cpu_avg_pool3d( int64_t dW, int64_t dH, int64_t dD, int64_t padW, int64_t padH, int64_t padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { using acc_t = at::opmath_type; auto input = input_.contiguous(); @@ -651,7 +651,7 @@ void cpu_avg_pool3d_channels_last( int64_t dW, int64_t dH, int64_t dD, int64_t padW, int64_t padH, int64_t padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional 
divisor_override) { TORCH_CHECK(input_.ndimension() == 5, "3d average pooling with channels last format supports tensors with 5 dims"); auto memory_format = at::MemoryFormat::ChannelsLast3d; @@ -774,7 +774,7 @@ void cpu_avg_pool3d_channels_last( int64_t dW, int64_t dH, int64_t dD, int64_t padW, int64_t padH, int64_t padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(input_.ndimension() == 5, "3d average pooling with channels last format supports tensors with 5 dims"); auto memory_format = at::MemoryFormat::ChannelsLast3d; @@ -915,7 +915,7 @@ void cpu_avg_pool3d_backward( int dW, int dH, int dD, int padW, int padH, int padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { auto grad_output = grad_output_.contiguous(); auto grad_input = grad_input_.contiguous(); @@ -992,7 +992,7 @@ void cpu_avg_pool3d_backward_channels_last( int dW, int dH, int dD, int padW, int padH, int padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { auto memory_format = at::MemoryFormat::ChannelsLast3d; auto grad_input = grad_input_.contiguous(memory_format); auto grad_output = grad_output_.contiguous(memory_format); @@ -1083,7 +1083,7 @@ void avg_pool3d_kernel_impl( int64_t dW, int64_t dH, int64_t dD, int64_t padW, int64_t padH, int64_t padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { switch (input.suggest_memory_format()) { case at::MemoryFormat::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, input.scalar_type(), "avg_pool3d", [&] { @@ -1110,7 +1110,7 @@ void avg_pool3d_backward_kernel_impl( int dW, int dH, int dD, int padW, int padH, int padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { switch (grad_output.suggest_memory_format()) { case at::MemoryFormat::Contiguous: { AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, grad_output.scalar_type(), "avg_pool3d_backward", [&] { diff --git a/aten/src/ATen/native/cpu/DistributionKernels.cpp b/aten/src/ATen/native/cpu/DistributionKernels.cpp index 6dce481853ac2..7ee014058d70d 100644 --- a/aten/src/ATen/native/cpu/DistributionKernels.cpp +++ b/aten/src/ATen/native/cpu/DistributionKernels.cpp @@ -26,27 +26,27 @@ namespace at::native { namespace { -static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) { +static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::cauchy_kernel(iter, median, sigma, generator); } -void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional gen) { +void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::bernoulli_kernel(self, p_, generator); } #if !AT_MKL_ENABLED() -void bernoulli_scalar_kernel_default(const TensorBase &self, double p, c10::optional gen) { +void bernoulli_scalar_kernel_default(const TensorBase &self, double p, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::bernoulli_kernel(self, p, generator); } -void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional gen) { 
+void bernoulli_scalar_kernel(const TensorBase &self, double p, std::optional gen) { bernoulli_scalar_kernel_default(self, p, gen); } #else -void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional gen) { +void bernoulli_scalar_kernel(const TensorBase &self, double p, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); int64_t seed; { @@ -99,17 +99,17 @@ void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional gen) { +static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::exponential_kernel(iter, lambda, generator); } #if (!AT_MKL_ENABLED() || defined(FBCODE_CAFFE2)) -void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional gen) { +void exponential_kernel(TensorIteratorBase& iter, double lambda, std::optional gen) { exponential_kernel_default(iter, lambda, gen); } #else -void exponential_kernel(TensorIteratorBase &iter, double lambda, c10::optional gen) { +void exponential_kernel(TensorIteratorBase &iter, double lambda, std::optional gen) { TORCH_CHECK(isFloatingType(iter.dtype()), "Exponential distribution is a continuous probability distribution. dtype must be a floating point but you specified ", iter.dtype()); Tensor self = iter.tensor(0); @@ -195,32 +195,32 @@ void exponential_kernel(TensorIteratorBase &iter, double lambda, c10::optional gen) { +static void geometric_kernel(TensorIteratorBase& iter, double p, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::geometric_kernel(iter, p, generator); } -static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional gen) { +static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::log_normal_kernel(iter, mean, std, generator); } -void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional gen) { +void uniform_kernel(TensorIteratorBase& iter, double from, double to, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::uniform_kernel(iter, from, to, generator); } -void normal_kernel(const TensorBase &self, double mean, double std, c10::optional gen) { +void normal_kernel(const TensorBase &self, double mean, double std, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::normal_kernel(self, mean, std, generator); } -static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen) { +static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::random_from_to_kernel(iter, range, base, generator); } -static void random_kernel(TensorIteratorBase& iter, c10::optional gen) { +static void random_kernel(TensorIteratorBase& iter, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::random_kernel(iter, generator); } @@ -228,7 +228,7 @@ static void 
random_kernel(TensorIteratorBase& iter, c10::optional gen // This is the special kernel to handle single specific case: // from(inclusive) = std::numeric_limits::lowest() // to(exclusive) = None (= std::numeric_limits::max() + 1) -static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional gen) { +static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, std::optional gen) { CPUGeneratorImpl* generator = get_generator_or_default(gen, detail::getDefaultCPUGenerator()); templates::cpu::random_full_64_bits_range_kernel(iter, generator); } diff --git a/aten/src/ATen/native/cpu/DistributionTemplates.h b/aten/src/ATen/native/cpu/DistributionTemplates.h index 93a9b33b29285..961c0a3811ec1 100644 --- a/aten/src/ATen/native/cpu/DistributionTemplates.h +++ b/aten/src/ATen/native/cpu/DistributionTemplates.h @@ -57,10 +57,10 @@ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG generator) { template struct RandomFromToKernel { - void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen) { + void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional gen) { random_from_to_kernel(iter, range, base, check_generator(gen)); } - void operator()(TensorIteratorBase& iter, c10::optional gen) { + void operator()(TensorIteratorBase& iter, std::optional gen) { random_full_64_bits_range_kernel(iter, check_generator(gen)); } }; @@ -78,7 +78,7 @@ void random_kernel(TensorIteratorBase& iter, RNG generator) { template struct RandomKernel { - void operator()(TensorIteratorBase& iter, c10::optional gen) { + void operator()(TensorIteratorBase& iter, std::optional gen) { random_kernel(iter, check_generator(gen)); } }; @@ -257,7 +257,7 @@ void normal_kernel(const TensorBase &self, double mean, double std, RNG generato template struct NormalKernel { - void operator()(Tensor& self, double mean, double std, c10::optional gen) { + void operator()(Tensor& self, double mean, double std, std::optional gen) { normal_kernel(self, mean, std, check_generator(gen)); } }; @@ -279,7 +279,7 @@ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gene template struct UniformKernel { - void operator()(TensorIteratorBase& iter, double from, double to, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double from, double to, std::optional gen) { uniform_kernel(iter, from, to, check_generator(gen)); } }; @@ -299,7 +299,7 @@ void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, RNG ge template struct CauchyKernel { - void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double median, double sigma, std::optional gen) { cauchy_kernel(iter, median, sigma, check_generator(gen)); } }; @@ -319,7 +319,7 @@ void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, RNG ge template struct LogNormalKernel { - void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double mean, double std, std::optional gen) { log_normal_kernel(iter, mean, std, check_generator(gen)); } }; @@ -339,7 +339,7 @@ void geometric_kernel(TensorIteratorBase& iter, double p, RNG generator) { template struct GeometricKernel { - void operator()(TensorIteratorBase& iter, double p, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double p, std::optional gen) { geometric_kernel(iter, p, check_generator(gen)); } }; @@ -360,7 +360,7 
@@ void exponential_kernel(TensorIteratorBase& iter, double lambda, RNG generator) template struct ExponentialKernel { - void operator()(TensorIteratorBase& iter, double lambda, c10::optional gen) { + void operator()(TensorIteratorBase& iter, double lambda, std::optional gen) { exponential_kernel(iter, lambda, check_generator(gen)); } }; @@ -415,10 +415,10 @@ void bernoulli_kernel(const TensorBase &self, double p, RNG generator) { template struct BernoulliKernel { - void operator()(const TensorBase &self, double p, c10::optional gen) { + void operator()(const TensorBase &self, double p, std::optional gen) { bernoulli_kernel(self, p, check_generator(gen)); } - void operator()(const TensorBase &self, const TensorBase &p_, c10::optional gen) { + void operator()(const TensorBase &self, const TensorBase &p_, std::optional gen) { bernoulli_kernel(self, p_, check_generator(gen)); } }; diff --git a/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp b/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp index cb96f24ebdde6..28422330403c6 100644 --- a/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp +++ b/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp @@ -151,8 +151,8 @@ void cpu_flash_attention( const at::Tensor& v, double dropout_p, bool is_causal, - c10::optional attn_mask, - c10::optional scale) { + std::optional attn_mask, + std::optional scale) { // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) // -> (Batch x Q_seq_len x Num_heads x Dim_per_head) // Key (Batch x Num_heads x KV_seq_len x Dim_per_head) @@ -400,8 +400,8 @@ void cpu_flash_attention_backward( const at::Tensor& logsumexp, double dropout_p, bool is_causal, - c10::optional attn_mask, - c10::optional scale) { + std::optional attn_mask, + std::optional scale) { constexpr bool is_reduced_type = is_reduced_floating_point_v; using accum_t = at::opmath_type; using Vec = vec::Vectorized; @@ -694,8 +694,8 @@ void flash_attention_kernel_impl( const at::Tensor& value, double dropout_p, bool is_causal, - c10::optional attn_mask, - c10::optional scale) { + std::optional attn_mask, + std::optional scale) { auto q_seq_len = query.size(2); AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, query.scalar_type(), "flash_attention", [&] { @@ -727,8 +727,8 @@ void flash_attention_backward_kernel_impl( const at::Tensor& logsumexp, double dropout_p, bool is_causal, - c10::optional attn_mask, - c10::optional scale) { + std::optional attn_mask, + std::optional scale) { // make sure grad_out has no zero strides (broadcasted dimensions) // since we are going to call gemm next // zero stride in leading dimension would lead to slow impl for gemm diff --git a/aten/src/ATen/native/cpu/HistogramKernel.cpp b/aten/src/ATen/native/cpu/HistogramKernel.cpp index 0505271f6a9bc..829ec71fbd07c 100644 --- a/aten/src/ATen/native/cpu/HistogramKernel.cpp +++ b/aten/src/ATen/native/cpu/HistogramKernel.cpp @@ -78,7 +78,7 @@ enum BIN_SELECTION_ALGORITHM { }; template void histogramdd_cpu_contiguous(Tensor& hist, const TensorList& bin_edges, - const Tensor& input, const c10::optional& weight) { + const Tensor& input, const std::optional& weight) { TORCH_INTERNAL_ASSERT(input.dim() == 2); const int64_t N = input.size(0); @@ -100,12 +100,12 @@ void histogramdd_cpu_contiguous(Tensor& hist, const TensorList& bin_edges, TensorAccessor accessor_in = input.accessor(); - /* Constructs a c10::optional containing an accessor if + /* Constructs a std::optional containing an accessor if * the optional weight tensor has a value. */ const auto accessor_wt = weight.has_value() - ? 
c10::optional>(weight.value().accessor()) - : c10::optional>(); + ? std::optional>(weight.value().accessor()) + : std::optional>(); std::vector bin_seq(D); std::vector num_bin_edges(D); @@ -208,7 +208,7 @@ void histogramdd_cpu_contiguous(Tensor& hist, const TensorList& bin_edges, * Initializes hist to 0, calls into the main algorithm, and normalizes output if necessary. */ template -void histogramdd_out_cpu_template(const Tensor& self, const c10::optional& weight, bool density, +void histogramdd_out_cpu_template(const Tensor& self, const std::optional& weight, bool density, Tensor& hist, const TensorList& bin_edges) { hist.fill_(0); @@ -219,8 +219,8 @@ void histogramdd_out_cpu_template(const Tensor& self, const c10::optional(weight.value().reshape({M})) - : c10::optional(); + ? std::optional(weight.value().reshape({M})) + : std::optional(); std::vector bin_edges_contig(bin_edges.size()); for (const auto dim : c10::irange(bin_edges_contig.size())) { @@ -259,7 +259,7 @@ void histogramdd_out_cpu_template(const Tensor& self, const c10::optional& weight, bool density, +static void histogramdd_kernel_impl(const Tensor& self, const std::optional& weight, bool density, Tensor& hist, const TensorList& bin_edges) { histogramdd_out_cpu_template(self, weight, density, hist, bin_edges); } @@ -269,7 +269,7 @@ static void histogramdd_kernel_impl(const Tensor& self, const c10::optional& weight, +static void histogramdd_linear_kernel_impl(const Tensor& self, const std::optional& weight, bool density, Tensor& hist, const TensorList& bin_edges, bool local_search) { if (local_search) { // histogramdd codepath: both hist and bin_edges are eventually returned as output, diff --git a/aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp b/aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp index d5af5d23e8b10..0ebe127c6a8dc 100644 --- a/aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp +++ b/aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp @@ -54,7 +54,7 @@ void cpu_max_unpool( int64_t input_image_size = numel / channels; int64_t output_image_size = output.numel() / channels; - c10::optional optional_error_index; + std::optional optional_error_index; // parallel on dim N, C, D, H, W: [channels, input_image_size] at::parallel_for(0, numel, 0, [&](int64_t begin, int64_t end) { @@ -118,7 +118,7 @@ void cpu_max_unpool_channels_last( int64_t input_image_size = input_height * input_width; int64_t output_image_size = output_height * output_width; - c10::optional optional_error_index; + std::optional optional_error_index; // parallel on dim N, H, W at::parallel_for(0, nbatch * input_image_size, 0, [&](int64_t begin, int64_t end) { @@ -191,7 +191,7 @@ void cpu_max_unpool_backward( int64_t input_image_size = numel / channels; int64_t output_image_size = grad_output.numel() / channels; - c10::optional optional_error_index; + std::optional optional_error_index; // parallel on dim N, C, D, H, W at::parallel_for(0, numel, 0, [&](int64_t begin, int64_t end) { diff --git a/aten/src/ATen/native/cpu/MultinomialKernel.cpp b/aten/src/ATen/native/cpu/MultinomialKernel.cpp index 1c4054abdf239..f15292bd21fdb 100644 --- a/aten/src/ATen/native/cpu/MultinomialKernel.cpp +++ b/aten/src/ATen/native/cpu/MultinomialKernel.cpp @@ -24,7 +24,7 @@ multinomial_with_replacement_apply( Tensor& result, const Tensor& self, const int64_t n_sample, - c10::optional generator) { + std::optional generator) { auto gen = get_generator_or_default( generator, detail::getDefaultCPUGenerator()); // See Note [Acquire lock when using random generators] @@ -128,7 +128,7 @@ 
multinomial_with_replacement_apply( Tensor& result, const Tensor& self, const int64_t n_sample, - c10::optional generator) { + std::optional generator) { auto gen = get_generator_or_default( generator, detail::getDefaultCPUGenerator()); // See Note [Acquire lock when using random generators] @@ -230,7 +230,7 @@ static void multinomial_with_replacement_kernel_impl( Tensor& result, const Tensor& self, const int64_t n_sample, - c10::optional gen) { + std::optional gen) { AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, self.scalar_type(), "multinomial", [&] { multinomial_with_replacement_apply( diff --git a/aten/src/ATen/native/cpu/ReduceUtils.h b/aten/src/ATen/native/cpu/ReduceUtils.h index d6afac295aff6..8c6424f8b0eac 100644 --- a/aten/src/ATen/native/cpu/ReduceUtils.h +++ b/aten/src/ATen/native/cpu/ReduceUtils.h @@ -60,7 +60,7 @@ inline vec_scalar_t init_value() { } template -inline vec_scalar_t init_value(const c10::optional& initial) { +inline vec_scalar_t init_value(const std::optional& initial) { using acc_t = vec_scalar_t; if (initial.has_value()) { return initial.value().to(); @@ -80,7 +80,7 @@ inline void init(scalar_t* out, int64_t size, const vec_scalar_t& val) } template -inline void init(scalar_t* out, int64_t size, const c10::optional& initial) { +inline void init(scalar_t* out, int64_t size, const std::optional& initial) { using acc_t = vec_scalar_t; acc_t val = init_value(initial); init(out, size, val); diff --git a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp index 461ceb2f36383..9754b003e19c6 100644 --- a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp +++ b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp @@ -496,9 +496,9 @@ inline Vectorized> _nan_to_num_replace( static void nan_to_num_kernel( TensorIteratorBase& iter, - c10::optional nan, - c10::optional pos_inf, - c10::optional neg_inf) { + std::optional nan, + std::optional pos_inf, + std::optional neg_inf) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "nan_to_num", [&]() { using value_t = c10::scalar_value_type::type; value_t nan_replacement = static_cast(nan.value_or(0.)); diff --git a/aten/src/ATen/native/cpu/UpSampleKernel.cpp b/aten/src/ATen/native/cpu/UpSampleKernel.cpp index 67fe50c1d2a62..17b6d0a543f34 100644 --- a/aten/src/ATen/native/cpu/UpSampleKernel.cpp +++ b/aten/src/ATen/native/cpu/UpSampleKernel.cpp @@ -21,7 +21,7 @@ namespace at::native { namespace { -using scale_t = std::vector>; +using scale_t = std::vector>; // TODO: this file could benefit from a global renaming of its functions / // classes and terms, as well as from adding more comments. 
In particular: @@ -987,7 +987,7 @@ struct HelperInterpBase { template static inline std::tuple, int, unsigned int> _compute_index_ranges_int16_weights( int64_t input_size, int64_t output_size, int64_t stride, int64_t ndims, - int64_t reshape_dim, bool align_corners, const c10::optional opt_scale, + int64_t reshape_dim, bool align_corners, const std::optional opt_scale, int interp_size, aa_filter_fn_t aa_filter_fn, bool antialias, bool align_i32=false ) { @@ -1072,7 +1072,7 @@ struct HelperInterpNearest : public HelperInterpBase { static inline std::vector compute_indices_weights( at::ScalarType scalar_type, int64_t input_size, int64_t output_size, int64_t stride, int64_t ndims, - int64_t reshape_dim, bool align_corners, const c10::optional opt_scale + int64_t reshape_dim, bool align_corners, const std::optional opt_scale ) { TORCH_INTERNAL_ASSERT(!align_corners); @@ -1123,7 +1123,7 @@ struct HelperInterpNearestExact : public HelperInterpNearest { static inline std::vector compute_indices_weights( at::ScalarType scalar_type, int64_t input_size, int64_t output_size, int64_t stride, int64_t ndims, - int64_t reshape_dim, bool align_corners, const c10::optional opt_scale + int64_t reshape_dim, bool align_corners, const std::optional opt_scale ) { TORCH_INTERNAL_ASSERT(!align_corners); @@ -1175,7 +1175,7 @@ struct HelperInterpLinear : public HelperInterpBase { static inline std::vector compute_indices_weights( at::ScalarType scalar_type, int64_t input_size, int64_t output_size, int64_t stride, int64_t ndims, int64_t reshape_dim, - bool align_corners, const c10::optional opt_scale + bool align_corners, const std::optional opt_scale ) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) std::vector output; @@ -1230,7 +1230,7 @@ struct HelperInterpLinear : public HelperInterpBase { int64_t ndims, int64_t reshape_dim, bool align_corners, - const c10::optional opt_scale, + const std::optional opt_scale, bool antialias ) { @@ -1266,7 +1266,7 @@ struct HelperInterpLinear : public HelperInterpBase { int64_t ndims, int64_t reshape_dim, bool align_corners, - const c10::optional opt_scale, + const std::optional opt_scale, bool antialias, bool align_i32=false ) { @@ -1296,7 +1296,7 @@ struct HelperInterpCubic : public HelperInterpBase { static inline std::vector compute_indices_weights( at::ScalarType scalar_type, int64_t input_size, int64_t output_size, int64_t stride, int64_t ndims, int64_t reshape_dim, - bool align_corners, const c10::optional opt_scale + bool align_corners, const std::optional opt_scale ) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) std::vector output; @@ -1364,7 +1364,7 @@ struct HelperInterpCubic : public HelperInterpBase { int64_t ndims, int64_t reshape_dim, bool align_corners, - const c10::optional opt_scale, + const std::optional opt_scale, bool antialias ) { @@ -1400,7 +1400,7 @@ struct HelperInterpCubic : public HelperInterpBase { int64_t ndims, int64_t reshape_dim, bool align_corners, - const c10::optional opt_scale, + const std::optional opt_scale, bool antialias, bool align_i32=false ) { @@ -1422,7 +1422,7 @@ struct HelperInterpCubic : public HelperInterpBase { // // Internally, it uses TensorIterator to optimize the computations. 
// - out_ndims is the number of interpolated dims: 1, 2, 3 -// - scale_type is template type for scales, typically c10::optional +// - scale_type is template type for scales, typically std::optional // - template class F is one of the above structs to compute indices and weights template void upsample_generic_Nd_kernel_impl( @@ -1686,7 +1686,7 @@ void separable_upsample_generic_Nd_kernel_impl( void upsample_nearest1d_kernel_impl( const Tensor& output, const Tensor& input, - c10::optional scales_w) { + std::optional scales_w) { upsample_generic_Nd_kernel_impl<1, scale_t, HelperInterpNearest>( output, input, false, {scales_w}); } @@ -1694,7 +1694,7 @@ void upsample_nearest1d_kernel_impl( void _upsample_nearest_exact1d_kernel_impl( const Tensor& output, const Tensor& input, - c10::optional scales_w) { + std::optional scales_w) { upsample_generic_Nd_kernel_impl<1, scale_t, HelperInterpNearestExact>( output, input, false, {scales_w}); } @@ -1726,8 +1726,8 @@ int _use_vectorized_kernel_cond_3d( void upsample_nearest2d_kernel_impl( const Tensor& output, const Tensor& input, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { if (_use_vectorized_kernel_cond_2d(output, input)) { AT_DISPATCH_FLOATING_TYPES_AND3(kByte, kBFloat16, kHalf, input.scalar_type(), "upsample_nearest2d_channels_last", [&] { @@ -1742,8 +1742,8 @@ void upsample_nearest2d_kernel_impl( void _upsample_nearest_exact2d_kernel_impl( const Tensor& output, const Tensor& input, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { if (_use_vectorized_kernel_cond_2d(output, input)) { AT_DISPATCH_FLOATING_TYPES_AND3(kByte, kBFloat16, kHalf, input.scalar_type(), "upsample_nearest2d_channels_last", [&] { cpu_upsample_nearest_channels_last(output, input, {scales_h, scales_w}); @@ -1757,9 +1757,9 @@ void _upsample_nearest_exact2d_kernel_impl( void upsample_nearest3d_kernel_impl( const Tensor& output, const Tensor& input, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { if (_use_vectorized_kernel_cond_3d(output, input)) { AT_DISPATCH_FLOATING_TYPES_AND3(kByte, kBFloat16, kHalf, input.scalar_type(), "upsample_nearest3d_channels_last", [&] { @@ -1774,9 +1774,9 @@ void upsample_nearest3d_kernel_impl( void _upsample_nearest_exact3d_kernel_impl( const Tensor& output, const Tensor& input, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { if (_use_vectorized_kernel_cond_3d(output, input)) { AT_DISPATCH_FLOATING_TYPES_AND3(kByte, kBFloat16, kHalf, input.scalar_type(), "upsample_nearest3d_channels_last", [&] { cpu_upsample_nearest_channels_last(output, input, {scales_d, scales_h, scales_w}); @@ -1791,7 +1791,7 @@ void upsample_linear1d_kernel_impl( const Tensor& output, const Tensor& input, bool align_corners, - c10::optional scales_w) { + std::optional scales_w) { upsample_generic_Nd_kernel_impl<1, scale_t, HelperInterpLinear>( output, input, align_corners, {scales_w}); } @@ -1801,8 +1801,8 @@ void upsample_bilinear2d_kernel_impl_float( const Tensor& output, const Tensor& input, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { // See note above about _use_vectorized_kernel_cond_2d(output, input). 
The extra cond is present // because benchmarks showed that with only 1 thread, images (C == 3) were @@ -1823,8 +1823,8 @@ void upsample_bilinear2d_kernel_impl( const Tensor& output, const Tensor& input, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { if (input.dtype() == at::kByte){ #ifdef CPU_CAPABILITY_AVX2 @@ -1852,8 +1852,8 @@ void upsample_bilinear2d_aa_kernel_impl( const Tensor& output, const Tensor& input, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { #ifdef CPU_CAPABILITY_AVX2 if (input.dtype() == at::kByte && input.size(1) <= 4) { upsample_avx_bilinear_bicubic_uint8( @@ -1875,9 +1875,9 @@ void upsample_trilinear3d_kernel_impl( const Tensor& output, const Tensor& input, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { if ((_use_vectorized_kernel_cond_3d(output, input))) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, input.scalar_type(), "upsample_trilinear3d_channels_last", [&] { cpu_upsample_linear_channels_last(output, input, align_corners, {scales_d, scales_h, scales_w}); @@ -1892,8 +1892,8 @@ void upsample_bicubic2d_kernel_impl( const Tensor& output, const Tensor& input, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { if (input.dtype() == at::kByte){ #ifdef CPU_CAPABILITY_AVX2 @@ -1922,8 +1922,8 @@ void upsample_bicubic2d_aa_kernel_impl( const Tensor& output, const Tensor& input, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { #ifdef CPU_CAPABILITY_AVX2 if (input.dtype() == at::kByte && input.size(1) <= 4) { @@ -2061,8 +2061,8 @@ void upsample_bilinear2d_aa_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { AT_DISPATCH_FLOATING_TYPES( grad_output.scalar_type(), "upsample_bilinear2d_aa_backward_cpu", [&] { cpu_upsample_genNd_backward_aa( @@ -2074,8 +2074,8 @@ void upsample_bicubic2d_aa_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { AT_DISPATCH_FLOATING_TYPES( grad_output.scalar_type(), "upsample_bicubic2d_aa_backward_cpu", [&] { cpu_upsample_genNd_backward_aa( diff --git a/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp b/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp index b97b5cefee2c8..fae70686591ee 100644 --- a/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp +++ b/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp @@ -12,7 +12,7 @@ namespace at::native { namespace { -using scale_t = std::vector>; +using scale_t = std::vector>; template , @@ -337,7 +337,7 @@ void cpu_upsample_nearest_backward_channels_last( void upsample_nearest1d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, - c10::optional scales_w) { + std::optional scales_w) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "upsample_nearest1d_backward", [&] { cpu_upsample_nearest_backward(grad_input, grad_output, {scales_w}); }); @@ -346,7 +346,7 @@ void upsample_nearest1d_backward_kernel_impl( void 
_upsample_nearest_exact1d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, - c10::optional scales_w) { + std::optional scales_w) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "_upsample_nearest_exact1d_backward", [&] { cpu_upsample_nearest_backward(grad_input, grad_output, {scales_w}); }); @@ -355,8 +355,8 @@ void _upsample_nearest_exact1d_backward_kernel_impl( void upsample_nearest2d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { if (grad_output.is_contiguous(at::MemoryFormat::ChannelsLast)) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "upsample_nearest2d_backward_cl", [&] { cpu_upsample_nearest_backward_channels_last(grad_input, grad_output, {scales_h, scales_w}); @@ -371,8 +371,8 @@ void upsample_nearest2d_backward_kernel_impl( void _upsample_nearest_exact2d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { if (grad_output.is_contiguous(at::MemoryFormat::ChannelsLast)) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "_upsample_nearest_exact2d_backward_cl", [&] { cpu_upsample_nearest_backward_channels_last(grad_input, grad_output, {scales_h, scales_w}); @@ -387,9 +387,9 @@ void _upsample_nearest_exact2d_backward_kernel_impl( void upsample_nearest3d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { if (grad_output.is_contiguous(at::MemoryFormat::ChannelsLast3d)) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "_upsample_nearest3d_backward_cl", [&] { cpu_upsample_nearest_backward_channels_last(grad_input, grad_output, {scales_d, scales_h, scales_w}); @@ -404,9 +404,9 @@ void upsample_nearest3d_backward_kernel_impl( void _upsample_nearest_exact3d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { if (grad_output.is_contiguous(at::MemoryFormat::ChannelsLast3d)) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "_upsample_nearest_exact3d_backward_cl", [&] { cpu_upsample_nearest_backward_channels_last(grad_input, grad_output, {scales_d, scales_h, scales_w}); @@ -745,7 +745,7 @@ void upsample_linear1d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, bool align_corners, - c10::optional scales_w) { + std::optional scales_w) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "upsample_linear1d_backward", [&] { cpu_upsample_linear_backward(grad_input, grad_output, align_corners, {scales_w}); }); @@ -755,8 +755,8 @@ void upsample_bilinear2d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { if (grad_output.is_contiguous(at::MemoryFormat::ChannelsLast)) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "upsample_bilinear2d_backward_channels_last", [&] { 
cpu_upsample_linear_backward_channels_last(grad_input, grad_output, align_corners, {scales_h, scales_w}); @@ -772,9 +772,9 @@ void upsample_trilinear3d_backward_kernel_impl( const Tensor& grad_input, const Tensor& grad_output, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { if (grad_output.is_contiguous(at::MemoryFormat::ChannelsLast3d)) { AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, grad_output.scalar_type(), "upsample_trilinear3d_backward_channels_last", [&] { cpu_upsample_linear_backward_channels_last(grad_input, grad_output, align_corners, {scales_d, scales_h, scales_w}); diff --git a/aten/src/ATen/native/cuda/AveragePool2d.cu b/aten/src/ATen/native/cuda/AveragePool2d.cu index e55b9e5e96ef1..3ea9dcc854a3f 100644 --- a/aten/src/ATen/native/cuda/AveragePool2d.cu +++ b/aten/src/ATen/native/cuda/AveragePool2d.cu @@ -250,7 +250,7 @@ TORCH_IMPL_FUNC(avg_pool2d_out_cuda) int64_t padW_, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& output) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input_, "input_", 2 }; @@ -362,7 +362,7 @@ TORCH_IMPL_FUNC(avg_pool2d_backward_out_cuda) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& gradInput ) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; diff --git a/aten/src/ATen/native/cuda/AveragePool3d.cu b/aten/src/ATen/native/cuda/AveragePool3d.cu index f4b0ee00d9a9a..dabcf5b63be99 100644 --- a/aten/src/ATen/native/cuda/AveragePool3d.cu +++ b/aten/src/ATen/native/cuda/AveragePool3d.cu @@ -351,7 +351,7 @@ TORCH_IMPL_FUNC(avg_pool3d_out_cuda) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& output ) { TensorArg output_arg{ output, "output", 1 }; @@ -451,7 +451,7 @@ TORCH_IMPL_FUNC(avg_pool3d_backward_out_cuda) ( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, const Tensor& gradInput ) { // See Note [Writing Nondeterministic Operations] diff --git a/aten/src/ATen/native/cuda/Blas.cpp b/aten/src/ATen/native/cuda/Blas.cpp index 9e76aad45f644..c0ed650cf0219 100644 --- a/aten/src/ATen/native/cuda/Blas.cpp +++ b/aten/src/ATen/native/cuda/Blas.cpp @@ -839,11 +839,11 @@ static bool _scaled_mm_allowed_device() { std::tuple _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2, - const c10::optional& bias, - c10::optional out_dtype, - const c10::optional& scale_a, - const c10::optional& scale_b, - const c10::optional& scale_result, + const std::optional& bias, + std::optional out_dtype, + const std::optional& scale_a, + const std::optional& scale_b, + const std::optional& scale_result, bool use_fast_accum, Tensor& out, Tensor& amax) { // Check sizes @@ -1022,11 +1022,11 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2, std::tuple _scaled_mm_cuda(const Tensor& mat_a, const Tensor& mat_b, - const c10::optional& bias, - c10::optional out_dtype, - const c10::optional& scale_a, - const c10::optional& scale_b, - const c10::optional& scale_result, + const std::optional& bias, + std::optional out_dtype, + const std::optional& scale_a, + const std::optional& scale_b, + const std::optional& scale_result, bool use_fast_accum) { const auto out_dtype_ = 
out_dtype.value_or(mat_a.scalar_type()); Tensor out = at::empty({0}, mat_a.options().dtype(out_dtype_)); diff --git a/aten/src/ATen/native/cuda/Bucketization.cu b/aten/src/ATen/native/cuda/Bucketization.cu index 05d5421b046f8..73a68683b6c04 100644 --- a/aten/src/ATen/native/cuda/Bucketization.cu +++ b/aten/src/ATen/native/cuda/Bucketization.cu @@ -134,8 +134,8 @@ Tensor& searchsorted_out_cuda( const Tensor& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter_opt, + const std::optional side_opt, + const std::optional& sorter_opt, Tensor& result) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned sorter_maybe_owned = at::borrow_from_optional_tensor(sorter_opt); @@ -180,8 +180,8 @@ Tensor& searchsorted_out_cuda( const Scalar& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter_opt, + const std::optional side_opt, + const std::optional& sorter_opt, Tensor& result) { const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device()); return searchsorted_out_cuda(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter_opt, result); @@ -192,8 +192,8 @@ Tensor searchsorted_cuda( const Tensor& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter) { + const std::optional side_opt, + const std::optional& sorter) { ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long; c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type); Tensor result = at::empty({0}, options, MemoryFormat::Contiguous); @@ -206,8 +206,8 @@ Tensor searchsorted_cuda( const Scalar& self, bool out_int32, bool right, - const c10::optional side_opt, - const c10::optional& sorter) { + const std::optional side_opt, + const std::optional& sorter) { const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device()); return searchsorted_cuda(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter); } diff --git a/aten/src/ATen/native/cuda/ConvolutionMM2d.cu b/aten/src/ATen/native/cuda/ConvolutionMM2d.cu index 9e45e2693cb0f..4f6ef77eb7e05 100644 --- a/aten/src/ATen/native/cuda/ConvolutionMM2d.cu +++ b/aten/src/ATen/native/cuda/ConvolutionMM2d.cu @@ -376,7 +376,7 @@ Tensor& slow_conv2d_forward_out_cuda( const Tensor &self_, const Tensor &weight_, IntArrayRef kernel_size, - const c10::optional &bias_, + const std::optional &bias_, IntArrayRef stride, IntArrayRef padding, Tensor &output) { @@ -409,7 +409,7 @@ Tensor slow_conv2d_forward_cuda( const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, - const c10::optional &bias, + const std::optional &bias, IntArrayRef stride, IntArrayRef padding) { auto output = at::empty({0}, self.options()); diff --git a/aten/src/ATen/native/cuda/DepthwiseConv2d.cu b/aten/src/ATen/native/cuda/DepthwiseConv2d.cu index 2812706f718eb..b87dd41dd59ef 100644 --- a/aten/src/ATen/native/cuda/DepthwiseConv2d.cu +++ b/aten/src/ATen/native/cuda/DepthwiseConv2d.cu @@ -638,7 +638,7 @@ const Tensor& conv_depthwise2d_cuda_out( const Tensor &input_, const Tensor &weight_, IntArrayRef kernel_size, - const c10::optional &bias_opt, + const std::optional &bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, @@ -673,7 +673,7 @@ Tensor conv_depthwise2d_cuda( const Tensor &input, const Tensor &weight, IntArrayRef kernel_size, - const c10::optional &bias, + const std::optional &bias, IntArrayRef stride, IntArrayRef padding, 
     IntArrayRef dilation) {
diff --git a/aten/src/ATen/native/cuda/DepthwiseConv3d.cu b/aten/src/ATen/native/cuda/DepthwiseConv3d.cu
index 991471a6ef82f..62c36d66ee40e 100644
--- a/aten/src/ATen/native/cuda/DepthwiseConv3d.cu
+++ b/aten/src/ATen/native/cuda/DepthwiseConv3d.cu
@@ -390,7 +390,7 @@ void conv_depthwise_shape_check(
 Tensor conv_depthwise3d_cuda(
     const Tensor& input,
     const Tensor& weight,
-    IntArrayRef kernel_size, const c10::optional& bias_opt,
+    IntArrayRef kernel_size, const std::optional& bias_opt,
     IntArrayRef stride,
     IntArrayRef padding,
     IntArrayRef dilation) {
diff --git a/aten/src/ATen/native/cuda/DistributionBernoulli.cu b/aten/src/ATen/native/cuda/DistributionBernoulli.cu
index 89a518267d25e..5a04ae9b3450f 100644
--- a/aten/src/ATen/native/cuda/DistributionBernoulli.cu
+++ b/aten/src/ATen/native/cuda/DistributionBernoulli.cu
@@ -23,12 +23,12 @@
 namespace at::native {
-void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional gen_) {
+void bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, std::optional gen_) {
   auto generator = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::bernoulli_kernel(self, p_, generator);
 }
-void bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional gen) {
+void bernoulli_scalar_kernel(const TensorBase &self, double p, std::optional gen) {
   auto iter = TensorIterator::borrowing_nullary_op(self);
   auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::bernoulli_kernel(iter, p, generator);
diff --git a/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu b/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu
index a66d3cf3288fd..e6a4629930659 100644
--- a/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionCauchyKernel.cu
@@ -5,7 +5,7 @@
 namespace at::native {
-void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) {
+void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, std::optional gen) {
   auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu b/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu
index 76cb94f6fd878..78ee9e745d36b 100644
--- a/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionExponentialKernel.cu
@@ -5,7 +5,7 @@
 namespace at::native {
-void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional gen) {
+void exponential_kernel(TensorIteratorBase& iter, double lambda, std::optional gen) {
   auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu b/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu
index 0fe49d7bbd4b5..783863f99a9aa 100644
--- a/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionGeometricKernel.cu
@@ -5,7 +5,7 @@
 namespace at::native {
-void geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional gen) {
+void geometric_kernel(TensorIteratorBase& iter, double p_, std::optional gen) {
   auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::geometric_kernel(iter, p_, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu b/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu
index f394d4fea39db..148e8e00dd99b 100644
--- a/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionLogNormalKernel.cu
@@ -5,7 +5,7 @@
 namespace at::native {
-void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional gen) {
+void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, std::optional gen) {
   auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::log_normal_kernel(iter, mean, std, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionNormal.cu b/aten/src/ATen/native/cuda/DistributionNormal.cu
index a17c3e3da0556..bd4763e269f89 100644
--- a/aten/src/ATen/native/cuda/DistributionNormal.cu
+++ b/aten/src/ATen/native/cuda/DistributionNormal.cu
@@ -5,7 +5,7 @@
 namespace at::native {
-void normal_kernel(const TensorBase &self, double mean, double std, c10::optional gen) {
+void normal_kernel(const TensorBase &self, double mean, double std, std::optional gen) {
   auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::normal_kernel(self, mean, std, generator);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionRandomKernel.cu b/aten/src/ATen/native/cuda/DistributionRandomKernel.cu
index 034a19c512f4f..827a12b3f28be 100644
--- a/aten/src/ATen/native/cuda/DistributionRandomKernel.cu
+++ b/aten/src/ATen/native/cuda/DistributionRandomKernel.cu
@@ -5,17 +5,17 @@
 namespace at::native {
-void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen_) {
+void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional gen_) {
   auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);
 }
-void random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional gen_) {
+void random_full_64_bits_range_kernel(TensorIteratorBase& iter, std::optional gen_) {
   auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);
 }
-void random_kernel(TensorIteratorBase& iter, c10::optional gen_) {
+void random_kernel(TensorIteratorBase& iter, std::optional gen_) {
   auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());
   at::native::templates::cuda::random_kernel(iter, gen);
 }
diff --git a/aten/src/ATen/native/cuda/DistributionTemplates.h b/aten/src/ATen/native/cuda/DistributionTemplates.h
index 8ac91f3114511..8f8860f04ad1b 100644
--- a/aten/src/ATen/native/cuda/DistributionTemplates.h
+++ b/aten/src/ATen/native/cuda/DistributionTemplates.h
@@ -352,10 +352,10 @@ void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) {
 template
 struct RandomFromToKernel {
-  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional gen) {
     random_from_to_kernel(iter, range, base, check_generator(gen));
   }
-  void operator()(TensorIteratorBase& iter, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, std::optional gen) {
     random_full_64_bits_range_kernel(iter, check_generator(gen));
   }
 };
@@ -448,7 +448,7 @@ void normal_kernel(const TensorBase &self, double mean_, double std_, RNG gen) {
 template
 struct NormalKernel {
-  void operator()(const TensorBase &self, double mean, double std, c10::optional gen) {
+  void operator()(const TensorBase &self, double mean, double std, std::optional gen) {
     normal_kernel(self, mean, std, check_generator(gen));
   }
 };
@@ -481,7 +481,7 @@ void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen)
 template
 struct UniformKernel {
-  void operator()(TensorIteratorBase& iter, double from, double to, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, double from, double to, std::optional gen) {
     uniform_kernel(iter, from, to, check_generator(gen));
   }
 };
@@ -504,7 +504,7 @@ void log_normal_kernel(TensorIteratorBase& iter, double mean_, double std_, RNG
 template
 struct LogNormalKernel {
-  void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, double mean, double std, std::optional gen) {
     log_normal_kernel(iter, mean, std, check_generator(gen));
   }
 };
@@ -525,7 +525,7 @@ void geometric_kernel(TensorIteratorBase& iter, double p, RNG gen) {
 template
 struct GeometricKernel {
-  void operator()(TensorIteratorBase& iter, double p, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, double p, std::optional gen) {
     geometric_kernel(iter, p, check_generator(gen));
   }
 };
@@ -548,7 +548,7 @@ void exponential_kernel(TensorIteratorBase& iter, double lambda_, RNG gen) {
 template
 struct ExponentialKernel {
-  void operator()(TensorIteratorBase& iter, double lambda, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, double lambda, std::optional gen) {
     exponential_kernel(iter, lambda, check_generator(gen));
   }
 };
@@ -571,7 +571,7 @@ void cauchy_kernel(TensorIteratorBase& iter, double median_, double sigma_, RNG
 template
 struct CauchyKernel {
-  void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, double median, double sigma, std::optional gen) {
     cauchy_kernel(iter, median, sigma, check_generator(gen));
   }
 };
@@ -661,10 +661,10 @@ void bernoulli_kernel(TensorIteratorBase& iter, double p, RNG gen) {
 template
 struct BernoulliKernel {
-  void operator()(TensorIteratorBase& iter, double p, c10::optional gen) {
+  void operator()(TensorIteratorBase& iter, double p, std::optional gen) {
     bernoulli_kernel(iter, p, check_generator(gen));
   }
-  void operator()(const TensorBase &self, const TensorBase &p_, c10::optional gen) {
+  void operator()(const TensorBase &self, const TensorBase &p_, std::optional gen) {
     bernoulli_kernel(self, p_, check_generator(gen));
   }
 };
diff --git a/aten/src/ATen/native/cuda/DistributionUniform.cu b/aten/src/ATen/native/cuda/DistributionUniform.cu
index 2ebdfa4464598..ed34b78727dbd 100644
--- a/aten/src/ATen/native/cuda/DistributionUniform.cu
+++ b/aten/src/ATen/native/cuda/DistributionUniform.cu
@@ -5,7 +5,7 @@
 namespace at::native {
-void uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional gen) {
+void uniform_kernel(TensorIteratorBase& iter, double from, double to, std::optional gen) {
   auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());
   templates::cuda::uniform_kernel(iter, from, to, generator);
 }
diff --git a/aten/src/ATen/native/cuda/Distributions.cpp b/aten/src/ATen/native/cuda/Distributions.cpp
index
c0d5abb49bf6a..21ce151276fe5 100644 --- a/aten/src/ATen/native/cuda/Distributions.cpp +++ b/aten/src/ATen/native/cuda/Distributions.cpp @@ -18,14 +18,14 @@ namespace at::native { -Tensor _s_poisson_cuda(const Tensor& lambda, c10::optional gen_) { +Tensor _s_poisson_cuda(const Tensor& lambda, std::optional gen_) { auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator()); Tensor ret = at::empty(lambda.sizes(), lambda.options()); launch_poisson_cuda_kernel(ret, lambda, gen); return ret; } -Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional gen_) { +Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, std::optional gen_) { auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator()); Tensor ret = at::empty(count.sizes(), count.options()); at::TensorIterator iter = at::TensorIteratorConfig() @@ -37,14 +37,14 @@ Tensor _s_binomial_cuda(const Tensor& count, const Tensor& prob, c10::optional gen_) { +Tensor _s_gamma_cuda(const Tensor& alpha, std::optional gen_) { auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator()); Tensor ret = at::empty(alpha.sizes(), alpha.options()); launch_gamma_kernel(ret, alpha, gen); return ret; } -Tensor _s_dirichlet_cuda(const Tensor& alpha, c10::optional gen_) { +Tensor _s_dirichlet_cuda(const Tensor& alpha, std::optional gen_) { auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator()); Tensor ret = at::empty(alpha.sizes(), alpha.options()); launch_gamma_kernel(ret, alpha, gen); diff --git a/aten/src/ATen/native/cuda/Dropout.cu b/aten/src/ATen/native/cuda/Dropout.cu index a749872ba38f3..690051e679082 100644 --- a/aten/src/ATen/native/cuda/Dropout.cu +++ b/aten/src/ATen/native/cuda/Dropout.cu @@ -366,7 +366,7 @@ dropout_cuda(CUDAGeneratorImpl* gen, const Tensor& self, double p){ } std::tuple -native_dropout_cuda(const Tensor& self, double p, c10::optional train){ +native_dropout_cuda(const Tensor& self, double p, std::optional train){ // short-cut for train == false if (train.has_value() && !train.value()) { return std::make_tuple(self.clone(), at::ones_like(self, self.options().dtype(c10::CppTypeToScalarType::value))); @@ -387,7 +387,7 @@ native_dropout_cuda(const Tensor& self, double p, c10::optional train){ // TODO: _fused_dropout_cuda is to be removed, see PR #63937 std::tuple -fused_dropout_cuda(const Tensor& self, double p, c10::optional gen_){ +fused_dropout_cuda(const Tensor& self, double p, std::optional gen_){ auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator()); return dropout_cuda(gen, self, p); } diff --git a/aten/src/ATen/native/cuda/EmbeddingBag.cu b/aten/src/ATen/native/cuda/EmbeddingBag.cu index 64852ae79b1f9..7c9f845b7ee26 100644 --- a/aten/src/ATen/native/cuda/EmbeddingBag.cu +++ b/aten/src/ATen/native/cuda/EmbeddingBag.cu @@ -312,7 +312,7 @@ Tensor embedding_bag_backward_cuda_max(const Tensor &grad, std::tuple _embedding_bag_forward_only_cuda(const Tensor &weight, const Tensor &indices, const Tensor &offsets, const bool scale_grad_by_freq, - const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, + const int64_t mode, bool sparse, const std::optional& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); @@ -335,7 +335,7 @@ _embedding_bag_forward_only_cuda(const Tensor &weight, 
const Tensor &indices, std::tuple _embedding_bag_cuda(const Tensor &weight, const Tensor &indices_, const Tensor &offsets_, const bool scale_grad_by_freq, - const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, + const int64_t mode, bool sparse, const std::optional& per_sample_weights_opt, bool include_last_offset, int64_t padding_idx) { TORCH_CHECK(indices_.dim() == 1 || indices_.dim() == 2, "input has to be a 1D or 2D Tensor, but got Tensor of dimension ", @@ -432,7 +432,7 @@ Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &ind const Tensor &bag_size_, const Tensor &max_indices, int64_t num_weights, - bool scale_grad_by_freq, int64_t mode, const c10::optional& per_sample_weights_opt, + bool scale_grad_by_freq, int64_t mode, const std::optional& per_sample_weights_opt, int64_t padding_idx) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned per_sample_weights_maybe_owned = at::borrow_from_optional_tensor(per_sample_weights_opt); diff --git a/aten/src/ATen/native/cuda/FusedAdamKernel.cu b/aten/src/ATen/native/cuda/FusedAdamKernel.cu index 9365f9a34ea76..99120ffc2816e 100644 --- a/aten/src/ATen/native/cuda/FusedAdamKernel.cu +++ b/aten/src/ATen/native/cuda/FusedAdamKernel.cu @@ -27,8 +27,8 @@ void _fused_adam_kernel_cuda_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { if (amsgrad) { TORCH_CHECK( at::native::check_fast_path_restrictions( @@ -86,8 +86,8 @@ void _fused_adam_kernel_cuda_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { if (lr.is_cpu()) { _fused_adam_kernel_cuda_( params, diff --git a/aten/src/ATen/native/cuda/FusedAdamWKernel.cu b/aten/src/ATen/native/cuda/FusedAdamWKernel.cu index f926199ae9680..b0fcfe23dee81 100644 --- a/aten/src/ATen/native/cuda/FusedAdamWKernel.cu +++ b/aten/src/ATen/native/cuda/FusedAdamWKernel.cu @@ -28,8 +28,8 @@ void _fused_adamw_kernel_cuda_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { if (amsgrad) { TORCH_CHECK( at::native::check_fast_path_restrictions( @@ -87,8 +87,8 @@ void _fused_adamw_kernel_cuda_( const double eps, const bool amsgrad, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { if (lr.is_cpu()) { _fused_adamw_kernel_cuda_( params, diff --git a/aten/src/ATen/native/cuda/FusedSgdKernel.cu b/aten/src/ATen/native/cuda/FusedSgdKernel.cu index 36ac7401a2d0b..61da02ce0b888 100644 --- a/aten/src/ATen/native/cuda/FusedSgdKernel.cu +++ b/aten/src/ATen/native/cuda/FusedSgdKernel.cu @@ -157,8 +157,8 @@ void _fused_sgd_with_momentum_kernel_cuda_( const bool nesterov, const bool maximize, const bool is_first_step, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { TORCH_CHECK_GT(momentum, 0); TORCH_CHECK(at::native::check_fast_path_restrictions( {params, grads, momentum_buffer_list})); @@ -203,8 +203,8 @@ void _fused_sgd_with_momentum_kernel_cuda_( const bool nesterov, const bool maximize, const bool is_first_step, - const 
c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { if (lr.is_cpu()) { _fused_sgd_with_momentum_kernel_cuda_( params, @@ -279,8 +279,8 @@ void _fused_sgd_kernel_cuda_( const bool nesterov, const bool maximize, const bool is_first_step, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { if (!momentum_buffer_list.empty()) { _fused_sgd_with_momentum_kernel_cuda_( params, @@ -343,8 +343,8 @@ void _fused_sgd_kernel_cuda_( const bool nesterov, const bool maximize, const bool is_first_step, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { if (!momentum_buffer_list.empty()) { _fused_sgd_with_momentum_kernel_cuda_( params, diff --git a/aten/src/ATen/native/cuda/IndexKernel.cpp b/aten/src/ATen/native/cuda/IndexKernel.cpp index 68770bc64e0ac..4c7ee5339afe0 100644 --- a/aten/src/ATen/native/cuda/IndexKernel.cpp +++ b/aten/src/ATen/native/cuda/IndexKernel.cpp @@ -42,7 +42,7 @@ static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self auto mask_self_expanded = expand_outplace(*mask_temp, *self_temp); at::cuda::index_out( result, *std::get<1>(mask_self_expanded), - c10::List>({*std::move(std::get<0>(mask_self_expanded))})); + c10::List>({*std::move(std::get<0>(mask_self_expanded))})); return result; } diff --git a/aten/src/ATen/native/cuda/Indexing.cu b/aten/src/ATen/native/cuda/Indexing.cu index ca37b2cefd411..b0a5d0a5a6a1b 100644 --- a/aten/src/ATen/native/cuda/Indexing.cu +++ b/aten/src/ATen/native/cuda/Indexing.cu @@ -426,7 +426,7 @@ int64_t largestIndex(const Tensor &self) { return result; } -void index_put_with_sort_kernel(Tensor & self, const c10::List>& indices, const Tensor & value, bool accumulate, bool unsafe) { +void index_put_with_sort_kernel(Tensor & self, const c10::List>& indices, const Tensor & value, bool accumulate, bool unsafe) { TORCH_CHECK(!indices.empty() || is_expandable_to(value.sizes(), self.sizes()), "shape mismatch: value tensor of shape ", value.sizes(), " cannot be broadcast to indexing result of shape ", self.sizes()); if (indices.size() > (size_t)self.dim()) { @@ -561,7 +561,7 @@ void index_put_with_sort_kernel(Tensor & self, const c10::List>& indices, const Tensor & value, double scale, int zero_point, bool unsafe) { +void index_put_with_sort_quantized(Tensor & self, const c10::List>& indices, const Tensor & value, double scale, int zero_point, bool unsafe) { if (indices.size() > (size_t)self.dim()) { TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); } @@ -861,7 +861,7 @@ void index_add_cuda_impl(const Tensor& self, int64_t dim, const Tensor& index, c TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims"); if (globalContext().deterministicAlgorithms()){ - torch::List> indices; + torch::List> indices; indices.reserve(dim + 1); for (const auto i: c10::irange(dim)) { indices.emplace_back(); diff --git a/aten/src/ATen/native/cuda/LinearAlgebraStubs.cpp b/aten/src/ATen/native/cuda/LinearAlgebraStubs.cpp index 045bfa8d1f90b..701669bf709e5 100644 --- a/aten/src/ATen/native/cuda/LinearAlgebraStubs.cpp +++ b/aten/src/ATen/native/cuda/LinearAlgebraStubs.cpp @@ -98,7 +98,7 @@ void lazy_linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& i void lazy_svd_kernel(const 
Tensor& A, const bool full_matrices, const bool compute_uv, - const c10::optional& driver, + const std::optional& driver, const Tensor& U, const Tensor& S, const Tensor& Vh, diff --git a/aten/src/ATen/native/cuda/Loss.cu b/aten/src/ATen/native/cuda/Loss.cu index 1691adca87253..d87f1aa97873b 100644 --- a/aten/src/ATen/native/cuda/Loss.cu +++ b/aten/src/ATen/native/cuda/Loss.cu @@ -62,7 +62,7 @@ void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& namespace at::native { -Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction) { +Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -72,7 +72,7 @@ Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, cons input, target, weight, reduction, loss); } -Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction, Tensor& loss) { +Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction, Tensor& loss) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -121,7 +121,7 @@ Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, return loss; } -Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction) { +Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -131,7 +131,7 @@ Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& inpu grad, input, target, weight, reduction, grad_input); } -Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction, Tensor& grad_input) { +Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const std::optional& weight_opt, int64_t reduction, Tensor& grad_input) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; diff --git a/aten/src/ATen/native/cuda/MixedDtypesLinear.cu b/aten/src/ATen/native/cuda/MixedDtypesLinear.cu index 7b55c7a952442..27563c1017fbf 100644 --- a/aten/src/ATen/native/cuda/MixedDtypesLinear.cu +++ b/aten/src/ATen/native/cuda/MixedDtypesLinear.cu @@ -196,8 +196,8 @@ mixed_dtypes_linear_dispatch_bias_activation( Tensor _mixed_dtypes_linear(const Tensor& input, const Tensor& weight, const Tensor& scale, - const c10::optional& bias_opt, - const c10::optional activation_opt) { + const std::optional& bias_opt, + const std::optional activation_opt) { #if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) 
AT_ERROR("_mixed_dtypes_linear: not compiled for this platform"); return Tensor{}; diff --git a/aten/src/ATen/native/cuda/MultiMarginLoss.cu b/aten/src/ATen/native/cuda/MultiMarginLoss.cu index 989a3e116ad62..0424fcc8e3d38 100644 --- a/aten/src/ATen/native/cuda/MultiMarginLoss.cu +++ b/aten/src/ATen/native/cuda/MultiMarginLoss.cu @@ -132,7 +132,7 @@ void multi_margin_loss_shape_check( const int64_t& ndims, const Tensor& input, const Tensor& target, - const c10::optional& weight) { + const std::optional& weight) { TORCH_CHECK( (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0, "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ", @@ -162,7 +162,7 @@ void multi_margin_loss_shape_check( Tensor& multi_margin_loss_cuda_out( const Tensor &input_, const Tensor &target_, const Scalar &p_, const Scalar &margin_, - const c10::optional &weights_, int64_t reduction, Tensor& out_) { + const std::optional &weights_, int64_t reduction, Tensor& out_) { auto p = p_.toLong(); int64_t nframe, dim; const auto ndims = input_.dim(); @@ -288,7 +288,7 @@ Tensor& multi_margin_loss_cuda_out( Tensor multi_margin_loss_cuda( const Tensor &input, const Tensor &target, const Scalar &p, const Scalar &margin, - const c10::optional &weights, int64_t reduction) { + const std::optional &weights, int64_t reduction) { auto out = at::empty({0}, input.options()); multi_margin_loss_cuda_out(input, target, p, margin, weights, reduction, out); return out; @@ -296,7 +296,7 @@ Tensor multi_margin_loss_cuda( Tensor& multi_margin_loss_cuda_backward_out( const Tensor &grad_output_,const Tensor &input_, const Tensor &target_, - const Scalar &p_, const Scalar &margin_, const c10::optional &weights_, + const Scalar &p_, const Scalar &margin_, const std::optional &weights_, int64_t reduction, Tensor &grad_input_) { auto p = p_.toLong(); int64_t nframe, dim; @@ -403,7 +403,7 @@ Tensor& multi_margin_loss_cuda_backward_out( Tensor multi_margin_loss_cuda_backward( const Tensor &grad_output, const Tensor &input, const Tensor &target, - const Scalar &p, const Scalar &margin, const c10::optional &weights, + const Scalar &p, const Scalar &margin, const std::optional &weights, int64_t reduction) { auto grad_input = at::empty({0}, input.options()); multi_margin_loss_cuda_backward_out( diff --git a/aten/src/ATen/native/cuda/MultinomialKernel.cu b/aten/src/ATen/native/cuda/MultinomialKernel.cu index d8f142a813f83..3e67f5ad5bfbe 100644 --- a/aten/src/ATen/native/cuda/MultinomialKernel.cu +++ b/aten/src/ATen/native/cuda/MultinomialKernel.cu @@ -328,7 +328,7 @@ void multinomial_with_replacement_kernel_impl( Tensor& result, const Tensor& self, const int64_t n_sample, - c10::optional generator) { + std::optional generator) { auto gen = get_generator_or_default(generator, cuda::detail::getDefaultCUDAGenerator()); int inputSize = self.dim(); diff --git a/aten/src/ATen/native/cuda/NLLLoss2d.cu b/aten/src/ATen/native/cuda/NLLLoss2d.cu index 94c9aeba79f51..046ea7bbc6d7f 100644 --- a/aten/src/ATen/native/cuda/NLLLoss2d.cu +++ b/aten/src/ATen/native/cuda/NLLLoss2d.cu @@ -233,7 +233,7 @@ void nll_loss2d_forward_out_cuda_template( Tensor& total_weight, const Tensor& input, const Tensor& target, - const c10::optional& weight_opt, + const std::optional& weight_opt, int64_t reduction, int64_t ignore_index) { // See Note [Writing Nondeterministic Operations] @@ -356,7 +356,7 @@ void nll_loss2d_backward_out_cuda_template( const Tensor& grad_output, const Tensor& input, const Tensor& target, - const 
c10::optional& weight_opt, + const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight) { @@ -467,7 +467,7 @@ void nll_loss2d_backward_out_cuda_template( std::tuple nll_loss2d_forward_out_cuda( const Tensor& self, const Tensor& target, - const c10::optional& weight_opt, + const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, Tensor& output, @@ -480,7 +480,7 @@ std::tuple nll_loss2d_forward_out_cuda( std::tuple nll_loss2d_forward_cuda( const Tensor& self, const Tensor& target, - const c10::optional& weight_opt, + const std::optional& weight_opt, int64_t reduction, int64_t ignore_index) { auto output = at::empty({0}, self.options()); @@ -494,7 +494,7 @@ Tensor& nll_loss2d_backward_out_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, - const c10::optional& weight_opt, + const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight, @@ -515,7 +515,7 @@ Tensor nll_loss2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, - const c10::optional& weight_opt, + const std::optional& weight_opt, int64_t reduction, int64_t ignore_index, const Tensor& total_weight) { diff --git a/aten/src/ATen/native/cuda/NaiveConvolutionTranspose3d.cu b/aten/src/ATen/native/cuda/NaiveConvolutionTranspose3d.cu index fd6e83aa24171..56b762a051fbf 100644 --- a/aten/src/ATen/native/cuda/NaiveConvolutionTranspose3d.cu +++ b/aten/src/ATen/native/cuda/NaiveConvolutionTranspose3d.cu @@ -835,7 +835,7 @@ void slow_conv_transpose3d_acc_grad_parameters_cuda( Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, @@ -862,7 +862,7 @@ Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input, Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, diff --git a/aten/src/ATen/native/cuda/NaiveDilatedConvolution.cu b/aten/src/ATen/native/cuda/NaiveDilatedConvolution.cu index e62e959fdf4a0..cd969fa9405bb 100644 --- a/aten/src/ATen/native/cuda/NaiveDilatedConvolution.cu +++ b/aten/src/ATen/native/cuda/NaiveDilatedConvolution.cu @@ -399,7 +399,7 @@ void slow_conv_dilated_all_cuda_template( Tensor slow_conv_dilated2d_cuda( const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { @@ -505,7 +505,7 @@ std::tuple slow_conv_dilated2d_backward_cuda( Tensor slow_conv_dilated3d_cuda( const Tensor& input, const Tensor& weight, - IntArrayRef kernel_size, const c10::optional& bias_opt, + IntArrayRef kernel_size, const std::optional& bias_opt, IntArrayRef stride_size, IntArrayRef pad_size, IntArrayRef dilation_size) { diff --git a/aten/src/ATen/native/cuda/Normalization.cu b/aten/src/ATen/native/cuda/Normalization.cu index ce0a50daae145..2bfaf13390858 100644 --- a/aten/src/ATen/native/cuda/Normalization.cu +++ b/aten/src/ATen/native/cuda/Normalization.cu @@ -95,8 +95,8 @@ inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) { } void batch_norm_elementwise( - const Tensor& out, const 
Tensor& self, const c10::optional& weight_opt, - const c10::optional& bias_opt, const Tensor& mean_, const Tensor& invstd_) { + const Tensor& out, const Tensor& self, const std::optional& weight_opt, + const std::optional& bias_opt, const Tensor& mean_, const Tensor& invstd_) { switch (batch_norm_choose_impl(self)) { case Impl::Contiguous: { c10::MaybeOwned weight = at::borrow_from_optional_tensor(weight_opt); @@ -432,7 +432,7 @@ void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, } } -std::tuple batch_norm_cuda_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { +std::tuple batch_norm_cuda_out(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined()); TORCH_CHECK(has_running_mean == has_running_var); @@ -458,7 +458,7 @@ std::tuple batch_norm_cuda_out(const Tensor& self, co return std::tuple(output, save_mean, save_invstd); } -std::tuple batch_norm_cuda(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double epsilon) { +std::tuple batch_norm_cuda(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double epsilon) { auto output = at::empty_like(self); int64_t n_input = self.size(1); auto options = self.options().dtype( @@ -482,7 +482,7 @@ std::tuple batch_norm_cuda(const Tensor& self, const c10 } std::tuple _batch_norm_with_update_cuda( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, double momentum, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -507,7 +507,7 @@ std::tuple _batch_norm_with_update_cuda( } std::tuple _batch_norm_with_update_cuda_out( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, double momentum, double eps, Tensor& out, Tensor& save_mean, Tensor& save_var, Tensor& reserve) { // See [Note: hacky wrapper removal for optional tensor] @@ -529,26 +529,26 @@ std::tuple _batch_norm_with_update_cuda_out( return std::tuple(out, save_mean, save_var, reserve); } -std::tuple _batch_norm_legit_cuda(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon) { +std::tuple _batch_norm_legit_cuda(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double 
momentum, double epsilon) { return batch_norm_cuda(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon); } -std::tuple _batch_norm_legit_no_stats_cuda(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double epsilon) { +std::tuple _batch_norm_legit_no_stats_cuda(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double epsilon) { return batch_norm_cuda(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon); } -std::tuple _batch_norm_legit_cuda_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { +std::tuple _batch_norm_legit_cuda_out(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { return batch_norm_cuda_out(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, epsilon, output, save_mean, save_invstd); } -std::tuple _batch_norm_legit_no_stats_cuda_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { +std::tuple _batch_norm_legit_no_stats_cuda_out(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { return batch_norm_cuda_out(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, epsilon, output, save_mean, save_invstd); } std::tuple _new_batch_norm_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, - const c10::optional& running_mean_opt, const c10::optional& running_var_opt, - const c10::optional& save_mean_opt, const c10::optional& save_var_opt, + const std::optional& running_mean_opt, const c10::optional& running_var_opt, + const std::optional& save_mean_opt, const c10::optional& save_var_opt, bool update, double eps, std::array grad_input_mask, const Tensor& reserve) { const Tensor& dummy_bias = at::empty(1); const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); @@ -567,7 +567,7 @@ std::tuple _new_batch_norm_backward_cuda( } } -std::tuple batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, bool train, double epsilon, std::array grad_input_mask) { +std::tuple batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const std::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, bool train, double epsilon, std::array grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight = at::borrow_from_optional_tensor(weight_opt); c10::MaybeOwned save_mean = at::borrow_from_optional_tensor(save_mean_opt); @@ -673,8 +673,8 @@ std::tuple batch_norm_stats_cuda(const Tensor& self, double epsi } Tensor batch_norm_elemt_cuda( - const Tensor& self, 
const c10::optional& weight_opt, - const c10::optional& bias_opt, const Tensor& mean, + const Tensor& self, const std::optional& weight_opt, + const std::optional& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self); // FIXME: Epsilon parameter isn't required, we don't take the reciprocal @@ -682,7 +682,7 @@ Tensor batch_norm_elemt_cuda( return output; } -Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, +Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // FIXME: Epsilon parameter isn't required, we don't take the reciprocal batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); @@ -690,7 +690,7 @@ Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, double momentum, double epsilon, int64_t count) { +std::tuple batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const std::optional& running_mean_opt, const c10::optional& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; @@ -704,7 +704,7 @@ std::tuple batch_norm_gather_stats_cuda(const Tensor& self, cons std::tuple batch_norm_gather_stats_with_counts_cuda( - const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { + const Tensor& self, const Tensor& mean, const Tensor& invstd, const std::optional& running_mean_opt /* optional */, const c10::optional& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); const Tensor& running_mean = *running_mean_maybe_owned; @@ -722,7 +722,7 @@ std::tuple batch_norm_gather_stats_with_counts_cuda( }); } -std::tuple batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional& weight_opt, bool input_g, bool weight_g, bool bias_g) { +std::tuple batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const std::optional& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -759,7 +759,7 @@ std::tuple batch_norm_backward_reduce_cuda(const }); } -Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { +Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const std::optional& 
weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); const Tensor& weight = *weight_maybe_owned; @@ -794,8 +794,8 @@ Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, c } std::tuple batch_norm_update_stats_cuda( - const Tensor& self, const c10::optional& running_mean_opt, - const c10::optional& running_var_opt, double momentum) { + const Tensor& self, const std::optional& running_mean_opt, + const std::optional& running_var_opt, double momentum) { c10::MaybeOwned running_mean = at::borrow_from_optional_tensor(running_mean_opt); c10::MaybeOwned running_var = at::borrow_from_optional_tensor(running_var_opt); diff --git a/aten/src/ATen/native/cuda/RNN.cu b/aten/src/ATen/native/cuda/RNN.cu index a997777fe0c3a..c448ba592e4af 100644 --- a/aten/src/ATen/native/cuda/RNN.cu +++ b/aten/src/ATen/native/cuda/RNN.cu @@ -516,7 +516,7 @@ void gru_backward_impl(const Tensor& grad_hy, const Tensor& workspace, std::tuple _thnn_fused_lstm_cell_cuda( const Tensor& input_gates, const Tensor& hidden_gates, - const Tensor& cx, const c10::optional& input_bias_opt, const c10::optional& hidden_bias_opt) { + const Tensor& cx, const std::optional& input_bias_opt, const c10::optional& hidden_bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt); const Tensor& input_bias = *input_bias_maybe_owned; @@ -564,7 +564,7 @@ void checkLSTMBackwardSizes(const TensorArg& grad_hy, const TensorArg& grad_cy, checkNumel(c, workspace, exp_size[0] * exp_size[1] * 4); } -std::tuple _thnn_fused_lstm_cell_backward_impl_cuda( const c10::optional& grad_hy_opt, const c10::optional& grad_cy_opt, +std::tuple _thnn_fused_lstm_cell_backward_impl_cuda( const std::optional& grad_hy_opt, const c10::optional& grad_cy_opt, const Tensor& cx, const Tensor& cy, const Tensor& workspace, bool has_bias) { // See [Note: hacky wrapper removal for optional tensor] @@ -602,7 +602,7 @@ static constexpr int64_t GRU_WORKSPACE_MULTIPLIER = 5; std::tuple _thnn_fused_gru_cell_cuda( const Tensor& input_gates, const Tensor& hidden_gates, - const Tensor& hx, const c10::optional& input_bias_opt, const c10::optional& hidden_bias_opt) { + const Tensor& hx, const std::optional& input_bias_opt, const c10::optional& hidden_bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt); const Tensor& input_bias = *input_bias_maybe_owned; diff --git a/aten/src/ATen/native/cuda/Randperm.cu b/aten/src/ATen/native/cuda/Randperm.cu index c22c99dfe6a71..bde5457e8cdd8 100644 --- a/aten/src/ATen/native/cuda/Randperm.cu +++ b/aten/src/ATen/native/cuda/Randperm.cu @@ -55,7 +55,7 @@ namespace { template struct alignas(N) OpaqueType { char data[N]; }; } -Tensor& randperm_out_cuda(int64_t n, c10::optional generator, Tensor& result) { +Tensor& randperm_out_cuda(int64_t n, std::optional generator, Tensor& result) { TORCH_CHECK(n >= 0, "n must be non-negative, got", n); check_supported_max_int_with_precision(n, result); diff --git a/aten/src/ATen/native/cuda/Repeat.cu b/aten/src/ATen/native/cuda/Repeat.cu index 0a39a0445dbe2..57a879d6f61ac 100644 --- a/aten/src/ATen/native/cuda/Repeat.cu +++ b/aten/src/ATen/native/cuda/Repeat.cu @@ -54,7 +54,7 @@ namespace at::native { Tensor repeat_interleave_cuda( 
const Tensor& repeat, - c10::optional output_size) { + std::optional output_size) { Tensor output; AT_DISPATCH_INDEX_TYPES( repeat.scalar_type(), "repeat_interleave_cuda", [&]() { diff --git a/aten/src/ATen/native/cuda/Resize.cpp b/aten/src/ATen/native/cuda/Resize.cpp index 2bf6266d678b9..fe844f55d2333 100644 --- a/aten/src/ATen/native/cuda/Resize.cpp +++ b/aten/src/ATen/native/cuda/Resize.cpp @@ -49,7 +49,7 @@ void resize_bytes_cuda(StorageImpl* storage, size_t size_bytes) { const Tensor& resize_cuda_( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { if (self.has_names()) { return resize_named_tensor_(self, size, optional_memory_format); } diff --git a/aten/src/ATen/native/cuda/RreluWithNoise.cu b/aten/src/ATen/native/cuda/RreluWithNoise.cu index 463a5ce00c813..7133a4920c327 100644 --- a/aten/src/ATen/native/cuda/RreluWithNoise.cu +++ b/aten/src/ATen/native/cuda/RreluWithNoise.cu @@ -74,7 +74,7 @@ inline void _rrelu_with_noise_cuda_train( const Tensor& noise_, const Scalar& lower_, const Scalar& upper_, - c10::optional generator) { + std::optional generator) { auto input = input_.contiguous(); auto noise = noise_.contiguous(); Tensor tmp_output = output.contiguous(); @@ -142,7 +142,7 @@ Tensor& rrelu_with_noise_out_cuda(const Tensor& self, const Scalar& lower, const Scalar& upper, bool training, - c10::optional generator, + std::optional generator, Tensor& output) { at::native::resize_output(output, self.sizes()); @@ -176,7 +176,7 @@ Tensor rrelu_with_noise_cuda( const Scalar& lower, const Scalar& upper, bool training, - c10::optional generator) { + std::optional generator) { Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output); } @@ -187,7 +187,7 @@ Tensor& rrelu_with_noise_cuda_( const Scalar& lower, const Scalar& upper, bool training, - c10::optional generator) { + std::optional generator) { return at::native::rrelu_with_noise_out_cuda( self, noise, lower, upper, training, generator, self); } diff --git a/aten/src/ATen/native/cuda/SegmentReduce.cu b/aten/src/ATen/native/cuda/SegmentReduce.cu index d4af81db771d3..cbdbb020d634a 100644 --- a/aten/src/ATen/native/cuda/SegmentReduce.cu +++ b/aten/src/ATen/native/cuda/SegmentReduce.cu @@ -266,7 +266,7 @@ Tensor _segment_reduce_lengths_offsets_backward_cuda_kernel( ReductionType reduction, const Tensor& lengths_or_offsets_contig, int64_t axis, - const c10::optional& initial, + const std::optional& initial, bool is_offsets_like) { axis = lengths_or_offsets_contig.dim() - 1; int64_t segment_count = is_offsets_like ? 
@@ -368,7 +368,7 @@ Tensor _segment_reduce_lengths_backward_cuda_kernel( ReductionType reduction, const Tensor& lengths_contig, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, lengths_contig, axis, initial, /*is_offsets_like=*/false); } @@ -380,7 +380,7 @@ Tensor _segment_reduce_offsets_backward_cuda_kernel( ReductionType reduction, const Tensor& offsets_contig, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { return _segment_reduce_lengths_offsets_backward_cuda_kernel( grad_contig, output_contig, data_contig, reduction, offsets_contig, axis, initial, /*is_offsets_like=*/true); } @@ -390,7 +390,7 @@ Tensor _segment_reduce_lengths_offsets_cuda_kernel( const Tensor& data, const Tensor& lengths_or_offsets, int64_t axis, - const c10::optional& initial, + const std::optional& initial, bool is_offsets_like) { // data and lengths_or_offsets should be contiguous from the call to .contiguous in segment_reduce_kernel TORCH_CHECK(data.is_contiguous()); @@ -575,7 +575,7 @@ Tensor _segment_reduce_lengths_cuda_kernel( const Tensor& data, const Tensor& lengths, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, lengths, axis, initial, /*is_offsets_like=*/false); } @@ -585,7 +585,7 @@ Tensor _segment_reduce_offsets_cuda_kernel( const Tensor& data, const Tensor& offsets, int64_t axis, - const c10::optional& initial) { + const std::optional& initial) { return _segment_reduce_lengths_offsets_cuda_kernel( reduction, data, offsets, axis, initial, /*is_offsets_like=*/true); } diff --git a/aten/src/ATen/native/cuda/SoftMax.cu b/aten/src/ATen/native/cuda/SoftMax.cu index cffd52624f9e3..97528b48d8cb0 100644 --- a/aten/src/ATen/native/cuda/SoftMax.cu +++ b/aten/src/ATen/native/cuda/SoftMax.cu @@ -1113,7 +1113,7 @@ TORCH_IMPL_FUNC(softmax_backward_cuda_out) host_softmax_backward(tmp, output, dim, half_to_float, grad_input); } -Tensor masked_softmax_cuda(const Tensor& input_, const Tensor& mask_, const c10::optional dim_, const c10::optional mask_type_) { +Tensor masked_softmax_cuda(const Tensor& input_, const Tensor& mask_, const std::optional dim_, const c10::optional mask_type_) { Tensor output = at::empty_like(input_, input_.options()); TORCH_CHECK(mask_.scalar_type() == ScalarType::Bool, "Mask should be a boolean tensor"); @@ -1211,7 +1211,7 @@ Tensor masked_softmax_backward_cuda( const Tensor& grad_, const Tensor& output_, const Tensor& mask_, - const c10::optional dim_) { + const std::optional dim_) { Tensor grad_input = at::empty_like(grad_, grad_.options()); if (grad_.numel() == 0) { return grad_input; diff --git a/aten/src/ATen/native/cuda/SparseBinaryOpIntersectionKernel.cu b/aten/src/ATen/native/cuda/SparseBinaryOpIntersectionKernel.cu index 62282659f6e8b..2cd1dd893a447 100644 --- a/aten/src/ATen/native/cuda/SparseBinaryOpIntersectionKernel.cu +++ b/aten/src/ATen/native/cuda/SparseBinaryOpIntersectionKernel.cu @@ -167,7 +167,7 @@ struct CUDAValueSelectionIntersectionKernel { } }; -using OptTensor = c10::optional; +using OptTensor = std::optional; void mul_sparse_sparse_out_cuda_kernel( Tensor& result, diff --git a/aten/src/ATen/native/cuda/SpectralOps.cpp b/aten/src/ATen/native/cuda/SpectralOps.cpp index 1032fb28d799c..5d93797c5bd21 100644 --- a/aten/src/ATen/native/cuda/SpectralOps.cpp +++ 
b/aten/src/ATen/native/cuda/SpectralOps.cpp @@ -218,7 +218,7 @@ static const Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_ CuFFTParams Params(input.strides(), out.strides(), signal_size, fft_type, value_type); CuFFTParamsLRUCache& plan_cache = cufft_get_plan_cache(input.device().index()); std::unique_lock guard(plan_cache.mutex, std::defer_lock); - c10::optional uncached_plan; + std::optional uncached_plan; const CuFFTConfig * config = nullptr; // Workaround for gh-63152, gh-58724 diff --git a/aten/src/ATen/native/cuda/SummaryOps.cu b/aten/src/ATen/native/cuda/SummaryOps.cu index f2626ccff4db7..30adb0b3e5c1a 100644 --- a/aten/src/ATen/native/cuda/SummaryOps.cu +++ b/aten/src/ATen/native/cuda/SummaryOps.cu @@ -360,7 +360,7 @@ Tensor _histc_cuda_template( namespace native { Tensor _bincount_cuda( - const Tensor& self, const c10::optional& weights_opt, + const Tensor& self, const std::optional& weights_opt, int64_t minlength) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt); diff --git a/aten/src/ATen/native/cuda/TensorFactories.cu b/aten/src/ATen/native/cuda/TensorFactories.cu index 42ea83a4b8bf0..87daceacdfba0 100644 --- a/aten/src/ATen/native/cuda/TensorFactories.cu +++ b/aten/src/ATen/native/cuda/TensorFactories.cu @@ -51,7 +51,7 @@ Tensor& eye_out_cuda(int64_t n, int64_t m, Tensor& result) { return result; } -Tensor empty_cuda(IntArrayRef size, c10::optional dtype_opt, c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt, c10::optional memory_format_opt) { +Tensor empty_cuda(IntArrayRef size, std::optional dtype_opt, c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt, c10::optional memory_format_opt) { Tensor result = at::detail::empty_cuda(size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt); // See Note [Enabling Deterministic Operations] if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) { @@ -61,10 +61,10 @@ Tensor empty_cuda(IntArrayRef size, c10::optional dtype_opt, c10::op } Tensor _efficientzerotensor_cuda(IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { auto device_ = device_or_default(device); if (!device_.has_index()) { device_.set_index(at::cuda::current_device()); @@ -77,7 +77,7 @@ Tensor _efficientzerotensor_cuda(IntArrayRef size, } -Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, c10::optional dtype_opt, c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { +Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, std::optional dtype_opt, c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { Tensor result = at::detail::empty_strided_cuda(size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt); // See Note [Enabling Deterministic Operations] if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms() && at::globalContext().deterministicFillUninitializedMemory())) { @@ -274,8 +274,8 @@ void tril_indices_kernel(scalar_t * tensor, // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. 
Tensor tril_indices_cuda( - int64_t row, int64_t col, int64_t offset, c10::optional dtype_opt, - c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { + int64_t row, int64_t col, int64_t offset, std::optional dtype_opt, + std::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { check_args(row, col, layout_opt); auto tril_size = get_tril_size(row, col, offset); @@ -350,8 +350,8 @@ void triu_indices_kernel(scalar_t * tensor, // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor triu_indices_cuda( - int64_t row, int64_t col, int64_t offset, c10::optional dtype_opt, - c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { + int64_t row, int64_t col, int64_t offset, std::optional dtype_opt, + std::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { check_args(row, col, layout_opt); auto triu_size = row * col - get_tril_size(row, col, offset - 1); diff --git a/aten/src/ATen/native/cuda/UnaryOpsKernel.cu b/aten/src/ATen/native/cuda/UnaryOpsKernel.cu index 451c15443fa8e..1dd47c93fae94 100644 --- a/aten/src/ATen/native/cuda/UnaryOpsKernel.cu +++ b/aten/src/ATen/native/cuda/UnaryOpsKernel.cu @@ -221,9 +221,9 @@ C10_HOST_DEVICE static inline scalar_t _nan_to_num_replace(scalar_t a, scalar_t void nan_to_num_kernel_cuda( TensorIteratorBase& iter, - c10::optional nan, - c10::optional pos_inf, - c10::optional neg_inf) { + std::optional nan, + std::optional pos_inf, + std::optional neg_inf) { if (isComplexType(iter.dtype())) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "nan_to_num", [&]() { using value_t = scalar_t::value_type; diff --git a/aten/src/ATen/native/cuda/Unique.cu b/aten/src/ATen/native/cuda/Unique.cu index e2654be0135f8..39e80e0a68c3c 100644 --- a/aten/src/ATen/native/cuda/Unique.cu +++ b/aten/src/ATen/native/cuda/Unique.cu @@ -218,7 +218,7 @@ unique_dim_consecutive_cuda(const Tensor& self, const int64_t dim, const bool re } std::tuple -unique_consecutive_cuda(const Tensor& self, const bool return_inverse, const bool return_counts, c10::optional dim) { +unique_consecutive_cuda(const Tensor& self, const bool return_inverse, const bool return_counts, std::optional dim) { if (!dim.has_value()) { return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] { // The current CUDA implementation of unique always sort due to the diff --git a/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu b/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu index 6673fe4993f39..31cdf0a5688b7 100644 --- a/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu +++ b/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu @@ -170,8 +170,8 @@ static void upsample_bicubic2d_out_cuda_template( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); @@ -225,8 +225,8 @@ static void upsample_bicubic2d_backward_out_cuda_template( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); @@ -275,8 +275,8 @@ TORCH_IMPL_FUNC(upsample_bicubic2d_out_cuda) ( const 
Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_bicubic2d_out_cuda_template(output, input, output_size, align_corners, scales_h, scales_w); } @@ -286,8 +286,8 @@ TORCH_IMPL_FUNC(upsample_bicubic2d_backward_out_cuda) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage diff --git a/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu b/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu index 3c80cb7877a5c..4bd230ab8fe76 100644 --- a/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu +++ b/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu @@ -264,8 +264,8 @@ static void upsample_bilinear2d_out_cuda_template( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); @@ -362,8 +362,8 @@ static void upsample_bilinear2d_backward_out_cuda_template( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); @@ -674,8 +674,8 @@ static void upsample_gen2d_aa_out_cuda_template( const Tensor& input_, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_gen2d_aa_out_cuda", {input_arg, output_arg}); @@ -769,8 +769,8 @@ static void upsample_gen2d_aa_backward_out_cuda_template( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { // Inspired from UpSampleBicubic2d.cu::upsample_bicubic2d_backward_out_cuda_template TensorArg grad_input_arg{grad_input, "grad_input", 1}, @@ -844,8 +844,8 @@ TORCH_IMPL_FUNC(upsample_bilinear2d_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_bilinear2d_out_cuda_template(output, input, output_size, align_corners, scales_h, scales_w); } @@ -855,8 +855,8 @@ TORCH_IMPL_FUNC(upsample_bilinear2d_backward_out_cuda) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage @@ -869,8 +869,8 @@ TORCH_IMPL_FUNC(_upsample_bilinear2d_aa_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_gen2d_aa_out_cuda_template( @@ 
-882,8 +882,8 @@ TORCH_IMPL_FUNC(_upsample_bilinear2d_aa_backward_out_cuda) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage @@ -898,8 +898,8 @@ TORCH_IMPL_FUNC(_upsample_bicubic2d_aa_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_gen2d_aa_out_cuda_template( output, input, output_size, align_corners, scales_h, scales_w); @@ -910,8 +910,8 @@ TORCH_IMPL_FUNC(_upsample_bicubic2d_aa_backward_out_cuda) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage diff --git a/aten/src/ATen/native/cuda/UpSampleLinear1d.cu b/aten/src/ATen/native/cuda/UpSampleLinear1d.cu index dfba2f5479071..ebd11e234d7b3 100644 --- a/aten/src/ATen/native/cuda/UpSampleLinear1d.cu +++ b/aten/src/ATen/native/cuda/UpSampleLinear1d.cu @@ -121,7 +121,7 @@ static void upsample_linear1d_out_cuda_template( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales) { + std::optional scales) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); @@ -164,7 +164,7 @@ static void upsample_linear1d_backward_out_cuda_template( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales) { + std::optional scales) { TensorArg grad_output_arg{grad_output_, "grad_output_", 1}, grad_input_arg{grad_input, "grad_input", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); @@ -208,7 +208,7 @@ TORCH_IMPL_FUNC(upsample_linear1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales, + std::optional scales, const Tensor& output ) { upsample_linear1d_out_cuda_template(output, input, output_size, align_corners, scales); @@ -219,7 +219,7 @@ TORCH_IMPL_FUNC(upsample_linear1d_backward_out_cuda) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales, + std::optional scales, const Tensor& grad_input ) { // See Note [Writing Nondeterministic Operations] diff --git a/aten/src/ATen/native/cuda/UpSampleNearest1d.cu b/aten/src/ATen/native/cuda/UpSampleNearest1d.cu index 3085cba0a1d16..1073f8d9dbb51 100644 --- a/aten/src/ATen/native/cuda/UpSampleNearest1d.cu +++ b/aten/src/ATen/native/cuda/UpSampleNearest1d.cu @@ -104,7 +104,7 @@ static void upsample_nearest1d_out_cuda_template( const Tensor& output, const Tensor& input_, IntArrayRef output_size, - c10::optional scales) { + std::optional scales) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg}); @@ -149,7 +149,7 @@ static void upsample_nearest1d_backward_out_cuda_template( const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales) { + std::optional scales) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( @@ -198,7 
+198,7 @@ static void upsample_nearest1d_backward_out_cuda_template( TORCH_IMPL_FUNC(upsample_nearest1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales, + std::optional scales, const Tensor& output ) { upsample_nearest1d_out_cuda_template( @@ -208,7 +208,7 @@ TORCH_IMPL_FUNC(upsample_nearest1d_out_cuda) ( TORCH_IMPL_FUNC(_upsample_nearest_exact1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales, + std::optional scales, const Tensor& output ) { upsample_nearest1d_out_cuda_template(output, input, output_size, scales); @@ -218,7 +218,7 @@ TORCH_IMPL_FUNC(upsample_nearest1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales, + std::optional scales, const Tensor& grad_input ) { upsample_nearest1d_backward_out_cuda_template( @@ -229,7 +229,7 @@ TORCH_IMPL_FUNC(_upsample_nearest_exact1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales, + std::optional scales, const Tensor& grad_input ) { upsample_nearest1d_backward_out_cuda_template( diff --git a/aten/src/ATen/native/cuda/UpSampleNearest2d.cu b/aten/src/ATen/native/cuda/UpSampleNearest2d.cu index 197fc9d60bef7..36db81cd277aa 100644 --- a/aten/src/ATen/native/cuda/UpSampleNearest2d.cu +++ b/aten/src/ATen/native/cuda/UpSampleNearest2d.cu @@ -207,8 +207,8 @@ static void upsample_nearest2d_out_cuda_template( const Tensor& output, const Tensor& input_, IntArrayRef output_size, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); @@ -337,8 +337,8 @@ static void upsample_nearest2d_backward_out_cuda_template( const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); @@ -446,8 +446,8 @@ static void upsample_nearest2d_backward_out_cuda_template( TORCH_IMPL_FUNC(upsample_nearest2d_out_cuda) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_nearest2d_out_cuda_template( output, input, output_size, scales_h, scales_w); @@ -456,8 +456,8 @@ TORCH_IMPL_FUNC(upsample_nearest2d_out_cuda) ( TORCH_IMPL_FUNC(_upsample_nearest_exact2d_out_cuda) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_nearest2d_out_cuda_template( output, input, output_size, scales_h, scales_w); @@ -467,8 +467,8 @@ TORCH_IMPL_FUNC(upsample_nearest2d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); @@ -478,8 +478,8 @@ TORCH_IMPL_FUNC(_upsample_nearest_exact2d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional 
scales_h, - c10::optional scales_w, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); diff --git a/aten/src/ATen/native/cuda/UpSampleNearest3d.cu b/aten/src/ATen/native/cuda/UpSampleNearest3d.cu index 31a7ee92e7488..53e8d71e79a79 100644 --- a/aten/src/ATen/native/cuda/UpSampleNearest3d.cu +++ b/aten/src/ATen/native/cuda/UpSampleNearest3d.cu @@ -148,9 +148,9 @@ static void upsample_nearest3d_out_cuda_template( const Tensor& output, const Tensor& input_, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); @@ -223,9 +223,9 @@ static void upsample_nearest3d_backward_out_cuda_template( const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( @@ -292,9 +292,9 @@ static void upsample_nearest3d_backward_out_cuda_template( TORCH_IMPL_FUNC(upsample_nearest3d_out_cuda) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_nearest3d_out_cuda_template( output, input, output_size, scales_d, scales_h, scales_w); @@ -303,9 +303,9 @@ TORCH_IMPL_FUNC(upsample_nearest3d_out_cuda) ( TORCH_IMPL_FUNC(_upsample_nearest_exact3d_out_cuda) ( const Tensor& input, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_nearest3d_out_cuda_template(output, input, output_size, scales_d, scales_h, scales_w); } @@ -314,9 +314,9 @@ TORCH_IMPL_FUNC(upsample_nearest3d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { upsample_nearest3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_d, scales_h, scales_w); @@ -326,9 +326,9 @@ TORCH_IMPL_FUNC(_upsample_nearest_exact3d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { upsample_nearest3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_d, scales_h, scales_w); diff --git a/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu b/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu index 43cc09d34b677..0abe0b6bcb4d2 100644 --- a/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu +++ b/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu @@ -245,9 +245,9 @@ static void upsample_trilinear3d_out_cuda_template( const Tensor& input, IntArrayRef 
output_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_trilinear3d_out_cuda", {input_arg, output_arg}); @@ -301,9 +301,9 @@ static void upsample_trilinear3d_backward_out_cuda_template( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { TensorArg grad_input_arg{grad_input_, "grad_input_", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( @@ -377,9 +377,9 @@ TORCH_IMPL_FUNC(upsample_trilinear3d_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& output) { upsample_trilinear3d_out_cuda_template(output, input, output_size, align_corners, scales_d, scales_h, scales_w); } @@ -389,9 +389,9 @@ TORCH_IMPL_FUNC(upsample_trilinear3d_backward_out_cuda) ( IntArrayRef output_size, IntArrayRef input_size, bool align_corners, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w, + std::optional scales_d, + std::optional scales_h, + std::optional scales_w, const Tensor& grad_input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage diff --git a/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu b/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu index 9cebb82e512a8..cef07de1b41f9 100644 --- a/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu +++ b/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu @@ -21,8 +21,8 @@ void _fused_adam_amsgrad_cuda_impl_( const double weight_decay, const double eps, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { std::vector> tensor_lists{ params.vec(), grads.vec(), @@ -72,8 +72,8 @@ void _fused_adam_amsgrad_cuda_impl_( const double weight_decay, const double eps, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { std::vector> tensor_lists{ params.vec(), grads.vec(), diff --git a/aten/src/ATen/native/cuda/fused_adam_impl.cu b/aten/src/ATen/native/cuda/fused_adam_impl.cu index 7f2843b3b4ee4..2c1f5ce0d6d57 100644 --- a/aten/src/ATen/native/cuda/fused_adam_impl.cu +++ b/aten/src/ATen/native/cuda/fused_adam_impl.cu @@ -20,8 +20,8 @@ void _fused_adam_cuda_impl_( const double weight_decay, const double eps, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { std::vector> tensor_lists{ params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec()}; @@ -66,8 +66,8 @@ void _fused_adam_cuda_impl_( const double weight_decay, const double eps, const bool maximize, - const c10::optional& grad_scale, - const c10::optional& found_inf) { + const std::optional& grad_scale, + const std::optional& found_inf) { std::vector> tensor_lists{ params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec()}; diff --git a/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu 
b/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu
index 376711c39db6d..8a22b57a47e8b 100644
--- a/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu
+++ b/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu
@@ -22,8 +22,8 @@ void _fused_adamw_amsgrad_cuda_impl_(
     const double weight_decay,
     const double eps,
     const bool maximize,
-    const c10::optional<at::Tensor>& grad_scale,
-    const c10::optional<at::Tensor>& found_inf) {
+    const std::optional<at::Tensor>& grad_scale,
+    const std::optional<at::Tensor>& found_inf) {
   std::vector<std::vector<at::Tensor>> tensor_lists{
       params.vec(),
       grads.vec(),
@@ -73,8 +73,8 @@ void _fused_adamw_amsgrad_cuda_impl_(
     const double weight_decay,
     const double eps,
     const bool maximize,
-    const c10::optional<at::Tensor>& grad_scale,
-    const c10::optional<at::Tensor>& found_inf) {
+    const std::optional<at::Tensor>& grad_scale,
+    const std::optional<at::Tensor>& found_inf) {
   std::vector<std::vector<at::Tensor>> tensor_lists{
       params.vec(),
       grads.vec(),
diff --git a/aten/src/ATen/native/cuda/fused_adamw_impl.cu b/aten/src/ATen/native/cuda/fused_adamw_impl.cu
index cc4feaa145122..b0f9dc6db6aff 100644
--- a/aten/src/ATen/native/cuda/fused_adamw_impl.cu
+++ b/aten/src/ATen/native/cuda/fused_adamw_impl.cu
@@ -21,8 +21,8 @@ void _fused_adamw_cuda_impl_(
     const double weight_decay,
     const double eps,
     const bool maximize,
-    const c10::optional<at::Tensor>& grad_scale,
-    const c10::optional<at::Tensor>& found_inf) {
+    const std::optional<at::Tensor>& grad_scale,
+    const std::optional<at::Tensor>& found_inf) {
   std::vector<std::vector<at::Tensor>> tensor_lists{
       params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec()};
 
@@ -67,8 +67,8 @@ void _fused_adamw_cuda_impl_(
     const double weight_decay,
     const double eps,
     const bool maximize,
-    const c10::optional<at::Tensor>& grad_scale,
-    const c10::optional<at::Tensor>& found_inf) {
+    const std::optional<at::Tensor>& grad_scale,
+    const std::optional<at::Tensor>& found_inf) {
   std::vector<std::vector<at::Tensor>> tensor_lists{
       params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec()};
 
diff --git a/aten/src/ATen/native/cuda/jit_utils.cpp b/aten/src/ATen/native/cuda/jit_utils.cpp
index 6e804efe5f847..0d870cef58708 100644
--- a/aten/src/ATen/native/cuda/jit_utils.cpp
+++ b/aten/src/ATen/native/cuda/jit_utils.cpp
@@ -1393,7 +1393,7 @@ std::string generate_reduction_code(
 }
 
 // Acquires (possibly creating) the kernel cache directory
-c10::optional<std::string> get_cache_dir() {
+std::optional<std::string> get_cache_dir() {
   // If the environment variable USE_TORCH_KERNEL_CACHE is set to "0" then no persistent cache is used
   const char* uptkc = std::getenv("USE_PYTORCH_KERNEL_CACHE");
   const bool use_kernel_cache = (uptkc == nullptr) ? true : std::strcmp(uptkc, "0");
@@ -1483,7 +1483,7 @@ NvrtcFunction jit_pwise_function(
   NvrtcFunction compiled_kernel_;
 
   std::string name = kernel_name + "_kernel";
-  static const c10::optional<std::string> cache_dir = get_cache_dir();
+  static const std::optional<std::string> cache_dir = get_cache_dir();
 
   std::string file_path;
   if (cache_dir.has_value()) {
diff --git a/aten/src/ATen/native/cuda/layer_norm_kernel.cu b/aten/src/ATen/native/cuda/layer_norm_kernel.cu
index 6423dddbb2995..f06b247ef32be 100644
--- a/aten/src/ATen/native/cuda/layer_norm_kernel.cu
+++ b/aten/src/ATen/native/cuda/layer_norm_kernel.cu
@@ -1334,8 +1334,8 @@ void LayerNormBackwardKernelImpl(
 std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda(
     const Tensor& input,
     IntArrayRef normalized_shape,
-    const c10::optional<Tensor>& weight_opt /* optional */,
-    const c10::optional<Tensor>& bias_opt /* optional */,
+    const std::optional<Tensor>& weight_opt /* optional */,
+    const std::optional<Tensor>& bias_opt /* optional */,
     double eps) {
   // See [Note: hacky wrapper removal for optional tensor]
   c10::MaybeOwned<Tensor> weight_maybe_owned =
@@ -1390,8 +1390,8 @@ std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda(
     IntArrayRef normalized_shape,
     const Tensor& mean,
     const Tensor& rstd,
-    const c10::optional<Tensor>& weight_opt /* optional */,
-    const c10::optional<Tensor>& bias_opt /* optional */,
+    const std::optional<Tensor>& weight_opt /* optional */,
+    const std::optional<Tensor>& bias_opt /* optional */,
     std::array<bool, 3> grad_input_mask) {
   // See [Note: hacky wrapper removal for optional tensor]
   c10::MaybeOwned<Tensor> weight_maybe_owned =
diff --git a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebra.cpp b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebra.cpp
index 5471c57ec30ed..04b12695dd0a7 100644
--- a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebra.cpp
+++ b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebra.cpp
@@ -2210,7 +2210,7 @@ void svd_magma(const Tensor& A,
 void svd_kernel(const Tensor& A,
                 const bool full_matrices,
                 const bool compute_uv,
-                const c10::optional<c10::string_view>& driver,
+                const std::optional<c10::string_view>& driver,
                 const Tensor& U,
                 const Tensor& S,
                 const Tensor& Vh,
diff --git a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.cpp b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.cpp
index 643501f0cbccd..bc06f118ae9a0 100644
--- a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.cpp
+++ b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.cpp
@@ -648,7 +648,7 @@ std::string _format_non_converging_batches(const std::vector<int64_t>& batches)
 void svd_cusolver(const Tensor& A,
                   const bool full_matrices,
                   const bool compute_uv,
-                  const c10::optional<c10::string_view>& driver,
+                  const std::optional<c10::string_view>& driver,
                   const Tensor& U,
                   const Tensor& S,
                   const Tensor& V,
diff --git a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h
index cca2e04941a54..75732ec315a45 100644
--- a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h
+++ b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h
@@ -61,7 +61,7 @@ void lu_solve_batched_cublas(const Tensor& LU, const Tensor& pivots, const Tenso
 
 // entrance of calculations of `svd` using cusolver gesvdj and gesvdjBatched
 void svd_cusolver(const Tensor& A, const bool full_matrices, const bool compute_uv,
-    const c10::optional<c10::string_view>& driver, const Tensor& U, const Tensor& S, const Tensor& V, const Tensor& info);
+    const std::optional<c10::string_view>& driver, const Tensor& U, const Tensor& S, const Tensor& V, const Tensor& info);
 
 // entrance of calculations of `cholesky` using cusolver potrf and potrfBatched
 void cholesky_helper_cusolver(const Tensor& input, bool upper, const Tensor& info);
diff --git
a/aten/src/ATen/native/cudnn/BatchNorm.cpp b/aten/src/ATen/native/cudnn/BatchNorm.cpp index 44b004dff0007..460a9b73dd2c5 100644 --- a/aten/src/ATen/native/cudnn/BatchNorm.cpp +++ b/aten/src/ATen/native/cudnn/BatchNorm.cpp @@ -19,9 +19,9 @@ namespace native { std::tuple cudnn_batch_norm( const Tensor& input, const Tensor& weight, - const c10::optional& bias_opt, - const c10::optional& running_mean_opt, - const c10::optional& running_var_opt, + const std::optional& bias_opt, + const std::optional& running_mean_opt, + const std::optional& running_var_opt, bool training, double exponential_average_factor, double epsilon) { @@ -32,10 +32,10 @@ std::tuple cudnn_batch_norm_backward( const Tensor& input, const Tensor& grad_output, const Tensor& weight, - const c10::optional& running_mean_opt, - const c10::optional& running_var_opt, - const c10::optional& save_mean_opt, - const c10::optional& save_var_opt, + const std::optional& running_mean_opt, + const std::optional& running_var_opt, + const std::optional& save_mean_opt, + const std::optional& save_var_opt, double epsilon, const Tensor& reservedSpace) { AT_ERROR("cudnn_batch_norm_backward: ATen not compiled with cuDNN support"); @@ -121,9 +121,9 @@ size_t _get_cudnn_batch_norm_reserve_space_size( std::tuple cudnn_batch_norm( const Tensor& input_t, const Tensor& weight_t, - const c10::optional& bias_t_opt, - const c10::optional& running_mean_t_opt, - const c10::optional& running_var_t_opt, + const std::optional& bias_t_opt, + const std::optional& running_mean_t_opt, + const std::optional& running_var_t_opt, bool training, double exponential_average_factor, double epsilon) { @@ -274,10 +274,10 @@ std::tuple cudnn_batch_norm_backward( const Tensor& weight_t, // Unused: but we require them to be passed so that double backwards // has access - const c10::optional& running_mean_opt, - const c10::optional& running_var_opt, - const c10::optional& save_mean_t_opt, - const c10::optional& save_var_t_opt, + const std::optional& running_mean_opt, + const std::optional& running_var_opt, + const std::optional& save_mean_t_opt, + const std::optional& save_var_t_opt, double epsilon, const Tensor& reserveSpace) { // See [Note: hacky wrapper removal for optional tensor] diff --git a/aten/src/ATen/native/cudnn/ConvPlaceholders.cpp b/aten/src/ATen/native/cudnn/ConvPlaceholders.cpp index 8475a143f466c..349999e4544f9 100644 --- a/aten/src/ATen/native/cudnn/ConvPlaceholders.cpp +++ b/aten/src/ATen/native/cudnn/ConvPlaceholders.cpp @@ -205,7 +205,7 @@ void raw_cudnn_convolution_backward_weight_out( Tensor cudnn_convolution_relu( const Tensor& input_t, const Tensor& weight_t, - const c10::optional& bias_t, + const std::optional& bias_t, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, @@ -217,8 +217,8 @@ Tensor cudnn_convolution_add_relu( const Tensor& input_t, const Tensor& weight_t, const Tensor& z_t, - const c10::optional& alpha, - const c10::optional& bias_t, + const std::optional& alpha, + const std::optional& bias_t, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, diff --git a/aten/src/ATen/native/cudnn/ConvShared.cpp b/aten/src/ATen/native/cudnn/ConvShared.cpp index 104ae8c70803d..09a10581ab142 100644 --- a/aten/src/ATen/native/cudnn/ConvShared.cpp +++ b/aten/src/ATen/native/cudnn/ConvShared.cpp @@ -705,7 +705,7 @@ std::tuple cudnn_convolution_transpose_backward( Tensor cudnn_convolution_relu( const Tensor& input_t, const Tensor& weight_t, - const c10::optional& bias_t, + const std::optional& bias_t, IntArrayRef stride, IntArrayRef 
padding, IntArrayRef dilation, @@ -758,8 +758,8 @@ Tensor cudnn_convolution_add_relu( const Tensor& input_t, const Tensor& weight_t, const Tensor& z_t, - const c10::optional& alpha, - const c10::optional& bias_t, + const std::optional& alpha, + const std::optional& bias_t, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, diff --git a/aten/src/ATen/native/cudnn/RNN.cpp b/aten/src/ATen/native/cudnn/RNN.cpp index 05b1df3114f85..55c666eeca83c 100644 --- a/aten/src/ATen/native/cudnn/RNN.cpp +++ b/aten/src/ATen/native/cudnn/RNN.cpp @@ -51,9 +51,9 @@ std::tuple _cudnn_rnn( const Tensor& input_r, TensorList weight, int64_t weight_stride0, - const c10::optional& weight_buf_r_opt, + const std::optional& weight_buf_r_opt, const Tensor& hx, - const c10::optional& cx_opt, + const std::optional& cx_opt, int64_t fn_mode, int64_t fn_hidden_size, int64_t fn_proj_size, @@ -63,7 +63,7 @@ std::tuple _cudnn_rnn( bool fn_train, bool fn_bidirectional, IntArrayRef fn_batch_sizes, - const c10::optional& fn_dropout_state_opt) { + const std::optional& fn_dropout_state_opt) { AT_ERROR("_cudnn_rnn: ATen not compiled with cuDNN support"); } @@ -73,11 +73,11 @@ std::tuple> _cudnn_rnn_backward( int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, - const c10::optional& cx_opt, + const std::optional& cx_opt, const Tensor& output, - const c10::optional& grad_output_r_opt, - const c10::optional& grad_hy_r_opt, - const c10::optional& grad_cy_r_opt, + const std::optional& grad_output_r_opt, + const std::optional& grad_hy_r_opt, + const std::optional& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t proj_size, @@ -87,7 +87,7 @@ std::tuple> _cudnn_rnn_backward( bool train, bool bidirectional, IntArrayRef batch_sizes, - const c10::optional& dropout_state_opt, + const std::optional& dropout_state_opt, const Tensor& reserve, std::array output_mask) { AT_ERROR("_cudnn_rnn_backward: ATen not compiled with cuDNN support"); @@ -97,10 +97,10 @@ Tensor _cudnn_init_dropout_state( double dropout, bool train, int64_t dropout_seed, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory( pin_memory); @@ -1396,9 +1396,9 @@ std::tuple _cudnn_rnn( const Tensor& input_r, TensorList weight, int64_t weight_stride0, - const c10::optional& weight_buf_r_opt, + const std::optional& weight_buf_r_opt, const Tensor& hx, - const c10::optional& cx_opt, + const std::optional& cx_opt, int64_t fn_mode, int64_t fn_hidden_size, int64_t fn_proj_size, @@ -1408,7 +1408,7 @@ std::tuple _cudnn_rnn( bool fn_train, bool fn_bidirectional, IntArrayRef fn_batch_sizes, - const c10::optional& fn_dropout_state_opt) { + const std::optional& fn_dropout_state_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_buf_r_maybe_owned = at::borrow_from_optional_tensor(weight_buf_r_opt); @@ -2105,11 +2105,11 @@ std::tuple> _cudnn_rnn_backward( int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, - const c10::optional& cx_opt, + const std::optional& cx_opt, const Tensor& output, - const c10::optional& grad_output_r_opt, - const c10::optional& grad_hy_r_opt, - const c10::optional& grad_cy_r_opt, + const std::optional& grad_output_r_opt, + const std::optional& grad_hy_r_opt, + const std::optional& grad_cy_r_opt, int64_t mode, 
int64_t hidden_size, int64_t proj_size, @@ -2119,7 +2119,7 @@ std::tuple> _cudnn_rnn_backward( bool train, bool bidirectional, IntArrayRef batch_sizes, - const c10::optional& dropout_state_opt, + const std::optional& dropout_state_opt, const Tensor& reserve, std::array output_mask) { // See [Note: hacky wrapper removal for optional tensor] @@ -2214,10 +2214,10 @@ Tensor _cudnn_init_dropout_state( double dropout, bool train, int64_t dropout_seed, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory( @@ -2304,7 +2304,7 @@ struct DropoutState { // needed for the first time. Note that in this case needed != used, as we // don't need a buffer to e.g. run RNNs in test mode. at::Tensor buffer; - c10::optional event; + std::optional event; std::mutex mutex; #if !defined(USE_ROCM) // cudaStreamGetCaptureInfo will never give back a capture id of 0, so 0 can @@ -2531,8 +2531,8 @@ std::pair _cudnn_impl( } // TODO: try_get_weight_buf returns a Tensor, but _cudnn_rnn below takes a - // c10::optional in weight_buf's slot. Do we want try_get_weight_buf - // to return a c10::optional instead of a defined or undefined Tensor? + // std::optional in weight_buf's slot. Do we want try_get_weight_buf + // to return a std::optional instead of a defined or undefined Tensor? at::cuda::OptionalCUDAGuard guard(input.get_device()); auto weight_buf = try_get_weight_buf( input, diff --git a/aten/src/ATen/native/group_norm.cpp b/aten/src/ATen/native/group_norm.cpp index 1babf82b90e05..85767b7502dc3 100644 --- a/aten/src/ATen/native/group_norm.cpp +++ b/aten/src/ATen/native/group_norm.cpp @@ -61,8 +61,8 @@ void check_group_norm_inputs( std::tuple native_group_norm( const Tensor& X, - const c10::optional& gamma_opt /* optional */, - const c10::optional& beta_opt /* optional */, + const std::optional& gamma_opt /* optional */, + const std::optional& beta_opt /* optional */, int64_t N, int64_t C, int64_t HxW, @@ -107,7 +107,7 @@ std::tuple native_group_norm_backward( const Tensor& X, const Tensor& mean, const Tensor& rstd, - const c10::optional& gamma_opt, + const std::optional& gamma_opt, int64_t N, int64_t C, int64_t HxW, @@ -177,8 +177,8 @@ std::tuple native_group_norm_backward( Tensor group_norm( const Tensor& input, int64_t num_groups, - const c10::optional& weight_opt /* optional */, - const c10::optional& bias_opt /* optional */, + const std::optional& weight_opt /* optional */, + const std::optional& bias_opt /* optional */, double eps, bool /* cudnn_enabled, deprecated */) { // See [Note: hacky wrapper removal for optional tensor] @@ -213,8 +213,8 @@ DEFINE_DISPATCH(GroupNormBackwardKernel); // Ported from pytorch/xla repo std::tuple math_group_norm( const Tensor& input, - const c10::optional& weight_opt, - const c10::optional& bias_opt, + const std::optional& weight_opt, + const std::optional& bias_opt, int64_t N, int64_t C, int64_t HxW, diff --git a/aten/src/ATen/native/layer_norm.cpp b/aten/src/ATen/native/layer_norm.cpp index 27a701dd2eb49..9858840f95223 100644 --- a/aten/src/ATen/native/layer_norm.cpp +++ b/aten/src/ATen/native/layer_norm.cpp @@ -74,7 +74,7 @@ void layer_norm_cpu_out( std::tuple layer_norm_cpu( const Tensor& input, - IntArrayRef normalized_shape, const c10::optional& weight_opt /* optional */, const 
c10::optional& bias_opt /* optional */, + IntArrayRef normalized_shape, const std::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -115,8 +115,8 @@ std::tuple layer_norm_backward_cpu( IntArrayRef normalized_shape, const Tensor& mean, const Tensor& rstd, - const c10::optional& weight_opt /* optional */, - const c10::optional& bias_opt /* optional */, + const std::optional& weight_opt /* optional */, + const std::optional& bias_opt /* optional */, std::array grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = @@ -186,7 +186,7 @@ std::tuple layer_norm_backward_cpu( Tensor layer_norm_symint( const Tensor& input, - c10::SymIntArrayRef normalized_shape, const c10::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, + c10::SymIntArrayRef normalized_shape, const std::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, double eps, bool /* cudnn_enable, deprecated */) { // See [Note: hacky wrapper removal for optional tensor] @@ -204,7 +204,7 @@ DEFINE_DISPATCH(LayerNormBackwardKernel); // Ported from pytorch/xla repo std::tuple math_native_layer_norm( const Tensor& input, - IntArrayRef normalized_shape, const c10::optional& weight_opt, const c10::optional& bias_opt, + IntArrayRef normalized_shape, const std::optional& weight_opt, const c10::optional& bias_opt, double eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); @@ -266,8 +266,8 @@ std::tuple math_native_layer_norm( Tensor rms_norm( const Tensor& input, IntArrayRef normalized_shape, - const c10::optional& weight_opt /* optional */, - c10::optional eps) { + const std::optional& weight_opt /* optional */, + std::optional eps) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); diff --git a/aten/src/ATen/native/layer_norm.h b/aten/src/ATen/native/layer_norm.h index 38e63569586e3..e35ccf8634bcc 100644 --- a/aten/src/ATen/native/layer_norm.h +++ b/aten/src/ATen/native/layer_norm.h @@ -74,8 +74,8 @@ void layer_norm_cpu_out( Tensor rms_norm( const Tensor& input, IntArrayRef normalized_shape, - const c10::optional& weight_opt /* optional */, - c10::optional eps); + const std::optional& weight_opt /* optional */, + std::optional eps); using forward_fn = void (*)( const Tensor& /* X */, diff --git a/aten/src/ATen/native/metal/MetalNeuronType.h b/aten/src/ATen/native/metal/MetalNeuronType.h index b59d163c4ae88..c5cb0b99502c6 100644 --- a/aten/src/ATen/native/metal/MetalNeuronType.h +++ b/aten/src/ATen/native/metal/MetalNeuronType.h @@ -20,8 +20,8 @@ enum class NeuronType { }; static inline NeuronType neuronType( - c10::optional output_min, - c10::optional output_max) { + std::optional output_min, + std::optional output_max) { float inf_max = std::numeric_limits::infinity(); float inf_min = -std::numeric_limits::infinity(); float output_max_ = diff --git a/aten/src/ATen/native/metal/MetalPrepackOpContext.h b/aten/src/ATen/native/metal/MetalPrepackOpContext.h index 02f474ece8da2..4481c879eec29 100644 --- a/aten/src/ATen/native/metal/MetalPrepackOpContext.h +++ b/aten/src/ATen/native/metal/MetalPrepackOpContext.h @@ -9,13 +9,13 @@ namespace metal { using 
SerializationTypeConv2dPrePack = std::tuple< Tensor, - c10::optional, + std::optional, std::vector, std::vector, std::vector, int64_t, - c10::optional, - c10::optional>; + std::optional, + std::optional>; class Conv2dOpContext : public torch::jit::CustomClassHolder { public: @@ -33,13 +33,13 @@ class Conv2dOpContext : public torch::jit::CustomClassHolder { Conv2dOpContext() = delete; Conv2dOpContext( at::Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector stride, std::vector padding, std::vector dilation, int64_t groups, - c10::optional output_min, - c10::optional output_max) + std::optional output_min, + std::optional output_max) : weight_(std::move(weight)), bias_(std::move(bias)), stride_(std::move(stride)), @@ -65,7 +65,7 @@ class Conv2dOpContext : public torch::jit::CustomClassHolder { return weight_; } - const c10::optional& get_bias() const { + const std::optional& get_bias() const { return bias_; } @@ -85,11 +85,11 @@ class Conv2dOpContext : public torch::jit::CustomClassHolder { return groups_; } - const c10::optional& get_output_min() const { + const std::optional& get_output_min() const { return output_min_; } - const c10::optional& get_output_max() const { + const std::optional& get_output_max() const { return output_max_; } @@ -111,22 +111,22 @@ class Conv2dOpContext : public torch::jit::CustomClassHolder { private: Tensor weight_; - c10::optional bias_; + std::optional bias_; std::vector stride_; std::vector padding_; std::vector dilation_; int64_t groups_; - c10::optional output_min_; - c10::optional output_max_; + std::optional output_min_; + std::optional output_max_; std::function releaseCallback_ = nullptr; void* conv2dOp_ = nullptr; // reserved to hold MPSCNNConv2dOp objects }; using SerializationTypeLinearPrePack = std::tuple< Tensor, - c10::optional, - c10::optional, - c10::optional>; + std::optional, + std::optional, + std::optional>; class LinearOpContext : public torch::jit::CustomClassHolder { public: @@ -136,9 +136,9 @@ class LinearOpContext : public torch::jit::CustomClassHolder { LinearOpContext() = delete; LinearOpContext( at::Tensor&& weight, - c10::optional&& bias, - c10::optional output_min, - c10::optional output_max) + std::optional&& bias, + std::optional output_min, + std::optional output_max) : weight_(std::move(weight)), bias_(std::move(bias)), output_min_(std::move(output_min)), @@ -160,15 +160,15 @@ class LinearOpContext : public torch::jit::CustomClassHolder { return weight_; } - const c10::optional& get_bias() const { + const std::optional& get_bias() const { return bias_; } - const c10::optional& get_output_min() const { + const std::optional& get_output_min() const { return output_min_; } - const c10::optional& get_output_max() const { + const std::optional& get_output_max() const { return output_max_; } @@ -190,9 +190,9 @@ class LinearOpContext : public torch::jit::CustomClassHolder { private: Tensor weight_; - c10::optional bias_; - c10::optional output_min_; - c10::optional output_max_; + std::optional bias_; + std::optional output_min_; + std::optional output_max_; void* opaqueOpPtr_ = nullptr; // reserved to hold MPSCNNFullyConnected objects std::function releaseCallback_ = nullptr; }; diff --git a/aten/src/ATen/native/metal/MetalPrepackOpRegister.cpp b/aten/src/ATen/native/metal/MetalPrepackOpRegister.cpp index bbdf713801860..ebf9b9daf6263 100644 --- a/aten/src/ATen/native/metal/MetalPrepackOpRegister.cpp +++ b/aten/src/ATen/native/metal/MetalPrepackOpRegister.cpp @@ -9,13 +9,13 @@ namespace metal { 
 c10::intrusive_ptr<Conv2dOpContext> unpack(
     Tensor&& weight,
-    c10::optional<Tensor>&& bias,
+    std::optional<Tensor>&& bias,
     std::vector<int64_t>&& stride,
     std::vector<int64_t>&& padding,
     std::vector<int64_t>&& dilation,
     int64_t groups,
-    const c10::optional<Scalar>& output_min,
-    const c10::optional<Scalar>& output_max) {
+    const std::optional<Scalar>& output_min,
+    const std::optional<Scalar>& output_max) {
   auto packedWeight = weight.contiguous(MemoryFormat::ChannelsLast);
   return c10::make_intrusive<Conv2dOpContext>(
       std::move(packedWeight),
@@ -30,9 +30,9 @@ c10::intrusive_ptr<Conv2dOpContext> unpack(
 
 c10::intrusive_ptr<LinearOpContext> unpack(
     Tensor&& weight,
-    c10::optional<Tensor>&& bias,
-    const c10::optional<Scalar>& output_min,
-    const c10::optional<Scalar>& output_max) {
+    std::optional<Tensor>&& bias,
+    const std::optional<Scalar>& output_min,
+    const std::optional<Scalar>& output_max) {
   TORCH_CHECK(weight.dim() == 2);
   // Don't need to do `weight.t()`
   auto packedWeight = weight.view({weight.size(0), weight.size(1), 1, 1})
@@ -96,13 +96,13 @@ TORCH_LIBRARY(metal_prepack, m) {
 
 c10::intrusive_ptr<Conv2dOpContext> conv2d_prepack(
     Tensor&& weight,
-    c10::optional<Tensor>&& bias,
+    std::optional<Tensor>&& bias,
     std::vector<int64_t>&& stride,
     std::vector<int64_t>&& padding,
     std::vector<int64_t>&& dilation,
     int64_t groups,
-    const c10::optional<Scalar>& output_min,
-    const c10::optional<Scalar>& output_max) {
+    const std::optional<Scalar>& output_min,
+    const std::optional<Scalar>& output_max) {
   TORCH_CHECK(weight.dim() == 4);
   return c10::make_intrusive<Conv2dOpContext>(
       std::move(weight),
@@ -117,9 +117,9 @@ c10::intrusive_ptr<Conv2dOpContext> conv2d_prepack(
 
 c10::intrusive_ptr<LinearOpContext> linear_prepack(
     Tensor&& weight,
-    c10::optional<Tensor>&& bias,
-    const c10::optional<Scalar>& output_min,
-    const c10::optional<Scalar>& output_max) {
+    std::optional<Tensor>&& bias,
+    const std::optional<Scalar>& output_min,
+    const std::optional<Scalar>& output_max) {
   return c10::make_intrusive<LinearOpContext>(
       std::move(weight), std::move(bias), output_min, output_max);
 }
diff --git a/aten/src/ATen/native/metal/ops/MetalConvolution.h b/aten/src/ATen/native/metal/ops/MetalConvolution.h
index e5a68e45cd929..77053448cbcb4 100644
--- a/aten/src/ATen/native/metal/ops/MetalConvolution.h
+++ b/aten/src/ATen/native/metal/ops/MetalConvolution.h
@@ -9,7 +9,7 @@ namespace metal {
 Tensor conv2d(
     const Tensor& input,
     const Tensor& weight,
-    const c10::optional<Tensor>& bias,
+    const std::optional<Tensor>& bias,
     IntArrayRef stride,
     IntArrayRef padding,
     IntArrayRef dilation,
diff --git a/aten/src/ATen/native/miopen/BatchNorm_miopen.cpp b/aten/src/ATen/native/miopen/BatchNorm_miopen.cpp
index ba0fc04510a65..5a89c01bc0394 100644
--- a/aten/src/ATen/native/miopen/BatchNorm_miopen.cpp
+++ b/aten/src/ATen/native/miopen/BatchNorm_miopen.cpp
@@ -22,13 +22,13 @@ namespace at { namespace native {
 
 // See Note [ATen preprocessor philosophy]
 std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm(
-    const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
+    const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt,
     bool training, double exponential_average_factor, double epsilon) {
   AT_ERROR("miopen_batch_norm: ATen not compiled with MIOpen support");
 }
 
 std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm_backward(
-    const Tensor& input, const Tensor& grad_output, const Tensor& weight, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_var_opt,
+    const Tensor& input, const Tensor& grad_output, const Tensor& weight, const std::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_var_opt,
     double epsilon) {
AT_ERROR("miopen_batch_norm_backward: ATen not compiled with MIOpen support"); } @@ -58,7 +58,7 @@ Tensor expandScale(const Tensor& t, int64_t dim) { } // namespace std::tuple miopen_batch_norm( - const Tensor& input_t, const Tensor& weight_t, const c10::optional& bias_t_opt, const c10::optional& running_mean_t_opt, const c10::optional& running_var_t_opt, + const Tensor& input_t, const Tensor& weight_t, const std::optional& bias_t_opt, const c10::optional& running_mean_t_opt, const c10::optional& running_var_t_opt, bool training, double exponential_average_factor, double epsilon) { // See [Note: hacky wrapper removal for optional tensor] diff --git a/aten/src/ATen/native/miopen/Conv_miopen.cpp b/aten/src/ATen/native/miopen/Conv_miopen.cpp index 88f889c2cc1fa..71b4620ecfdf0 100644 --- a/aten/src/ATen/native/miopen/Conv_miopen.cpp +++ b/aten/src/ATen/native/miopen/Conv_miopen.cpp @@ -31,7 +31,7 @@ namespace at { namespace native { // See Note [ATen preprocessor philosophy] at::Tensor miopen_convolution( - const Tensor& input, const Tensor& weight, const c10::optional& bias_opt /* optional */, + const Tensor& input, const Tensor& weight, const std::optional& bias_opt /* optional */, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { AT_ERROR("miopen_convolution: ATen not compiled with MIOpen support"); @@ -64,7 +64,7 @@ std::tuple miopen_convolution_backward( } at::Tensor miopen_convolution_transpose( - const Tensor& input, const Tensor& weight, const c10::optional& bias_opt /* optional */, + const Tensor& input, const Tensor& weight, const std::optional& bias_opt /* optional */, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { AT_ERROR("miopen_convolution_transpose: ATen not compiled with MIOpen support"); @@ -92,7 +92,7 @@ std::tuple miopen_convolution_transpose_backwa } at::Tensor miopen_depthwise_convolution( - const Tensor& input, const Tensor& weight, const c10::optional& bias_opt /* optional */, + const Tensor& input, const Tensor& weight, const std::optional& bias_opt /* optional */, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { AT_ERROR("miopen_depthwise_convolution: ATen not compiled with MIOpen support"); @@ -122,13 +122,13 @@ std::tuple miopen_depthwise_convolution_backwa at::Tensor miopen_convolution_add_relu( const at::Tensor& input, const at::Tensor& weight, const at::Tensor& z, - const c10::optional& alpha, const c10::optional& bias, IntArrayRef stride, + const std::optional& alpha, const c10::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { AT_ERROR("miopen_convolution_add_relu: ATen not compiled with MIOpen support"); } at::Tensor miopen_convolution_relu( - const at::Tensor& input, const at::Tensor& weight, const c10::optional& bias, + const at::Tensor& input, const at::Tensor& weight, const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { AT_ERROR("miopen_convolution_relu: ATen not compiled with MIOpen support"); } @@ -795,7 +795,7 @@ Tensor miopen_convolution_forward( } Tensor miopen_convolution( - const Tensor& input_t, const Tensor& weight_t, const c10::optional& bias_t_opt, + const Tensor& input_t, const Tensor& weight_t, const std::optional& bias_t_opt, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool 
benchmark, bool deterministic) { @@ -896,7 +896,7 @@ Tensor miopen_depthwise_convolution_forward( } Tensor miopen_depthwise_convolution( - const Tensor& input_t, const Tensor& weight_t, const c10::optional& bias_t_opt, + const Tensor& input_t, const Tensor& weight_t, const std::optional& bias_t_opt, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { @@ -1463,7 +1463,7 @@ std::tuple miopen_depthwise_convolution_backwa } Tensor miopen_convolution_transpose( - const Tensor& input_t, const Tensor& weight_t, const c10::optional& bias_t_opt, + const Tensor& input_t, const Tensor& weight_t, const std::optional& bias_t_opt, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { @@ -1552,8 +1552,8 @@ Tensor miopen_convolution_add_relu( const Tensor& input, const Tensor& weight, const Tensor& z, - const c10::optional& alpha, - const c10::optional& bias, + const std::optional& alpha, + const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, @@ -1607,7 +1607,7 @@ Tensor miopen_convolution_add_relu( Tensor miopen_convolution_relu( const Tensor& input, const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, diff --git a/aten/src/ATen/native/miopen/RNN_miopen.cpp b/aten/src/ATen/native/miopen/RNN_miopen.cpp index 7b2b2ab80e553..2cba1aa3aef14 100644 --- a/aten/src/ATen/native/miopen/RNN_miopen.cpp +++ b/aten/src/ATen/native/miopen/RNN_miopen.cpp @@ -29,18 +29,18 @@ namespace at { namespace native { std::tuple miopen_rnn( const Tensor& input_r, TensorList weight, int64_t weight_stride0, - const Tensor& hx, const c10::optional& cx_opt, + const Tensor& hx, const std::optional& cx_opt, int64_t fn_mode, int64_t fn_hidden_size, int64_t fn_num_layers, bool batch_first, double fn_dropout, bool fn_train, bool fn_bidirectional, - IntArrayRef fn_batch_sizes, const c10::optional& fn_dropout_state_opt + IntArrayRef fn_batch_sizes, const std::optional& fn_dropout_state_opt ) { AT_ERROR("miopen_rnn : ATen not compiled with MIOpen support."); } std::tuple> miopen_rnn_backward( - const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const c10::optional& cx_opt, - const Tensor& output, const c10::optional& grad_output_r_opt, const c10::optional& grad_hy_r_opt, const c10::optional& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, - double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional& dropout_state_opt, + const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const std::optional& cx_opt, + const Tensor& output, const std::optional& grad_output_r_opt, const c10::optional& grad_hy_r_opt, const c10::optional& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, + double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const std::optional& dropout_state_opt, const Tensor& reserve, std::array output_mask ) { AT_ERROR("miopen_rnn_backward: ATen not compiled with MIOpen support."); @@ -444,10 +444,10 @@ std::vector _output_size(const RNNDescriptorParams& rnn, const TensorDe std::tuple miopen_rnn( const Tensor& input_r, TensorList weight, int64_t weight_stride0, - const Tensor& hx, const c10::optional& cx_opt, + const Tensor& 
      hx, const std::optional& cx_opt, int64_t fn_mode, int64_t fn_hidden_size, int64_t fn_num_layers, bool batch_first, double fn_dropout, bool fn_train, bool fn_bidirectional,
-      IntArrayRef fn_batch_sizes, const c10::optional& fn_dropout_state_opt
+      IntArrayRef fn_batch_sizes, const std::optional& fn_dropout_state_opt
       ) {
   // See [Note: hacky wrapper removal for optional tensor]
   c10::MaybeOwned cx_maybe_owned = at::borrow_from_optional_tensor(cx_opt);
@@ -758,9 +758,9 @@ std::vector miopen_rnn_backward_weight(
 }
 std::tuple> miopen_rnn_backward(
-    const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const c10::optional& cx_opt,
-    const Tensor& output, const c10::optional& grad_output_r_opt, const c10::optional& grad_hy_r_opt, const c10::optional& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
-    double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional& dropout_state_opt,
+    const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const std::optional& cx_opt,
+    const Tensor& output, const std::optional& grad_output_r_opt, const c10::optional& grad_hy_r_opt, const c10::optional& grad_cy_r_opt, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
+    double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const std::optional& dropout_state_opt,
     const Tensor& reserve, std::array output_mask
     ) {
   // See [Note: hacky wrapper removal for optional tensor]
diff --git a/aten/src/ATen/native/mkldnn/Common.h b/aten/src/ATen/native/mkldnn/Common.h
index 4e048ebce7597..baf823a9bcec7 100644
--- a/aten/src/ATen/native/mkldnn/Common.h
+++ b/aten/src/ATen/native/mkldnn/Common.h
@@ -13,7 +13,7 @@ namespace mkldnn {
 struct ContextConv final {
   ideep::tensor weight_packed_;
-  c10::optional at_bias_;
+  std::optional at_bias_;
   std::vector padding_;
   std::vector stride_;
   std::vector dilation_;
@@ -24,7 +24,7 @@ struct ContextConv final {
   ContextConv(
       ideep::tensor&& weight_packed,
-      c10::optional at_bias,
+      std::optional at_bias,
       std::vector padding,
       std::vector stride,
       std::vector dilation,
diff --git a/aten/src/ATen/native/mkldnn/Conv.cpp b/aten/src/ATen/native/mkldnn/Conv.cpp
index 3e41e2f1071d0..09dca06e2b5ae 100644
--- a/aten/src/ATen/native/mkldnn/Conv.cpp
+++ b/aten/src/ATen/native/mkldnn/Conv.cpp
@@ -22,7 +22,7 @@ namespace at { namespace native {
 Tensor mkldnn_convolution(
-    const Tensor& input, const Tensor& weight, const c10::optional& bias_opt,
+    const Tensor& input, const Tensor& weight, const std::optional& bias_opt,
     IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {
   TORCH_CHECK(false, "mkldnn_convolution_forward: ATen not compiled with MKLDNN support");
 }
@@ -48,7 +48,7 @@ static std::tuple mkldnn_convolution_backward(
 REGISTER_NO_CPU_DISPATCH(mkldnn_convolution_backward_stub);
 static Tensor mkldnn_convolution_transpose(
-    const Tensor& input, const Tensor& weight, const c10::optional& bias_opt,
+    const Tensor& input, const Tensor& weight, const std::optional& bias_opt,
     IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {
   TORCH_CHECK(false, "mkldnn_convolution_transpose: ATen not compiled with MKLDNN support");
 }
@@ -259,16 +259,16 @@ static void _mkldnn_convolution_out (
 static Tensor _mkldnn_convolution(
     const Tensor& input_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef stride,
     IntArrayRef dilation,
     int64_t groups,
     bool use_channels_last,
     c10::string_view attr = "none",
-    torch::List> scalars =
-        torch::List>(),
-    c10::optional algorithm = c10::nullopt) {
+    torch::List> scalars =
+        torch::List>(),
+    std::optional algorithm = c10::nullopt) {
   ideep::attr_t op_attr = ideep::attr_t();
   if (attr != "none") {
     auto it = fusion_unary_attr_map().find(attr);
@@ -324,7 +324,7 @@ static Tensor _mkldnn_convolution(
 Tensor mkldnn_convolution(
     const Tensor& input_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef stride,
     IntArrayRef dilation,
@@ -345,14 +345,14 @@ namespace{
 Tensor mkldnn_convolution_pointwise(
     const Tensor& input_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef stride,
     IntArrayRef dilation,
     int64_t groups,
     c10::string_view attr,
-    torch::List> scalars,
-    c10::optional algorithm) {
+    torch::List> scalars,
+    std::optional algorithm) {
   c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
   bool use_channels_last =
       weight_t.is_mkldnn() || mkldnn_conv_use_channels_last(input_t, weight_t);
@@ -382,16 +382,16 @@ Tensor mkldnn_convolution_pointwise_binary(
     const Tensor& input_t,
     const Tensor& other_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef stride,
     IntArrayRef dilation,
     int64_t groups,
     c10::string_view binary_attr,
-    c10::optional alpha,
-    c10::optional unary_attr,
-    torch::List> unary_scalars,
-    c10::optional unary_algorithm) {
+    std::optional alpha,
+    std::optional unary_attr,
+    torch::List> unary_scalars,
+    std::optional unary_algorithm) {
   TORCH_CHECK(
       input_t.ndimension() == 4 || input_t.ndimension() == 5,
       "mkldnn_convolution_pointwise_binary: currently only support 2d and 3d")
@@ -546,16 +546,16 @@ Tensor& mkldnn_convolution_pointwise_binary_(
     Tensor& other_t,
     const Tensor& input_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef stride,
     IntArrayRef dilation,
     int64_t groups,
     c10::string_view binary_attr,
-    c10::optional alpha,
-    c10::optional unary_attr,
-    torch::List> unary_scalars,
-    c10::optional unary_algorithm) {
+    std::optional alpha,
+    std::optional unary_attr,
+    torch::List> unary_scalars,
+    std::optional unary_algorithm) {
   // other_t += convolution(...), other_t = unary(other_t)
   TORCH_CHECK(
       input_t.ndimension() == 4 || input_t.ndimension() == 5,
@@ -664,7 +664,7 @@ std::vector _original_deconv_weight_size(
 Tensor _mkldnn_convolution_transpose(
     const Tensor& input_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef output_padding,
     IntArrayRef stride,
@@ -672,9 +672,9 @@ Tensor _mkldnn_convolution_transpose(
     int64_t groups,
     bool use_channels_last,
     c10::string_view attr = "none",
-    torch::List> scalars =
-        torch::List>(),
-    c10::optional algorithm = c10::nullopt) {
+    torch::List> scalars =
+        torch::List>(),
+    std::optional algorithm = c10::nullopt) {
   ideep::attr_t op_attr = ideep::attr_t();
   if (attr != "none") {
     auto it = fusion_unary_attr_map().find(attr);
@@ -760,15 +760,15 @@ Tensor _mkldnn_convolution_transpose(
 Tensor mkldnn_convolution_transpose_pointwise(
     const Tensor& input_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef output_padding,
     IntArrayRef stride,
     IntArrayRef dilation,
     int64_t groups,
     c10::string_view attr,
-    torch::List> scalars,
-    c10::optional algorithm) {
+    torch::List> scalars,
+    std::optional algorithm) {
   c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
   bool use_channels_last =
       weight_t.is_mkldnn() || mkldnn_conv_use_channels_last(input_t, weight_t);
@@ -791,15 +791,15 @@ Tensor mkldnn_convolution_transpose_pointwise(
 Tensor mkldnn_convolution_transpose_pointwise_meta(
     const Tensor& input_t,
     const Tensor& weight_t,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef output_padding,
     IntArrayRef stride,
     IntArrayRef dilation,
     int64_t groups,
     c10::string_view attr,
-    torch::List> scalars,
-    c10::optional algorithm) {
+    torch::List> scalars,
+    std::optional algorithm) {
   std::vector weight_IOHW_sizes = _original_deconv_weight_size(weight_t, groups);
   int64_t dim = input_t.ndimension() - 2;
@@ -941,7 +941,7 @@ namespace{
 Tensor mkldnn_convolution_transpose(
     const Tensor& input,
     const Tensor& weight,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef output_padding,
     IntArrayRef stride,
diff --git a/aten/src/ATen/native/mkldnn/ConvPrepack.cpp b/aten/src/ATen/native/mkldnn/ConvPrepack.cpp
index 4fb126f25cf09..cab4f1efa55eb 100644
--- a/aten/src/ATen/native/mkldnn/ConvPrepack.cpp
+++ b/aten/src/ATen/native/mkldnn/ConvPrepack.cpp
@@ -19,7 +19,7 @@ namespace convolution {
 c10::intrusive_ptr createConvPrePackOpContext(
     Tensor weight,
-    c10::optional bias,
+    std::optional bias,
     std::vector stride,
     std::vector padding,
     std::vector dilation,
@@ -43,7 +43,7 @@ c10::intrusive_ptr createConvPrePackOpContext(
 ContextConv create(
     const Tensor& weight,
-    const c10::optional& bias,
+    const std::optional& bias,
     const IntArrayRef padding,
     const IntArrayRef stride,
     const IntArrayRef dilation,
@@ -98,7 +98,7 @@ static void _mkldnn_convolution_out(
     const ideep::tensor& x,
     ideep::tensor& y,
     const ideep::tensor& w,
-    const c10::optional& b,
+    const std::optional& b,
     IntArrayRef padding,
     IntArrayRef stride,
     IntArrayRef dilation,
@@ -147,7 +147,7 @@ static void mkldnn_convolution_out(
     const Tensor& input,
     ideep::tensor& mkldnn_output,
     const ideep::tensor& mkldnn_weight,
-    const c10::optional& bias_opt,
+    const std::optional& bias_opt,
     IntArrayRef padding,
     IntArrayRef stride,
     IntArrayRef dilation,
@@ -160,7 +160,7 @@ static void mkldnn_convolution_out(
   c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
   const ideep::tensor mkldnn_input = itensor_from_tensor(input);
-  c10::optional mkldnn_bias{c10::nullopt};
+  std::optional mkldnn_bias{c10::nullopt};
   if (bias.defined()) {
     mkldnn_bias = itensor_from_tensor(bias);
   }
diff --git a/aten/src/ATen/native/mkldnn/ConvPrepack.h b/aten/src/ATen/native/mkldnn/ConvPrepack.h
index 03189c5f5e706..db858b9bb46d9 100644
--- a/aten/src/ATen/native/mkldnn/ConvPrepack.h
+++ b/aten/src/ATen/native/mkldnn/ConvPrepack.h
@@ -14,7 +14,7 @@ namespace convolution {
 c10::intrusive_ptr createConvPrePackOpContext(
     Tensor weight,
-    c10::optional bias,
+    std::optional bias,
     std::vector stride,
     std::vector padding,
     std::vector dilation,
@@ -28,7 +28,7 @@ Tensor conv_run(
 ContextConv create(
     const Tensor& weight,
-    const c10::optional& bias,
+    const std::optional& bias,
     const IntArrayRef padding,
     const IntArrayRef stride,
     const IntArrayRef dilation,
diff --git a/aten/src/ATen/native/mkldnn/Linear.cpp b/aten/src/ATen/native/mkldnn/Linear.cpp
index 71d033fca3b86..70434fde7e479
100644 --- a/aten/src/ATen/native/mkldnn/Linear.cpp +++ b/aten/src/ATen/native/mkldnn/Linear.cpp @@ -26,7 +26,7 @@ namespace native { Tensor mkldnn_linear( const Tensor& self, - const Tensor& weight, const c10::optional& bias_opt) { + const Tensor& weight, const std::optional& bias_opt) { TORCH_CHECK(false, "mkldnn_linear: ATen not compiled with MKLDNN support"); } Tensor mkldnn_linear_backward_input( @@ -58,7 +58,7 @@ namespace native { Tensor mkldnn_linear( const Tensor& self, - const Tensor& weight_t, const c10::optional& bias_opt) { + const Tensor& weight_t, const std::optional& bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; @@ -183,10 +183,10 @@ std::tuple mkldnn_linear_backward( static Tensor mkldnn_linear_pointwise( const Tensor& input_t, const Tensor& weight_t, - const c10::optional& bias_opt, + const std::optional& bias_opt, c10::string_view attr, - torch::List> scalars, - c10::optional algorithm) { + torch::List> scalars, + std::optional algorithm) { auto input = input_t.contiguous(); auto input_size = input.sizes(); @@ -218,7 +218,7 @@ static Tensor mkldnn_linear_pointwise( const ideep::tensor mkldnn_input = itensor_view_from_dense(input_reshaped); - c10::optional mkldnn_bias{c10::nullopt}; + std::optional mkldnn_bias{c10::nullopt}; if (bias.defined()) { mkldnn_bias = itensor_from_tensor(bias); } @@ -258,7 +258,7 @@ static Tensor mkldnn_linear_pointwise_binary( const Tensor& input_t, const Tensor& other_t, const Tensor& weight_t, - const c10::optional& bias_opt, + const std::optional& bias_opt, c10::string_view attr) { c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -303,7 +303,7 @@ static Tensor mkldnn_linear_pointwise_binary( const ideep::tensor mkldnn_other = itensor_from_tensor(other_reshaped); const ideep::tensor mkldnn_input = itensor_view_from_dense(input_reshaped); - c10::optional mkldnn_bias{c10::nullopt}; + std::optional mkldnn_bias{c10::nullopt}; if (bias.defined()) { mkldnn_bias = itensor_from_tensor(bias); } @@ -339,7 +339,7 @@ static Tensor mkl_linear( const Tensor& self, const Tensor& mkl_weight_t, const Tensor& origin_weight_t, - const c10::optional& bias_opt, + const std::optional& bias_opt, const int64_t prepack_batch_size) { c10::MaybeOwned bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); @@ -427,7 +427,7 @@ static Tensor mkl_linear( const Tensor& self, const Tensor& mkl_weight_t, const Tensor& origin_weight_t, - const c10::optional& bias_opt, + const std::optional& bias_opt, const int64_t prepack_batch_size) { TORCH_CHECK(false, "mkl_linear: ATen not compiled with MKL support"); } diff --git a/aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp b/aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp index 061d154f3b40f..e6fdbb0656c07 100644 --- a/aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp +++ b/aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp @@ -61,7 +61,7 @@ ideep::tensor::data_type get_mkldnn_dtype(ScalarType type) { } } -Tensor new_with_itensor_mkldnn(ideep::tensor&& it, c10::optional dtype, c10::optional device) { +Tensor new_with_itensor_mkldnn(ideep::tensor&& it, std::optional dtype, c10::optional device) { // NOTE: int32_t dims from ideep::tensor but sizes needs int64_t // TODO: support int64_t dims in ideep::tensor to avoid extra conversion auto dims = it.get_dims(); diff --git a/aten/src/ATen/native/mkldnn/MKLDNNCommon.h b/aten/src/ATen/native/mkldnn/MKLDNNCommon.h index 
5e9044ce908aa..f41c4ae075be5 100644 --- a/aten/src/ATen/native/mkldnn/MKLDNNCommon.h +++ b/aten/src/ATen/native/mkldnn/MKLDNNCommon.h @@ -29,7 +29,7 @@ static inline ideep::tensor::data_type get_mkldnn_dtype(const Tensor& t) { } // Construct aten MKL-DNN tensor given an ideep tensor -TORCH_API Tensor new_with_itensor_mkldnn(ideep::tensor&& it, c10::optional dtype, c10::optional device); +TORCH_API Tensor new_with_itensor_mkldnn(ideep::tensor&& it, std::optional dtype, c10::optional device); // Retrieve `ideep::tensor` from MKL-DNN tensor TORCH_API ideep::tensor& itensor_from_mkldnn(const Tensor& mkldnn_tensor); diff --git a/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp b/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp index b2901bc522be2..f01cb8da1241f 100644 --- a/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp +++ b/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp @@ -24,7 +24,7 @@ namespace at { namespace native { #if AT_MKLDNN_ENABLED() -Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, c10::optional dtype, c10::optional masked_grad) { +Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, std::optional dtype, c10::optional masked_grad) { TORCH_CHECK(mkldnn_tensor.scalar_type() == ScalarType::Float || mkldnn_tensor.scalar_type() == ScalarType::BFloat16 || mkldnn_tensor.scalar_type() == ScalarType::Half || @@ -73,7 +73,7 @@ Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, c10::optional dt return cpu_tensor.contiguous().resize_(dims, c10::MemoryFormat::Contiguous); } -Tensor dense_to_mkldnn(const Tensor& cpu_tensor, c10::optional dtype) { +Tensor dense_to_mkldnn(const Tensor& cpu_tensor, std::optional dtype) { TORCH_CHECK(cpu_tensor.device().is_cpu(), "dense_to_mkldnn expects CPU tensor input"); TORCH_CHECK(cpu_tensor.layout() == Layout::Strided, @@ -256,7 +256,7 @@ static Tensor mkldnn_reorder_conv_weight( static Tensor mkldnn_reorder_linear_weight( const Tensor& self, - c10::optional batch_size_opt) { + std::optional batch_size_opt) { mkldnn_check_low_precision(self.scalar_type(), "mkldnn_reorder_linear_weight"); auto out_features = self.size(0); auto in_features = self.size(1); @@ -525,11 +525,11 @@ TORCH_LIBRARY_IMPL(mkldnn, CPU, m) { #else -Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, c10::optional dtype, c10::optional masked_grad) { +Tensor mkldnn_to_dense(const Tensor& mkldnn_tensor, std::optional dtype, c10::optional masked_grad) { TORCH_CHECK(false, "MKL-DNN build is disabled"); } -Tensor dense_to_mkldnn(const Tensor& cpu_tensor, c10::optional dtype) { +Tensor dense_to_mkldnn(const Tensor& cpu_tensor, std::optional dtype) { TORCH_CHECK(false, "MKL-DNN build is disabled"); } diff --git a/aten/src/ATen/native/mkldnn/Normalization.cpp b/aten/src/ATen/native/mkldnn/Normalization.cpp index 0aced614a0ea3..e684a931f7752 100644 --- a/aten/src/ATen/native/mkldnn/Normalization.cpp +++ b/aten/src/ATen/native/mkldnn/Normalization.cpp @@ -21,7 +21,7 @@ namespace at { namespace native { std::tuple mkldnn_batch_norm( - const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, + const Tensor& self, const std::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double eps) { @@ -30,7 +30,7 @@ std::tuple mkldnn_batch_norm( std::tuple mkldnn_batch_norm_backward( const Tensor& grad_output, - const Tensor& input, const c10::optional& weight_opt, const c10::optional& 
running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, bool train, double eps, std::array grad_input_mask) { @@ -45,7 +45,7 @@ static std::tuple mkldnn_layer_norm_last_index_weight_bi } std::tuple _mkldnn_batch_norm_legit( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) { @@ -54,7 +54,7 @@ std::tuple _mkldnn_batch_norm_legit( std::tuple _mkldnn_batch_norm_legit_no_stats( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double eps) { @@ -62,15 +62,15 @@ std::tuple _mkldnn_batch_norm_legit_no_stats( } std::tuple _batch_norm_with_update_mkldnn( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, double momentum, double eps) { TORCH_CHECK(false, "_batch_norm_with_update_mkldnn: ATen not compiled with MKLDNN support"); } std::tuple _new_batch_norm_backward_mkldnn( const Tensor& grad_output, const Tensor& input, const Tensor& weight, - const c10::optional& running_mean_opt, const c10::optional& running_var_opt, - const c10::optional& save_mean_opt, const c10::optional& save_var_opt, + const std::optional& running_mean_opt, const c10::optional& running_var_opt, + const std::optional& save_mean_opt, const c10::optional& save_var_opt, bool update, double eps, std::array grad_input_mask, const Tensor& reserve) { TORCH_CHECK(false, "_new_batch_norm_backward_mkldnn: ATen not compiled with MKLDNN support"); } @@ -131,7 +131,7 @@ std::tuple mkldnn_layer_norm_last_index_weight_bias_f32( std::tuple mkldnn_batch_norm( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double eps) { @@ -209,7 +209,7 @@ std::tuple mkldnn_batch_norm( std::tuple _batch_norm_with_update_mkldnn( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, double momentum, double eps) { Tensor output, save_mean, save_var; std::tie(output, save_mean, save_var) = @@ -220,7 +220,7 @@ std::tuple _batch_norm_with_update_mkldnn( std::tuple _mkldnn_batch_norm_legit( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) { @@ -229,7 +229,7 @@ std::tuple _mkldnn_batch_norm_legit( std::tuple 
_mkldnn_batch_norm_legit_no_stats( - const Tensor& input, const c10::optional& weight_opt, const c10::optional& bias_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& bias_opt, bool train, double momentum, double eps) { @@ -239,15 +239,15 @@ std::tuple _mkldnn_batch_norm_legit_no_stats( std::tuple _new_batch_norm_backward_mkldnn( const Tensor& grad_output, const Tensor& input, const Tensor& weight, - const c10::optional& running_mean_opt, const c10::optional& running_var_opt, - const c10::optional& save_mean_opt, const c10::optional& save_var_opt, + const std::optional& running_mean_opt, const c10::optional& running_var_opt, + const std::optional& save_mean_opt, const c10::optional& save_var_opt, bool update, double eps, std::array grad_input_mask, const Tensor& reserve) { return mkldnn_batch_norm_backward(grad_output, input, weight, running_mean_opt, running_var_opt, save_mean_opt, save_var_opt, update, eps, grad_input_mask); } std::tuple mkldnn_batch_norm_backward(const Tensor& grad_output, - const Tensor& input, const c10::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, + const Tensor& input, const std::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, bool train, double eps, std::array grad_input_mask) { diff --git a/aten/src/ATen/native/mkldnn/OpContext.cpp b/aten/src/ATen/native/mkldnn/OpContext.cpp index 3de67ceacf002..820f1273b0cb5 100644 --- a/aten/src/ATen/native/mkldnn/OpContext.cpp +++ b/aten/src/ATen/native/mkldnn/OpContext.cpp @@ -9,7 +9,7 @@ namespace mkldnn { c10::intrusive_ptr MkldnnConvOpContext::create_context( at::Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& stride, std::vector&& dilation, diff --git a/aten/src/ATen/native/mkldnn/OpContext.h b/aten/src/ATen/native/mkldnn/OpContext.h index 21e8cc78a5134..5ae5344ccf509 100644 --- a/aten/src/ATen/native/mkldnn/OpContext.h +++ b/aten/src/ATen/native/mkldnn/OpContext.h @@ -17,7 +17,7 @@ const static std::map fusion_attr_map = { using SerializationTypeConvPrePack = std::tuple< Tensor, - c10::optional, + std::optional, std::vector, std::vector, std::vector, @@ -28,7 +28,7 @@ using SerializationTypeConvPrePack = std::tuple< class ConvOpContext : public torch::jit::CustomClassHolder { protected: Tensor orig_weight_; - c10::optional orig_bias_; + std::optional orig_bias_; std::vector stride_; std::vector padding_; std::vector dilation_; @@ -60,7 +60,7 @@ class MkldnnConvOpContext final : public ConvOpContext { public: MkldnnConvOpContext( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& stride, std::vector&& dilation, @@ -83,7 +83,7 @@ class MkldnnConvOpContext final : public ConvOpContext { static c10::intrusive_ptr create_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& stride, std::vector&& dilation, diff --git a/aten/src/ATen/native/mkldnn/Pooling.cpp b/aten/src/ATen/native/mkldnn/Pooling.cpp index 7b59d7b85fe93..e1a5cfe5dff32 100644 --- a/aten/src/ATen/native/mkldnn/Pooling.cpp +++ b/aten/src/ATen/native/mkldnn/Pooling.cpp @@ -56,7 +56,7 @@ Tensor mkldnn_avg_pool2d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional 
divisor_override) { TORCH_CHECK(false, "mkldnn_avg_pool2d: ATen not compiled with MKLDNN support"); } @@ -66,7 +66,7 @@ Tensor& mkldnn_avg_pool2d_out(const Tensor& self, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor& output) { TORCH_CHECK(false, "mkldnn_avg_pool2d_out: ATen not compiled with MKLDNN support"); } @@ -78,7 +78,7 @@ Tensor mkldnn_avg_pool3d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(false, "mkldnn_avg_pool3d: ATen not compiled with MKLDNN support"); } @@ -88,7 +88,7 @@ Tensor& mkldnn_avg_pool3d_out(const Tensor& self, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor& output) { TORCH_CHECK(false, "mkldnn_avg_pool3d_out: ATen not compiled with MKLDNN support"); } @@ -140,7 +140,7 @@ Tensor& mkldnn_avg_pool2d_backward_out(const Tensor & grad_output, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor & grad_input) { TORCH_CHECK(false, "mkldnn_avg_pool2d_backward_out: ATen not compiled with MKLDNN support"); } @@ -153,7 +153,7 @@ Tensor mkldnn_avg_pool2d_backward( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(false, "mkldnn_avg_pool2d_backward: ATen not compiled with MKLDNN support"); } @@ -164,7 +164,7 @@ Tensor& mkldnn_avg_pool3d_backward_out(const Tensor & grad_output, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor & grad_input) { TORCH_CHECK(false, "mkldnn_avg_pool3d_backward_out: ATen not compiled with MKLDNN support"); } @@ -177,7 +177,7 @@ Tensor mkldnn_avg_pool3d_backward( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(false, "mkldnn_avg_pool3d_backward: ATen not compiled with MKLDNN support"); } @@ -418,7 +418,7 @@ Tensor mkldnn_avg_pool2d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(!divisor_override.has_value(), "mkldnn_avg_pool2d operator does not support divisor"); if (input.scalar_type() == ScalarType::BFloat16) { @@ -443,7 +443,7 @@ Tensor& mkldnn_avg_pool2d_out(const Tensor& input, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor& output) { TORCH_CHECK(false, "mkldnn_avg_pool2d_out: in-place mkldnn operations are not supported yet"); } @@ -455,7 +455,7 @@ Tensor mkldnn_avg_pool3d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { TORCH_CHECK(!divisor_override.has_value(), "mkldnn_avg_pool3d operator does not support divisor"); if (input.scalar_type() == ScalarType::BFloat16) { TORCH_CHECK(mkldnn_bf16_device_check(), @@ -479,7 +479,7 @@ Tensor& mkldnn_avg_pool3d_out(const Tensor& input, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor& output) { TORCH_CHECK(false, "mkldnn_avg_pool3d_out: in-place mkldnn operations are not supported yet"); } @@ -579,7 +579,7 @@ Tensor 
mkldnn_avg_pool2d_backward( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { return _mkldnn_pooling_backward( grad_output, grad_output, @@ -600,7 +600,7 @@ Tensor& mkldnn_avg_pool2d_backward_out(const Tensor & grad_output, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor & grad_input) { TORCH_CHECK(false, "mkldnn_avg_pool2d_backward_out: in-place mkldnn operations are not supported yet"); } @@ -613,7 +613,7 @@ Tensor mkldnn_avg_pool3d_backward( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { return _mkldnn_pooling_backward( grad_output, grad_output, @@ -634,7 +634,7 @@ Tensor& mkldnn_avg_pool3d_backward_out(const Tensor & grad_output, IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override, + std::optional divisor_override, Tensor & grad_input) { TORCH_CHECK(false, "mkldnn_avg_pool3d_backward_out: in-place mkldnn operations are not supported yet"); } diff --git a/aten/src/ATen/native/mkldnn/RNN.cpp b/aten/src/ATen/native/mkldnn/RNN.cpp index afea7f91e79ea..b35504bc19cce 100644 --- a/aten/src/ATen/native/mkldnn/RNN.cpp +++ b/aten/src/ATen/native/mkldnn/RNN.cpp @@ -55,9 +55,9 @@ std::tuple mkldnn_rnn_la const Tensor& output, const Tensor& hy_, const Tensor& cy_, - const c10::optional& grad_output_r_opt, - const c10::optional& grad_hy_r_opt, - const c10::optional& grad_cy_r_opt, + const std::optional& grad_output_r_opt, + const std::optional& grad_hy_r_opt, + const std::optional& grad_cy_r_opt, bool reverse, int64_t mode, int64_t hidden_size, @@ -306,9 +306,9 @@ std::tuple mkldnn_rnn_la const Tensor& output, const Tensor& hy_, const Tensor& cy_, - const c10::optional& grad_output_r_opt, - const c10::optional& grad_hy_r_opt, - const c10::optional& grad_cy_r_opt, + const std::optional& grad_output_r_opt, + const std::optional& grad_hy_r_opt, + const std::optional& grad_cy_r_opt, bool reverse, int64_t mode, int64_t hidden_size, diff --git a/aten/src/ATen/native/mkldnn/TensorFactories.cpp b/aten/src/ATen/native/mkldnn/TensorFactories.cpp index 65a22aa74ed53..81dc5d8880cfa 100644 --- a/aten/src/ATen/native/mkldnn/TensorFactories.cpp +++ b/aten/src/ATen/native/mkldnn/TensorFactories.cpp @@ -12,7 +12,7 @@ namespace at { namespace native { #if AT_MKLDNN_ENABLED() -Tensor empty_mkldnn(IntArrayRef sizes, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional optional_memory_format) { +Tensor empty_mkldnn(IntArrayRef sizes, std::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional optional_memory_format) { TORCH_CHECK( !optional_memory_format.has_value(), "'memory_format' argument is incompatible with mkldnn tensor"); @@ -26,7 +26,7 @@ Tensor empty_mkldnn(IntArrayRef sizes, c10::optional dtype, c10::opt #else -Tensor empty_mkldnn(IntArrayRef sizes, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional optional_memory_format) { +Tensor empty_mkldnn(IntArrayRef sizes, std::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional optional_memory_format) { TORCH_CHECK(false, "empty_mkldnn: MKL-DNN build is disabled"); } diff --git a/aten/src/ATen/native/mkldnn/TensorShape.cpp b/aten/src/ATen/native/mkldnn/TensorShape.cpp index 
ac47648294242..d653d2588ba22 100644 --- a/aten/src/ATen/native/mkldnn/TensorShape.cpp +++ b/aten/src/ATen/native/mkldnn/TensorShape.cpp @@ -26,7 +26,7 @@ Tensor mkldnn_reshape(const Tensor& self, IntArrayRef size) { TORCH_CHECK(false, "mkldnn_reshape: ATen not compiled with MKLDNN support"); } -Tensor mkldnn_clone(const Tensor& self, c10::optional optional_memory_format) { +Tensor mkldnn_clone(const Tensor& self, std::optional optional_memory_format) { TORCH_CHECK(false, "mkldnn_clone: ATen not compiled with MKLDNN support"); } @@ -65,7 +65,7 @@ Tensor mkldnn_reshape(const Tensor& self, IntArrayRef size) { self.options().device_opt()); } -Tensor mkldnn_clone(const Tensor& self, c10::optional optional_memory_format) { +Tensor mkldnn_clone(const Tensor& self, std::optional optional_memory_format) { TORCH_CHECK( !optional_memory_format.has_value(), "unsupported memory format option ", diff --git a/aten/src/ATen/native/mkldnn/Utils.cpp b/aten/src/ATen/native/mkldnn/Utils.cpp index 400eb9165f347..6578b23ff9c92 100644 --- a/aten/src/ATen/native/mkldnn/Utils.cpp +++ b/aten/src/ATen/native/mkldnn/Utils.cpp @@ -79,14 +79,14 @@ void check_mkldnn_binary_fusion_inputs( #if AT_MKLDNN_ENABLED() #define ATTR_FUNC(NAME) \ - [](torch::List> scalars, \ - c10::optional algorithm) { \ + [](torch::List> scalars, \ + std::optional algorithm) { \ return ideep::attr_t::fuse_##NAME(); \ } AttrFunction attr_func_leaky_relu = - [](torch::List> scalars, - c10::optional algorithm) { + [](torch::List> scalars, + std::optional algorithm) { TORCH_CHECK( scalars.size() == 1 && scalars[0].get().toOptional().has_value(), @@ -97,8 +97,8 @@ AttrFunction attr_func_leaky_relu = }; AttrFunction attr_func_hardtanh = - [](torch::List> scalars, - c10::optional algorithm) { + [](torch::List> scalars, + std::optional algorithm) { TORCH_CHECK( scalars.size() == 2 && scalars[0].get().toOptional().has_value() && @@ -112,8 +112,8 @@ AttrFunction attr_func_hardtanh = return ideep::attr_t::fuse_clamp(lower_bound_value, upper_bound_value); }; -AttrFunction attr_func_gelu = [](torch::List> scalars, - c10::optional algorithm) { +AttrFunction attr_func_gelu = [](torch::List> scalars, + std::optional algorithm) { TORCH_CHECK( algorithm.has_value(), "gelu is expected to have one str input: algorithm"); @@ -131,8 +131,8 @@ AttrFunction attr_func_gelu = [](torch::List> scalars, }; AttrFunction attr_func_hardsigmoid = - [](torch::List> scalars, - c10::optional algorithm) { + [](torch::List> scalars, + std::optional algorithm) { ideep::attr_t attr; ideep::post_ops po; po.append_eltwise( diff --git a/aten/src/ATen/native/mkldnn/Utils.h b/aten/src/ATen/native/mkldnn/Utils.h index aa804d6bc1877..75f1b2c1b709a 100644 --- a/aten/src/ATen/native/mkldnn/Utils.h +++ b/aten/src/ATen/native/mkldnn/Utils.h @@ -73,8 +73,8 @@ static inline Tensor may_convert_to_default_contiguous_strides(const Tensor& inp #if AT_MKLDNN_ENABLED() using AttrFunction = std::function>, - c10::optional)>; + torch::List>, + std::optional)>; const std::map& fusion_unary_attr_map(); diff --git a/aten/src/ATen/native/mkldnn/xpu/Conv.cpp b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp index 8ac19605b1c79..7f84704d30907 100644 --- a/aten/src/ATen/native/mkldnn/xpu/Conv.cpp +++ b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp @@ -563,7 +563,7 @@ Tensor _convolution( Tensor convolution_overrideable( const Tensor& input_r, const Tensor& weight_r, - const c10::optional& bias_r_opt, + const std::optional& bias_r_opt, IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_, diff --git 
a/aten/src/ATen/native/mps/TensorFactory.cpp b/aten/src/ATen/native/mps/TensorFactory.cpp index 6fe145a6cc556..03ff521db1046 100644 --- a/aten/src/ATen/native/mps/TensorFactory.cpp +++ b/aten/src/ATen/native/mps/TensorFactory.cpp @@ -50,7 +50,7 @@ static inline void maybe_resize_storage_mps(TensorImpl* self, uint64_t new_size) inline TensorImpl* resize_impl_mps_( TensorImpl* self, IntArrayRef size, - c10::optional stride, + std::optional stride, bool device_guard = true) { if (self->sizes() == size && (!stride || self->strides() == stride)) { return self; @@ -72,11 +72,11 @@ inline TensorImpl* resize_impl_mps_( Tensor empty_mps( IntArrayRef size, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt, - c10::optional memory_format_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt) { return at::detail::empty_mps(size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt); } @@ -84,10 +84,10 @@ Tensor empty_mps( Tensor empty_strided_mps( IntArrayRef size, IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { check_size_nonnegative(size); // empty memory formatempty auto t = at::native::empty_mps( @@ -103,7 +103,7 @@ Tensor empty_strided_mps( const Tensor& resize_mps_( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { if (self.has_names()) { return resize_named_tensor_(self, size, optional_memory_format); } @@ -142,17 +142,17 @@ Tensor& set_storage_mps_(Tensor& result, Storage storage, int64_t storage_offset checkSetStorage(result, storage, storage_offset, size, stride); //std::cout << "set storage_mps " << storage_offset << " stride " << stride << std::endl; result.unsafeGetTensorImpl()->set_storage_offset(storage_offset); - c10::optional stride_opt = stride.data() != nullptr ? - c10::optional(stride) : c10::nullopt; + std::optional stride_opt = stride.data() != nullptr ? + std::optional(stride) : c10::nullopt; at::native::resize_impl_mps_(result.unsafeGetTensorImpl(), size, stride_opt); return result; } Tensor _efficientzerotensor_mps(IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { auto device_ = device_or_default(device); auto allocator = at::native::ZeroTensorAllocator(device_); auto dtype_ = dtype_or_default(dtype); diff --git a/aten/src/ATen/native/nested/NestedTensorBackward.cpp b/aten/src/ATen/native/nested/NestedTensorBackward.cpp index e4465b792c21e..488dab9e37cb2 100644 --- a/aten/src/ATen/native/nested/NestedTensorBackward.cpp +++ b/aten/src/ATen/native/nested/NestedTensorBackward.cpp @@ -197,8 +197,8 @@ std::tuple layer_norm_backward_nested( IntArrayRef normalized_shape, const Tensor& mean, const Tensor& rstd, - const c10::optional& weight_opt /* optional */, - const c10::optional& bias_opt /*{ optional */, + const std::optional& weight_opt /* optional */, + const std::optional& bias_opt /*{ optional */, std::array grad_input_mask) { // For NestedTensors weight and bias are non nested. 
auto* nt_impl_grad = get_nested_tensor_impl(grad); diff --git a/aten/src/ATen/native/nested/NestedTensorFactories.cpp b/aten/src/ATen/native/nested/NestedTensorFactories.cpp index 45425ed63315c..40e5082832021 100644 --- a/aten/src/ATen/native/nested/NestedTensorFactories.cpp +++ b/aten/src/ATen/native/nested/NestedTensorFactories.cpp @@ -8,11 +8,11 @@ namespace native { static TensorOptions verify_empty_parameters( const at::Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { TensorOptions options_ = TensorOptions() .dtype(dtype) .layout(layout) @@ -37,11 +37,11 @@ static TensorOptions verify_empty_parameters( Tensor empty_like_nested( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { auto options = verify_empty_parameters( self, dtype, layout, device, pin_memory, optional_memory_format); auto self_nt = get_nested_tensor_impl(self); @@ -83,12 +83,12 @@ static inline Device ensure_has_index(Device device) { Tensor _to_copy_nested( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, bool non_blocking, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { TORCH_CHECK( !layout.has_value() || self.layout() == layout.value(), "to(options) doesn't support converting to a different layout, " @@ -132,7 +132,7 @@ Tensor& copy_nested_(Tensor& self, const Tensor& src, bool non_blocking) { Tensor clone_nested( const Tensor& self, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { auto memory_format = optional_memory_format.value_or(c10::MemoryFormat::Preserve); auto self_ptr = get_nested_tensor_impl(self); if (memory_format == c10::MemoryFormat::Preserve || diff --git a/aten/src/ATen/native/nested/NestedTensorMath.cpp b/aten/src/ATen/native/nested/NestedTensorMath.cpp index 7d3e826ef53e9..1974b4fe2cea0 100644 --- a/aten/src/ATen/native/nested/NestedTensorMath.cpp +++ b/aten/src/ATen/native/nested/NestedTensorMath.cpp @@ -113,10 +113,10 @@ bool NestedTensor_nested_tensor_from_mask_left_aligned(const Tensor& t, const Te Tensor _nested_tensor_from_tensor_list( TensorList list, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { for (const auto i : c10::irange(list.size())) { if (i > 0) { int64_t dim_i = list[i].dim(); @@ -146,8 +146,8 @@ Tensor _nested_tensor_from_tensor_list( std::tuple nested_layer_norm( const Tensor& input, IntArrayRef normalized_shape, - const c10::optional& weight_opt, - const c10::optional& bias_opt, + const std::optional& weight_opt, + const std::optional& bias_opt, double eps) { TORCH_CHECK(weight_opt && bias_opt, "NestedTensor layer_norm requires weight and bias"); const auto& weight = *weight_opt; @@ -356,7 +356,7 @@ Tensor NestedTensor_sum_dim_CPU( const Tensor& self, OptionalIntArrayRef opt_dims, bool keepdim, - 
c10::optional dtype) { + std::optional dtype) { // Only allow reductions across the last dim auto dims = opt_dims.value_or(IntArrayRef{}); TORCH_CHECK( @@ -479,7 +479,7 @@ Tensor select_nested(const Tensor& self, int64_t dim, int64_t index) { } -std::tuple native_dropout_nested(const Tensor& input, double p, c10::optional train) { +std::tuple native_dropout_nested(const Tensor& input, double p, std::optional train) { auto input_ptr = get_nested_tensor_impl(input); const Tensor& input_buffer = input_ptr-> get_unsafe_storage_as_tensor(), & sizemat = input_ptr->get_nested_sizes(), @@ -587,7 +587,7 @@ Tensor squeeze_dim_nested(const Tensor& self, IntArrayRef dims) { // if tensor.size(dim) != 1 torch.squeeze will return the result, we do the same here for (const auto d : c10::irange(ndim)) { if (mask.test(d)) { - c10::optional size_dim = self_ptr->opt_size(d); + std::optional size_dim = self_ptr->opt_size(d); if (!(size_dim.has_value() && *size_dim == 1)) { mask.reset(d); } @@ -925,7 +925,7 @@ Tensor reshape_as_nested(const Tensor& self, const Tensor& other) { // if an accessor is provided in the future, can replace this std::vector sizes; for (int64_t i = 0; i < other_ptr->dim(); i++) { - c10::optional opt_size = other_ptr->opt_size(i); + std::optional opt_size = other_ptr->opt_size(i); if (opt_size.has_value()) { sizes.push_back(*opt_size); } @@ -937,7 +937,7 @@ Tensor reshape_as_nested(const Tensor& self, const Tensor& other) { return self.reshape(sizes); } -Tensor& normal_nested_(Tensor& self, double mean, double std, c10::optional gen) { +Tensor& normal_nested_(Tensor& self, double mean, double std, std::optional gen) { const auto& self_buf = get_nested_tensor_impl(self)->get_buffer(); self_buf.normal_(mean, std, gen); return self; diff --git a/aten/src/ATen/native/nested/NestedTensorMatmul.cpp b/aten/src/ATen/native/nested/NestedTensorMatmul.cpp index 88e2a94570185..aa683ff854ef6 100644 --- a/aten/src/ATen/native/nested/NestedTensorMatmul.cpp +++ b/aten/src/ATen/native/nested/NestedTensorMatmul.cpp @@ -320,7 +320,7 @@ Tensor& matmul_out_nested( // if an accessor is provided in the future, can replace this std::vector sizes; for (int64_t i = 0; i < function_result_ptr->dim(); i++) { - c10::optional opt_size = function_result_ptr->opt_size(i); + std::optional opt_size = function_result_ptr->opt_size(i); if (opt_size.has_value()) { sizes.push_back(*opt_size); } else { diff --git a/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.cpp b/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.cpp index 96d13c366f7ac..6285f2ca1223e 100644 --- a/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.cpp +++ b/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.cpp @@ -59,7 +59,7 @@ inline void check_nested_tensor_matrix_constraints( Tensor nested_linear( const Tensor& input, const Tensor& weight, - const c10::optional& bias_opt) { + const std::optional& bias_opt) { check_nested_tensor_matrix_constraints(input, weight, c10::string_view{"Linear"}); auto* nt_input = get_nested_tensor_impl(input); const Tensor& input_buffer = nt_input->get_buffer(); @@ -93,7 +93,7 @@ Tensor NestedTensor_times_Tensor_plus_Tensor_addmm( const Tensor& mat2, const c10::Scalar& beta, const c10::Scalar& alpha, - c10::optional use_gelu) { + std::optional use_gelu) { // Interesting case: alpha * NT * T + beta * T const auto* nt_mat1 = get_nested_tensor_impl_or_null(mat1); TORCH_INTERNAL_ASSERT(nt_mat1 != nullptr); @@ -184,7 +184,7 @@ Tensor NestedTensor_softmax_dropout(const Tensor& self, 
const Tensor& query) { } Tensor NestedTensor_softmax_dropout_cuda(const Tensor& self, const Tensor& query) { - c10::optional attn_mask; + std::optional attn_mask; attn_mask = NestedTensor_to_mask(query, 2, self.size(2)); attn_mask = attn_mask->to(query.device(), /*non-blocking=*/true); @@ -211,7 +211,7 @@ Tensor NestedTensor_batch_offsets_from_size_tensor( } -Tensor NestedTensor_to_mask(const Tensor& nt, c10::optional mask_dim, c10::optional mask_dim_length) { +Tensor NestedTensor_to_mask(const Tensor& nt, std::optional mask_dim, c10::optional mask_dim_length) { auto* nt_impl = get_nested_tensor_impl(nt); TORCH_CHECK(nested_tensor_impl_is_contiguous(nt_impl), "to_mask only works on contiguous NestedTensors."); TORCH_CHECK( diff --git a/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.h b/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.h index cee721d7bc8f6..b0df6975304d2 100644 --- a/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.h +++ b/aten/src/ATen/native/nested/NestedTensorTransformerFunctions.h @@ -36,7 +36,7 @@ Tensor NestedTensor_times_Tensor_plus_Tensor_addmm( const Tensor& mat2, const c10::Scalar& beta, const c10::Scalar& alpha, - c10::optional use_gelu = c10::nullopt); + std::optional use_gelu = c10::nullopt); Tensor NestedTensor_add_NestedTensor_in_place( const Tensor& self, @@ -50,7 +50,7 @@ Tensor NestedTensor_from_padded_tensor_cpu( const Tensor& padded, const NestedTensorImpl& nt); -Tensor NestedTensor_to_mask(const Tensor& nt, c10::optional mask_dim, c10::optional mask_dim_length); +Tensor NestedTensor_to_mask(const Tensor& nt, std::optional mask_dim, c10::optional mask_dim_length); template void remove_padding_kernelLauncher( diff --git a/aten/src/ATen/native/nested/NestedTensorUnaryOps.cpp b/aten/src/ATen/native/nested/NestedTensorUnaryOps.cpp index c41b6f15214aa..dc31b2c0de240 100644 --- a/aten/src/ATen/native/nested/NestedTensorUnaryOps.cpp +++ b/aten/src/ATen/native/nested/NestedTensorUnaryOps.cpp @@ -132,7 +132,7 @@ Tensor cos_nested(const Tensor& self) { return map_nt(self, at::cos); } -Tensor _pin_memory_nested(const Tensor& self, c10::optional device) { +Tensor _pin_memory_nested(const Tensor& self, std::optional device) { auto* nt_input = get_nested_tensor_impl(self); const auto& input_buffer = nt_input->get_unsafe_storage_as_tensor(); return wrap_buffer( diff --git a/aten/src/ATen/native/nested/NestedTensorUtils.cpp b/aten/src/ATen/native/nested/NestedTensorUtils.cpp index a5394404543f8..6539475cd1fdd 100644 --- a/aten/src/ATen/native/nested/NestedTensorUtils.cpp +++ b/aten/src/ATen/native/nested/NestedTensorUtils.cpp @@ -59,7 +59,7 @@ std::vector NestedTensor_get_max_size(const NestedTensorImpl& nt) { } int64_t get_consistent_last_dim_of_nested_tensor(const NestedTensorImpl& nt) { - c10::optional last_dim = nt.opt_size(-1); + std::optional last_dim = nt.opt_size(-1); TORCH_CHECK( last_dim != c10::nullopt, "Expected all tensors in nested tensor to have the same trailing dimension, instead last dimension equals: ", diff --git a/aten/src/ATen/native/nested/NestedTensorUtils.h b/aten/src/ATen/native/nested/NestedTensorUtils.h index 3b4f18f11b64b..572b0a827dd06 100644 --- a/aten/src/ATen/native/nested/NestedTensorUtils.h +++ b/aten/src/ATen/native/nested/NestedTensorUtils.h @@ -340,10 +340,10 @@ inline TensorNode get_nested_tensor_structure(at::Tensor tensor) { inline Tensor wrap_tensor_node( TensorNode tensor_node, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + 
std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK( !tensor_node.is_leaf(), "Expected TensorNode to wrap a list of Tensors."); TensorOptions options_ = diff --git a/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp b/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp index 0da0c3e361d1f..977ace14fb34d 100644 --- a/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp +++ b/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp @@ -234,7 +234,7 @@ _scaled_dot_product_flash_attention_nestedtensor_cuda( double dropout_p, bool is_causal, bool return_debug_mask, - c10::optional scale) { + std::optional scale) { Tensor query_buffer_reshaped, key_buffer_reshaped, value_buffer_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_kv, output_shape; int64_t max_seqlen_batch_q{0}, max_seqlen_batch_kv{0}; @@ -285,11 +285,11 @@ _scaled_dot_product_efficient_attention_nestedtensor_cuda( const Tensor& query, const Tensor& key, const Tensor& value, - const c10::optional& attn_bias, + const std::optional& attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, - c10::optional scale) { + std::optional scale) { Tensor query_buffer_reshaped, key_buffer_reshaped, value_buffer_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_kv, output_shape; int64_t max_seqlen_batch_q{0}; @@ -344,7 +344,7 @@ std::tuple _scaled_dot_product_flash_attenti bool is_causal, const at::Tensor& philox_seed, const at::Tensor& philox_offset, - c10::optional scale){ + std::optional scale){ if (!grad_out_.defined()) { return std::make_tuple(Tensor{}, Tensor{}, Tensor{}); } diff --git a/aten/src/ATen/native/quantized/PackedParams.h b/aten/src/ATen/native/quantized/PackedParams.h index a442628573fec..d73bc0adbc4ef 100644 --- a/aten/src/ATen/native/quantized/PackedParams.h +++ b/aten/src/ATen/native/quantized/PackedParams.h @@ -111,11 +111,11 @@ struct LinearPackedParamsBase : public torch::jit::CustomClassHolder { return output; } - virtual std::tuple> unpack() = 0; + virtual std::tuple> unpack() = 0; - virtual c10::optional bias() = 0; + virtual std::optional bias() = 0; - virtual void set_bias(c10::optional /*bias*/) { + virtual void set_bias(std::optional /*bias*/) { throw std::runtime_error( "set_bias is not implemented for this packed " "parameter type"); @@ -136,7 +136,7 @@ struct ConvPackedParamsBase : public torch::jit::CustomClassHolder { const at::Tensor& input, bool reduce_range) = 0; - virtual std::tuple> unpack() = 0; + virtual std::tuple> unpack() = 0; virtual torch::List stride() const = 0; virtual torch::List padding() const = 0; diff --git a/aten/src/ATen/native/quantized/QTensor.cpp b/aten/src/ATen/native/quantized/QTensor.cpp index 9705de0a4a54d..a6817984c12d2 100644 --- a/aten/src/ATen/native/quantized/QTensor.cpp +++ b/aten/src/ATen/native/quantized/QTensor.cpp @@ -188,13 +188,13 @@ QScheme qscheme_quant(const Tensor& self) { Tensor quantized_clone( const Tensor& self, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous); // TODO: To support all features of MemoryFormat::Preserve we need to add // _empty_affine_quantized_strided function and use it similarly to - // Tensor clone(const Tensor& src, c10::optional + // Tensor clone(const Tensor& src, std::optional // optional_memory_format) if (self.is_non_overlapping_and_dense()) -> // 
_empty_affine_quantized_strided if (memory_format == MemoryFormat::Preserve) { diff --git a/aten/src/ATen/native/quantized/TensorAdvancedIndexing.cpp b/aten/src/ATen/native/quantized/TensorAdvancedIndexing.cpp index 4f06b133771d9..11b005dc924c9 100644 --- a/aten/src/ATen/native/quantized/TensorAdvancedIndexing.cpp +++ b/aten/src/ATen/native/quantized/TensorAdvancedIndexing.cpp @@ -121,7 +121,7 @@ Tensor & masked_fill__quantized_cuda(Tensor& self, const Tensor & mask, const Te return masked_fill_impl_quantized_cuda(self, mask, value.item()); } -Tensor& _index_put_impl_quantized_cpu_(Tensor & self, const torch::List>& indices, const Tensor & value, const bool accumulate, const bool unsafe) { +Tensor& _index_put_impl_quantized_cpu_(Tensor & self, const torch::List>& indices, const Tensor & value, const bool accumulate, const bool unsafe) { TORCH_CHECK_INDEX(indices.size() <= (size_t)self.dim(), "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); TORCH_CHECK(!value.is_quantized(), "Value argument for quantized input_put should not be quantized"); TORCH_CHECK(self.qscheme() == c10::kPerTensorAffine, "index_put for quantized tensors is currently only supported for per tensor quantized tensors"); @@ -145,7 +145,7 @@ Tensor& _index_put_impl_quantized_cpu_(Tensor & self, const torch::List& index: indices) { + for (const std::optional& index: indices) { if (index.has_value()) { at::assert_no_overlap(self, *index); } @@ -157,7 +157,7 @@ Tensor& _index_put_impl_quantized_cpu_(Tensor & self, const torch::List>& indices, const Tensor & value, const bool accumulate, const bool unsafe) { +Tensor& _index_put_impl_quantized_cuda_(Tensor & self, const torch::List>& indices, const Tensor & value, const bool accumulate, const bool unsafe) { TORCH_CHECK_INDEX(indices.size() <= (size_t)self.dim(), "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); TORCH_CHECK(!value.is_quantized(), "Value argument for quantized input_put should not be quantized"); TORCH_CHECK(self.qscheme() == c10::kPerTensorAffine, "index_put for quantized tensors is currently only supported for per tensor quantized tensors"); @@ -183,7 +183,7 @@ Tensor& _index_put_impl_quantized_cuda_(Tensor & self, const torch::List& index: indices) { + for (const std::optional& index: indices) { if (index.has_value()) { at::assert_no_overlap(self, *index); } diff --git a/aten/src/ATen/native/quantized/TensorCompare.cpp b/aten/src/ATen/native/quantized/TensorCompare.cpp index def1622863e1d..2cc6ebcda603f 100644 --- a/aten/src/ATen/native/quantized/TensorCompare.cpp +++ b/aten/src/ATen/native/quantized/TensorCompare.cpp @@ -47,7 +47,7 @@ Tensor& min_quantized_unary_out(const Tensor& self, Tensor& out) { std::tuple sort_quantized_cpu_stable( const Tensor& self, - c10::optional stable, + std::optional stable, int64_t dim, bool descending) { auto [sort_int, sort_indicies] = diff --git a/aten/src/ATen/native/quantized/TensorFactories.cpp b/aten/src/ATen/native/quantized/TensorFactories.cpp index e79f657e0de95..54dcdc37c5b23 100644 --- a/aten/src/ATen/native/quantized/TensorFactories.cpp +++ b/aten/src/ATen/native/quantized/TensorFactories.cpp @@ -14,13 +14,13 @@ namespace native { // change to use quantizer Tensor empty_affine_quantized( IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, double scale, int64_t zero_point, - 
c10::optional optional_memory_format) { + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options_ = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -44,11 +44,11 @@ Tensor empty_per_channel_affine_quantized( const Tensor& scales, const Tensor& zero_points, int64_t axis, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options_ = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -70,11 +70,11 @@ Tensor empty_per_channel_affine_quantized( Tensor empty_unknown_quantized( IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options_ = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -93,10 +93,10 @@ Tensor empty_unknown_quantized( Tensor empty_strided_unknown_quantized( IntArrayRef size, IntArrayRef strided, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(false, "empty_strided not supported on quantized tensors yet see https://github.com/pytorch/pytorch/issues/74540") @@ -105,13 +105,13 @@ Tensor empty_strided_unknown_quantized( // Provide better error message if dtype is wrong Tensor empty_affine_quantized_other_backends_stub( IntArrayRef, - c10::optional, - c10::optional, - c10::optional, - c10::optional, + std::optional, + std::optional, + std::optional, + std::optional, double, int64_t, - c10::optional) { + std::optional) { TORCH_CHECK(false, "Creation of quantized tensor requires quantized dtype like torch.quint8"); } @@ -120,11 +120,11 @@ Tensor empty_per_channel_affine_quantized_other_backends_stub( const Tensor&, const Tensor&, int64_t, - c10::optional, - c10::optional, - c10::optional, - c10::optional, - c10::optional) { + std::optional, + std::optional, + std::optional, + std::optional, + std::optional) { TORCH_CHECK(false, "Creation of quantized tensor requires quantized dtype like torch.quint8"); } @@ -133,11 +133,11 @@ Tensor empty_per_channel_affine_quantized_other_backends_stub( Tensor empty_quantized( IntArrayRef size, const Tensor& qtensor, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional memory_format) { TensorOptions specified_options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); diff --git a/aten/src/ATen/native/quantized/cpu/AveragePool2d.cpp b/aten/src/ATen/native/quantized/cpu/AveragePool2d.cpp index 754c7d6bd529b..d7b53f8457868 100644 --- a/aten/src/ATen/native/quantized/cpu/AveragePool2d.cpp +++ b/aten/src/ATen/native/quantized/cpu/AveragePool2d.cpp @@ -47,7 +47,7 @@ static void 
avg_pool2d_out_frame( int padW, int padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { Tensor input_contig = input.contiguous(); auto input_data = input_contig.data_ptr(); auto output_data = output.data_ptr(); @@ -185,7 +185,7 @@ Tensor q_avg_pool2d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) auto [kW, kH] = get_kernel(kernel_size); auto [dW, dH] = get_stride(stride, kW, kH); @@ -265,7 +265,7 @@ Tensor qnnpack_avg_pool2d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { auto [kW, kH] = get_kernel(kernel_size); auto [dW, dH] = get_stride(stride, kW, kH); auto [padW, padH] = get_padding(padding); @@ -362,7 +362,7 @@ Tensor avg_pool2d_quantized_cpu( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { Tensor output; #ifdef USE_PYTORCH_QNNPACK if (at::globalContext().qEngine() == at::QEngine::QNNPACK && diff --git a/aten/src/ATen/native/quantized/cpu/AveragePool3d.cpp b/aten/src/ATen/native/quantized/cpu/AveragePool3d.cpp index 875ae28e46a96..b83e3e313cd08 100644 --- a/aten/src/ATen/native/quantized/cpu/AveragePool3d.cpp +++ b/aten/src/ATen/native/quantized/cpu/AveragePool3d.cpp @@ -100,7 +100,7 @@ Tensor q_avg_pool3d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { auto [kW, kH, kD] = get_kernel(kernel_size); auto [dW, dH, dD] = get_stride(stride, kW, kH, kD); auto [padW, padH, padD] = get_padding(padding); @@ -165,7 +165,7 @@ Tensor avg_pool3d_quantized_cpu( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { Tensor output; AT_DISPATCH_QINT_TYPES(input.scalar_type(), "avg_pool3d_quantized_cpu", [&]() { output = q_avg_pool3d( diff --git a/aten/src/ATen/native/quantized/cpu/EmbeddingPackedParams.h b/aten/src/ATen/native/quantized/cpu/EmbeddingPackedParams.h index 140b716df2691..e6f47d611a19f 100644 --- a/aten/src/ATen/native/quantized/cpu/EmbeddingPackedParams.h +++ b/aten/src/ATen/native/quantized/cpu/EmbeddingPackedParams.h @@ -6,19 +6,19 @@ struct EmbeddingPackedParamsBase : public torch::jit::CustomClassHolder { virtual at::Tensor embeddingbag_byte( const at::Tensor& indices, - const c10::optional& offsets, + const std::optional& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) = 0; virtual at::Tensor embeddingbag_4bit( const at::Tensor& indices, - const c10::optional& offsets, + const std::optional& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) = 0; diff --git a/aten/src/ATen/native/quantized/cpu/LinearUnpackImpl.cpp b/aten/src/ATen/native/quantized/cpu/LinearUnpackImpl.cpp index 7bff3e3d4b443..df74b10d70f97 100644 --- a/aten/src/ATen/native/quantized/cpu/LinearUnpackImpl.cpp +++ b/aten/src/ATen/native/quantized/cpu/LinearUnpackImpl.cpp @@ 
-22,7 +22,7 @@ int register_linear_params(); #ifdef USE_FBGEMM -std::tuple> PackedLinearWeight::unpack() { +std::tuple> PackedLinearWeight::unpack() { auto packB = w.get(); int64_t N = static_cast(packB->numCols()); @@ -53,16 +53,16 @@ std::tuple> PackedLinearWeight::unpack() { // (QLinearUnpackWeightInt8): "); packB->unpack(weight_ptr_int8); - return std::tuple>( + return std::tuple>( weight_origin, bias_); } #endif // USE_FBGEMM #ifdef USE_PYTORCH_QNNPACK -std::tuple> PackedLinearWeightsQnnp:: +std::tuple> PackedLinearWeightsQnnp:: unpack() { if (orig_weight.defined()) { - return std::tuple>( + return std::tuple>( orig_weight, bias_); } else { // Unpacking requires reverting *make_zero_points_and_scales_tensor* @@ -110,14 +110,14 @@ std::tuple> PackedLinearWeightsQnnp:: weight_ptr_int8[i] = (int8_t)(weight_ptr_int8[i] - 128); } - return std::tuple>( + return std::tuple>( weight_origin, bias_); } } #endif // USE_PYTORCH_QNNPACK #ifdef USE_FBGEMM -std::tuple> PackedLinearWeightFp16:: +std::tuple> PackedLinearWeightFp16:: unpack() { auto& packed_weight_ptr = w; @@ -135,8 +135,8 @@ std::tuple> PackedLinearWeightFp16:: #endif // USE_FBGEMM #if AT_MKLDNN_ENABLED() -std::tuple> PackedLinearWeightsOnednn::unpack() { - return std::tuple>( +std::tuple> PackedLinearWeightsOnednn::unpack() { + return std::tuple>( orig_weight_, orig_bias_); } #endif // #if AT_MKLDNN_ENABLED() diff --git a/aten/src/ATen/native/quantized/cpu/Normalization.cpp b/aten/src/ATen/native/quantized/cpu/Normalization.cpp index 0f5fb9884a9c5..e92a9669cce04 100644 --- a/aten/src/ATen/native/quantized/cpu/Normalization.cpp +++ b/aten/src/ATen/native/quantized/cpu/Normalization.cpp @@ -54,8 +54,8 @@ void compute_fused_params( template Tensor q_batch_norm1d_impl( Tensor qx, - c10::optional mb_weight, - c10::optional mb_bias, + std::optional mb_weight, + std::optional mb_bias, Tensor mean, Tensor var, double eps, @@ -162,8 +162,8 @@ Tensor q_batch_norm1d_impl( template Tensor q_batch_norm2d_impl( Tensor qx, - c10::optional mb_weight, - c10::optional mb_bias, + std::optional mb_weight, + std::optional mb_bias, Tensor mean, Tensor var, double eps, @@ -256,8 +256,8 @@ Tensor q_batch_norm2d_impl( template Tensor q_batch_norm3d_impl( Tensor qx, - c10::optional mb_weight, - c10::optional mb_bias, + std::optional mb_weight, + std::optional mb_bias, Tensor mean, Tensor var, double eps, @@ -353,8 +353,8 @@ Tensor q_batch_norm3d_impl( template Tensor q_batch_norm_impl( Tensor qx, - c10::optional mb_weight, - c10::optional mb_bias, + std::optional mb_weight, + std::optional mb_bias, Tensor mean, Tensor var, double eps, @@ -380,7 +380,7 @@ Tensor q_batch_norm_impl( } // namespace Tensor quantized_batch_norm( - const Tensor& qx, const c10::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, + const Tensor& qx, const std::optional& weight_opt /* optional */, const c10::optional& bias_opt /* optional */, const Tensor& mean /* optional */, const Tensor& var /* optional */, double eps, diff --git a/aten/src/ATen/native/quantized/cpu/OnednnUtils.h b/aten/src/ATen/native/quantized/cpu/OnednnUtils.h index 8887bb83deb91..535ccaf9acba1 100644 --- a/aten/src/ATen/native/quantized/cpu/OnednnUtils.h +++ b/aten/src/ATen/native/quantized/cpu/OnednnUtils.h @@ -119,9 +119,9 @@ enum PostOps { struct PackedLinearWeightsOnednn : public LinearPackedParamsBase { PackedLinearWeightsOnednn( std::unique_ptr weight, - c10::optional bias, + std::optional bias, at::Tensor orig_weight, - c10::optional orig_bias) + std::optional orig_bias) : 
weight_(std::move(weight)), bias_(std::move(bias)), orig_weight_(std::move(orig_weight)), @@ -129,9 +129,9 @@ struct PackedLinearWeightsOnednn : public LinearPackedParamsBase { cache_initialized_flag = std::make_unique(); } std::unique_ptr weight_; - c10::optional bias_; + std::optional bias_; at::Tensor orig_weight_; - c10::optional orig_bias_; + std::optional orig_bias_; at::Tensor apply( at::Tensor input, @@ -156,15 +156,15 @@ struct PackedLinearWeightsOnednn : public LinearPackedParamsBase { double output_scale, int64_t output_zero_point); - std::tuple> unpack() override; + std::tuple> unpack() override; - c10::optional bias() override { + std::optional bias() override { return orig_bias_; } static c10::intrusive_ptr prepack( at::Tensor weight, - c10::optional bias); + std::optional bias); private: LinearPrimitiveCache prim_cache; @@ -189,9 +189,9 @@ template struct PackedConvWeightsOnednn : public ConvPackedParamsBase { PackedConvWeightsOnednn( std::unique_ptr weight, - c10::optional bias, + std::optional bias, at::Tensor orig_weight, - c10::optional orig_bias, + std::optional orig_bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -212,9 +212,9 @@ struct PackedConvWeightsOnednn : public ConvPackedParamsBase { } std::unique_ptr weight_; - c10::optional bias_; + std::optional bias_; at::Tensor orig_weight_; - c10::optional orig_bias_; + std::optional orig_bias_; torch::List stride_; torch::List padding_; torch::List output_padding_; @@ -248,11 +248,11 @@ struct PackedConvWeightsOnednn : public ConvPackedParamsBase { double output_scale, int64_t output_zero_point); - std::tuple> unpack() override; + std::tuple> unpack() override; static c10::intrusive_ptr> prepack( at::Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -292,7 +292,7 @@ struct PackedConvWeightsOnednn : public ConvPackedParamsBase { template at::Tensor apply_impl( const at::Tensor& input, - const c10::optional& accum, + const std::optional& accum, double output_scale, int64_t output_zero_point); @@ -316,7 +316,7 @@ static ideep::attr_t create_attr_by_post_op( int64_t input1_zero_point, const ideep::tensor::desc& input1_desc, const c10::string_view& unary_post_op, - const torch::List>& unary_post_op_args, + const torch::List>& unary_post_op_args, const c10::string_view& unary_post_op_algorithm) { using ideep::tensor; if (binary_post_op == "none") { @@ -470,7 +470,7 @@ at::Tensor _qconv_prepack_onednn( torch::List padding, torch::List dilation, int64_t groups, - c10::optional> input_shape=c10::nullopt); + std::optional> input_shape=c10::nullopt); static at::Tensor _quantized_convolution_onednn( at::Tensor act, // contains quantized values but not QTensor @@ -479,7 +479,7 @@ static at::Tensor _quantized_convolution_onednn( at::Tensor weight, // MKLDNN tensor with quantized values at::Tensor weight_scales, at::Tensor weight_zero_points, - c10::optional bias, // Bias is packed if not None + std::optional bias, // Bias is packed if not None torch::List stride, torch::List padding, torch::List dilation, @@ -487,14 +487,14 @@ static at::Tensor _quantized_convolution_onednn( int64_t groups, double output_scale, int64_t output_zero_point, - c10::optional accum=c10::nullopt, // accum to fused with conv add + std::optional accum=c10::nullopt, // accum to fused with conv add double accum_scale=1.0, int64_t accum_zero_point=0, bool fp32_output=false, - c10::optional binary_attr=c10::nullopt, - c10::optional 
binary_alpha=c10::nullopt, - c10::optional unary_attr=c10::nullopt, - torch::List> unary_scalars=torch::List>(), - c10::optional unary_algorithm=c10::nullopt); + std::optional binary_attr=c10::nullopt, + std::optional binary_alpha=c10::nullopt, + std::optional unary_attr=c10::nullopt, + torch::List> unary_scalars=torch::List>(), + std::optional unary_algorithm=c10::nullopt); #endif // #if AT_MKLDNN_ENABLED() diff --git a/aten/src/ATen/native/quantized/cpu/QnnpackUtils.h b/aten/src/ATen/native/quantized/cpu/QnnpackUtils.h index 88ff258be891f..b217c757740b3 100644 --- a/aten/src/ATen/native/quantized/cpu/QnnpackUtils.h +++ b/aten/src/ATen/native/quantized/cpu/QnnpackUtils.h @@ -38,7 +38,7 @@ struct PackedLinearWeightsQnnp : public LinearPackedParamsBase { std::unique_ptr w, at::Tensor orig_weight, at::Tensor bias, - c10::optional input_scale, + std::optional input_scale, at::Tensor w_scales, std::vector&& w_zps) : w(std::move(w)), @@ -57,7 +57,7 @@ struct PackedLinearWeightsQnnp : public LinearPackedParamsBase { at::Tensor orig_weight; at::Tensor bias_; bool per_channel_; - c10::optional input_scale; + std::optional input_scale; at::Tensor w_scales; std::vector w_zero_points; std::vector requantization_scales; @@ -76,15 +76,15 @@ struct PackedLinearWeightsQnnp : public LinearPackedParamsBase { at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override; at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override; - std::tuple> unpack() override; + std::tuple> unpack() override; - c10::optional bias() override { + std::optional bias() override { return bias_; } static c10::intrusive_ptr prepack( at::Tensor weight, - c10::optional bias); + std::optional bias); bool per_channel() const { return per_channel_; @@ -125,7 +125,7 @@ struct PackedConvWeightsQnnp : public ConvPackedParamsBase { torch::List dilation, int64_t groups, bool transpose, - c10::optional input_scale, + std::optional input_scale, std::vector kernel, at::Tensor w_scale, std::vector&& w_zps, @@ -302,7 +302,7 @@ struct PackedConvWeightsQnnp : public ConvPackedParamsBase { int64_t groups_; bool transpose_; bool is_per_channel_; - c10::optional input_scale; + std::optional input_scale; std::vector kernel_; at::Tensor w_scales; std::vector w_zero_points; @@ -323,11 +323,11 @@ struct PackedConvWeightsQnnp : public ConvPackedParamsBase { const at::Tensor& input, bool reduce_range=false) override; - std::tuple> unpack() override; + std::tuple> unpack() override; static c10::intrusive_ptr> prepack( at::Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -438,7 +438,7 @@ Tensor qnnpack_avg_pool2d( IntArrayRef padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override); + std::optional divisor_override); } // qnnp_avgpool_helper } // namespace native } // namespace at diff --git a/aten/src/ATen/native/quantized/cpu/QuantizedOps.h b/aten/src/ATen/native/quantized/cpu/QuantizedOps.h index 3ef8a3f4f4f42..9257f57b65dcd 100644 --- a/aten/src/ATen/native/quantized/cpu/QuantizedOps.h +++ b/aten/src/ATen/native/quantized/cpu/QuantizedOps.h @@ -129,7 +129,7 @@ using qavg_pool2d_fn = void (*)( int padW, int padH, bool count_include_pad, - c10::optional divisor_override); + std::optional divisor_override); using qavg_pool3d_fn = void (*)( const Tensor& qx, @@ -152,7 +152,7 @@ using qavg_pool3d_fn = void (*)( int padH, int padD, bool count_include_pad, - c10::optional divisor_override); + std::optional 
divisor_override); using qupsample_bilinear2d_fn = void (*)( Tensor& output, @@ -164,8 +164,8 @@ using qupsample_bilinear2d_fn = void (*)( int64_t nbatch, int64_t channels, bool align_corners, - c10::optional scales_h, - c10::optional scales_w); + std::optional scales_h, + std::optional scales_w); using qcat_nhwc_fn = Tensor (*)( const MaterializedITensorListRef& qxs, @@ -192,13 +192,13 @@ using qmean_inner_dim_fn = void (*)( const Tensor& /* X */, OptionalIntArrayRef /* opt_dim */, bool /* keepdim */, - c10::optional /* opt_dtype */, + std::optional /* opt_dtype */, Tensor& /* Y */); using qstd_inner_dim_fn = void (*)( const Tensor& /* X */, OptionalIntArrayRef /* dim */, - const c10::optional& /* correction */, + const std::optional& /* correction */, bool /* keepdim */, Tensor& /* Y */); diff --git a/aten/src/ATen/native/quantized/cpu/ReduceOps.cpp b/aten/src/ATen/native/quantized/cpu/ReduceOps.cpp index 0ad1a5ae013bc..113c57f2cc351 100644 --- a/aten/src/ATen/native/quantized/cpu/ReduceOps.cpp +++ b/aten/src/ATen/native/quantized/cpu/ReduceOps.cpp @@ -47,7 +47,7 @@ inline bool is_innnermost_dim( inline bool is_mean_inner_dim_fast_path( const Tensor& self, OptionalIntArrayRef opt_dim, - c10::optional opt_dtype) { + std::optional opt_dtype) { bool is_fast_path = is_innnermost_dim(self, opt_dim) && (!opt_dtype.has_value() || opt_dtype.value() == self.scalar_type()); @@ -131,7 +131,7 @@ Tensor& mean_out_quantized_cpu( const Tensor& self, OptionalIntArrayRef opt_dim, bool keepdim, - c10::optional opt_dtype, + std::optional opt_dtype, Tensor& result) { #ifdef USE_PYTORCH_QNNPACK if (at::globalContext().qEngine() == at::QEngine::QNNPACK && @@ -177,7 +177,7 @@ static Tensor& mean_out_quantized_cpu( const Tensor& self, DimnameList dim, bool keepdim, - c10::optional opt_dtype) { + std::optional opt_dtype) { return mean_out_quantized_cpu( self, dimnames_to_positions(self, dim), keepdim, opt_dtype, result); } @@ -186,7 +186,7 @@ static Tensor& mean_out_quantized_cpu( inline bool is_std_inner_dim_fast_path( const Tensor& self, OptionalIntArrayRef dim, - const c10::optional& correction) { + const std::optional& correction) { // Do not enter fast path if there are too few elements IntArrayRef dims = dim.has_value() ? 
dim.value() : IntArrayRef(); auto all_dims = std::vector(self.dim()); @@ -206,7 +206,7 @@ inline bool is_std_inner_dim_fast_path( Tensor& std_out_quantized_cpu( const Tensor& self, OptionalIntArrayRef dim, - const c10::optional& correction, + const std::optional& correction, bool keepdim, Tensor& result) { // Fast path @@ -230,7 +230,7 @@ Tensor& std_out_quantized_cpu( Tensor std_quantized_cpu( const Tensor& self, OptionalIntArrayRef dim, - const c10::optional& correction, + const std::optional& correction, bool keepdim) { Tensor result; std_out_quantized_cpu(self, dim, correction, keepdim, result); @@ -240,7 +240,7 @@ Tensor std_quantized_cpu( static Tensor std_quantized_cpu( const Tensor& self, DimnameList dim, - const c10::optional& correction, + const std::optional& correction, bool keepdim) { return std_quantized_cpu( self, dimnames_to_positions(self, dim), correction, keepdim); @@ -250,7 +250,7 @@ static Tensor& std_out_quantized_cpu( Tensor& result, const Tensor& self, DimnameList dim, - const c10::optional& correction, + const std::optional& correction, bool keepdim) { return std_out_quantized_cpu( self, dimnames_to_positions(self, dim), correction, keepdim, result); diff --git a/aten/src/ATen/native/quantized/cpu/TensorOperators.cpp b/aten/src/ATen/native/quantized/cpu/TensorOperators.cpp index 1ee305c64fc5f..388218c01ca02 100644 --- a/aten/src/ATen/native/quantized/cpu/TensorOperators.cpp +++ b/aten/src/ATen/native/quantized/cpu/TensorOperators.cpp @@ -81,7 +81,7 @@ AT_FORALL_OPERATORS(DEFINE_COMPARATOR) const Tensor& quantized_resize_cpu_( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because if storage is resized, new elements are uninitialized globalContext().alertNotDeterministic("quantized_resize_cpu_"); diff --git a/aten/src/ATen/native/quantized/cpu/TensorShape.cpp b/aten/src/ATen/native/quantized/cpu/TensorShape.cpp index 58af539cb142f..4c810ef97b5bc 100644 --- a/aten/src/ATen/native/quantized/cpu/TensorShape.cpp +++ b/aten/src/ATen/native/quantized/cpu/TensorShape.cpp @@ -126,8 +126,8 @@ template Tensor qcat( const c10::List& qxs, int64_t dim, - c10::optional scale, - c10::optional zero_point) { + std::optional scale, + std::optional zero_point) { TORCH_CHECK(is_valid_quantization_scheme(qxs[0]), "Only per-tensor quantization is supported in 'cat'!") double _scale = scale.has_value() ? 
scale.value() : qxs.get(0).q_scale(); diff --git a/aten/src/ATen/native/quantized/cpu/UpSampleBilinear2d.cpp b/aten/src/ATen/native/quantized/cpu/UpSampleBilinear2d.cpp index f428745eaa86f..d4dfa7ff08c91 100644 --- a/aten/src/ATen/native/quantized/cpu/UpSampleBilinear2d.cpp +++ b/aten/src/ATen/native/quantized/cpu/UpSampleBilinear2d.cpp @@ -46,8 +46,8 @@ static void upsample_bilinear2d_out_frame( int64_t nbatch, int64_t channels, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { auto* idata = static_cast(input.const_data_ptr()); auto* odata = static_cast(output.data_ptr()); @@ -146,8 +146,8 @@ Tensor upsample_bilinear2d_quantized_cpu( const Tensor& input, IntArrayRef output_size, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", @@ -223,7 +223,7 @@ static Tensor upsample_bilinear2d_quantized_cpu( const Tensor& input, at::OptionalIntArrayRef output_size, bool align_corners, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); diff --git a/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp b/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp index 1020aef797e50..191407bed66a8 100644 --- a/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp +++ b/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp @@ -36,8 +36,8 @@ static void upsample_nearest2d_out_frame( int64_t output_width, int64_t nbatch, int64_t channels, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { float height_scale = compute_scales_value(scales_h, input_height, output_height); float width_scale = compute_scales_value(scales_w, input_width, output_width); @@ -92,8 +92,8 @@ static void upsample_nearest2d_out_frame_nhwc( int64_t output_width, int64_t nbatch, int64_t channels, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { float height_scale = compute_scales_value(scales_h, input_height, output_height); float width_scale = compute_scales_value(scales_w, input_width, output_width); @@ -121,8 +121,8 @@ template Tensor _upsample_nearest2d_quantized_cpu( const Tensor& input, IntArrayRef output_size, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", @@ -205,23 +205,23 @@ using at::native::upsample::get_scale_value; Tensor upsample_nearest2d_quantized_cpu( const Tensor& input, IntArrayRef osize, - c10::optional scale_h, - c10::optional scale_w) { + std::optional scale_h, + std::optional scale_w) { return _upsample_nearest2d_quantized_cpu(input, osize, scale_h, scale_w); } Tensor _upsample_nearest_exact2d_quantized_cpu( const Tensor& input, IntArrayRef osize, - c10::optional scale_h, - c10::optional scale_w) { + std::optional scale_h, + std::optional scale_w) { return _upsample_nearest2d_quantized_cpu(input, osize, scale_h, scale_w); } static Tensor upsample_nearest2d_quantized_cpu( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize 
= compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); @@ -231,7 +231,7 @@ static Tensor upsample_nearest2d_quantized_cpu( static Tensor _upsample_nearest_exact2d_quantized_cpu( const Tensor& input, at::OptionalIntArrayRef output_size, - c10::optional> scale_factors) { + std::optional> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); diff --git a/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp b/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp index 91ddfefcd4d4e..d98883123f057 100644 --- a/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp +++ b/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp @@ -36,9 +36,9 @@ static void upsample_nearest3d_out_frame( int64_t output_width, int64_t nbatch, int64_t channels, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { float depth_scale = compute_scales_value(scales_d, input_depth, output_depth); float height_scale = compute_scales_value(scales_h, input_height, output_height); float width_scale = compute_scales_value(scales_w, input_width, output_width); @@ -93,9 +93,9 @@ static void upsample_nearest3d_out_frame_nhwc( int64_t output_width, int64_t nbatch, int64_t channels, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { float depth_scale = compute_scales_value(scales_d, input_depth, output_depth); float height_scale = compute_scales_value(scales_h, input_height, output_height); float width_scale = compute_scales_value(scales_w, input_width, output_width); @@ -133,9 +133,9 @@ template Tensor _upsample_nearest3d_quantized_cpu( const Tensor& input, IntArrayRef output_size, - c10::optional scales_d, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_d, + std::optional scales_h, + std::optional scales_w) { TORCH_CHECK( output_size.size() == 3, "It is expected output_size equals to 3, but got size ", @@ -217,9 +217,9 @@ Tensor _upsample_nearest3d_quantized_cpu( Tensor upsample_nearest3d_quantized_cpu( const Tensor& input, IntArrayRef osize, - c10::optional scale_d, - c10::optional scale_h, - c10::optional scale_w) { + std::optional scale_d, + std::optional scale_h, + std::optional scale_w) { return _upsample_nearest3d_quantized_cpu( input, osize, scale_d, scale_h, scale_w); } @@ -227,9 +227,9 @@ Tensor upsample_nearest3d_quantized_cpu( Tensor _upsample_nearest_exact3d_quantized_cpu( const Tensor& input, IntArrayRef osize, - c10::optional scale_d, - c10::optional scale_h, - c10::optional scale_w) { + std::optional scale_d, + std::optional scale_h, + std::optional scale_w) { return _upsample_nearest3d_quantized_cpu( input, osize, scale_d, scale_h, scale_w); } diff --git a/aten/src/ATen/native/quantized/cpu/conv_serialization.h b/aten/src/ATen/native/quantized/cpu/conv_serialization.h index 9f452a1cc7213..85451fb57482a 100644 --- a/aten/src/ATen/native/quantized/cpu/conv_serialization.h +++ b/aten/src/ATen/native/quantized/cpu/conv_serialization.h @@ -73,7 +73,7 @@ using ConvParamsSerializationTypeV2 = std::tuple< // non-optional tensors std::vector, // optional tensors - std::vector>>; + std::vector>>; using 
ConvParamsSerializationTypeV3 = std::tuple< // version, int for versions 3 and up @@ -81,7 +81,7 @@ using ConvParamsSerializationTypeV3 = std::tuple< // configuration values std::vector, // optional tensors - std::vector>>; + std::vector>>; // Parses any historical conv packed params format into // the current format. @@ -119,7 +119,7 @@ ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) { const auto& elements = v.toTupleRef().elements(); at::Tensor weight = elements[0].toTensor(); - c10::optional bias = elements[1].toOptional(); + std::optional bias = elements[1].toOptional(); torch::List stride_x_kSpatialDim = elements[2].toTensorList(); torch::List padding_x_kSpatialDim = elements[3].toTensorList(); torch::List dilation_x_kSpatialDim = elements[4].toTensorList(); @@ -150,7 +150,7 @@ ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) { // transpose does not exist in v1, so we fill in a default value config_vals.push_back(0); - std::vector> tensors; + std::vector> tensors; tensors.emplace_back(); tensors.emplace_back(weight); tensors.emplace_back(bias); @@ -161,7 +161,7 @@ ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) { // version 2 const auto& elements = v.toTupleRef().elements(); std::vector non_optional = elements[1].toTensorList().vec(); - std::vector> optional; + std::vector> optional; if (elements[2].isTensorList()) { for (const auto& elem : elements[2].toTensorList()) { @@ -187,7 +187,7 @@ ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) { auto weight = non_optional[1]; auto bias = optional[0]; - std::vector> tensors; + std::vector> tensors; tensors.emplace_back(); tensors.emplace_back(weight); tensors.emplace_back(bias); @@ -213,7 +213,7 @@ ConvParamsSerializationTypeV2 serialize_conv( std::string version = "2"; std::vector non_optional; - std::vector> optional; + std::vector> optional; // create a packed int8_t tensor for conv params std::vector params_vec; @@ -267,7 +267,7 @@ ConvParamsSerializationTypeV3 serialize_conv( auto [weight, bias] = params->unpack(); - std::vector> tensors; + std::vector> tensors; tensors.emplace_back(); tensors.emplace_back(weight); tensors.emplace_back(bias); @@ -287,8 +287,8 @@ c10::intrusive_ptr> deserialize_conv( TORCH_INTERNAL_ASSERT(version == 3, "Unexpected serialized qconv version: ", version); TORCH_CHECK(tensors.size() == 3, "Wrong number of tensors", tensors.size()); - c10::optional weight = tensors[1]; - c10::optional bias = tensors[2]; + std::optional weight = tensors[1]; + std::optional bias = tensors[2]; TORCH_INTERNAL_ASSERT(weight, "Weight should always be present in serialized qconv."); torch::List stride, padding, output_padding, dilation; diff --git a/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp b/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp index d942e2f161a26..d6ac157a116b5 100644 --- a/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp +++ b/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp @@ -433,7 +433,7 @@ TORCH_API int register_conv_params<3>(); TORCH_API int register_linear_params(); TORCH_API int register_linear_params() { - using SerializationType = std::tuple>; + using SerializationType = std::tuple>; static auto register_linear_params = torch::selective_class_( "quantized", TORCH_SELECTIVE_CLASS("LinearPackedParamsBase")) @@ -446,7 +446,7 @@ TORCH_API int register_linear_params() { -> c10::intrusive_ptr< LinearPackedParamsBase> { // __setstate__ at::Tensor weight; - c10::optional bias; + std::optional bias; 
weight = std::move(std::get<0>(state)); bias = std::move(std::get<1>(state)); diff --git a/aten/src/ATen/native/quantized/cpu/fbgemm_utils.h b/aten/src/ATen/native/quantized/cpu/fbgemm_utils.h index bfaf5b93d667b..75b5047713bb0 100644 --- a/aten/src/ATen/native/quantized/cpu/fbgemm_utils.h +++ b/aten/src/ATen/native/quantized/cpu/fbgemm_utils.h @@ -23,7 +23,7 @@ struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase { PackedLinearWeight( std::unique_ptr> w, - c10::optional bias, + std::optional bias, std::vector col_offsets, std::vector w_scale, std::vector w_zp, @@ -35,7 +35,7 @@ struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase { w_zp(std::move(w_zp)), q_scheme(std::move(q_scheme)) {} std::unique_ptr> w; - c10::optional bias_; + std::optional bias_; std::vector col_offsets; std::vector w_scale; std::vector w_zp; @@ -79,15 +79,15 @@ struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase { at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false) override; - std::tuple> unpack() override; + std::tuple> unpack() override; - c10::optional bias() override { + std::optional bias() override { return bias_; } static c10::intrusive_ptr prepack( at::Tensor weight, - c10::optional bias); + std::optional bias); private: template @@ -110,11 +110,11 @@ struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase { struct TORCH_API PackedLinearWeightFp16 : public LinearPackedParamsBase { PackedLinearWeightFp16( std::unique_ptr w, - c10::optional bias) + std::optional bias) : w(std::move(w)), bias_(std::move(bias)) {} std::unique_ptr w; - c10::optional bias_; + std::optional bias_; at::Tensor apply( at::Tensor /*input*/, @@ -143,17 +143,17 @@ struct TORCH_API PackedLinearWeightFp16 : public LinearPackedParamsBase { at::Tensor& output, bool reduce_range = false) override; - std::tuple> unpack() override; + std::tuple> unpack() override; - c10::optional bias() override { + std::optional bias() override { return bias_; } static c10::intrusive_ptr prepack( at::Tensor weight, - c10::optional bias); + std::optional bias); - void set_bias(c10::optional bias) override; + void set_bias(std::optional bias) override; private: template @@ -164,7 +164,7 @@ template struct TORCH_API PackedConvWeight : public ConvPackedParamsBase { PackedConvWeight( std::unique_ptr> w, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -191,7 +191,7 @@ struct TORCH_API PackedConvWeight : public ConvPackedParamsBase { q_scheme(q_scheme) {} std::unique_ptr> w; - c10::optional bias; + std::optional bias; torch::List stride_; torch::List padding_; torch::List output_padding_; @@ -218,11 +218,11 @@ struct TORCH_API PackedConvWeight : public ConvPackedParamsBase { const at::Tensor& input, bool reduce_range) override; - std::tuple> unpack() override; + std::tuple> unpack() override; static c10::intrusive_ptr> prepack( at::Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -393,19 +393,19 @@ struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase { at::Tensor embeddingbag_byte( const at::Tensor& indices, - const c10::optional& offsets, + const std::optional& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool 
is_embedding_op) override; at::Tensor embeddingbag_4bit( const at::Tensor& indices, - const c10::optional& offsets, + const std::optional& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) override; }; diff --git a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp index dc9063ecf46f1..11828f273bbc8 100644 --- a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp +++ b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp @@ -2023,7 +2023,7 @@ void _qavg_pool_nhwc_kernel( int padH, int padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { T* idata = static_cast(qx.data_ptr()); T* odata = static_cast(qy.data_ptr()); int strideC = 1; @@ -2135,7 +2135,7 @@ void qavg_pool2d_nhwc_kernel( int padW, int padH, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { AT_DISPATCH_QINT_TYPES(qx.scalar_type(), "avg_pool2d_nhwc", [&]() { _qavg_pool_nhwc_kernel( qx, @@ -2183,7 +2183,7 @@ void qavg_pool3d_nhwc_kernel( int padH, int padD, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { AT_DISPATCH_QINT_TYPES(qx.scalar_type(), "avg_pool3d_nhwc", [&]() { _qavg_pool_nhwc_kernel( qx, @@ -2288,8 +2288,8 @@ void qupsample_bilinear2d_nhwc_kernel( int64_t nbatch, int64_t channels, bool align_corners, - c10::optional scales_h, - c10::optional scales_w) { + std::optional scales_h, + std::optional scales_w) { AT_DISPATCH_QINT_TYPES(input.scalar_type(), "upsample_bilinear2d_nhwc", [&]() { auto* idata = static_cast(input.data_ptr()); auto* odata = static_cast(output.data_ptr()); @@ -2940,7 +2940,7 @@ void qmean_inner_dim_kernel( const Tensor& self, OptionalIntArrayRef opt_dim, bool keepdim, - c10::optional opt_dtype, + std::optional opt_dtype, Tensor& result) { // 'opt_dtype' should be none or equal to that of input ScalarType dtype = self.scalar_type(); @@ -2989,7 +2989,7 @@ void qmean_inner_dim_kernel( void qstd_inner_dim_kernel( const Tensor& self, OptionalIntArrayRef dim, - const c10::optional& correction_opt, + const std::optional& correction_opt, bool keepdim, Tensor& result) { ScalarType dtype = self.scalar_type(); diff --git a/aten/src/ATen/native/quantized/cpu/qconv.cpp b/aten/src/ATen/native/quantized/cpu/qconv.cpp index f915c014af143..82223d6d3314c 100644 --- a/aten/src/ATen/native/quantized/cpu/qconv.cpp +++ b/aten/src/ATen/native/quantized/cpu/qconv.cpp @@ -1152,7 +1152,7 @@ template template at::Tensor PackedConvWeightsOnednn::apply_impl( const at::Tensor& act, - const c10::optional& accum, + const std::optional& accum, double output_scale, int64_t output_zero_point) { std::string func_name = "quantized::conv"; @@ -1391,7 +1391,7 @@ static at::Tensor _quantized_convolution_onednn( at::Tensor weight, // MKLDNN tensor with quantized values at::Tensor weight_scales, at::Tensor weight_zero_points, - c10::optional bias, // Bias is not packed into MKLDNN tensor + std::optional bias, // Bias is not packed into MKLDNN tensor torch::List stride, torch::List padding, torch::List dilation, @@ -1399,15 +1399,15 @@ static at::Tensor _quantized_convolution_onednn( int64_t groups, double output_scale, int64_t output_zero_point, - c10::optional accum, // accum to 
fused with conv add + std::optional accum, // accum to fused with conv add double accum_scale, int64_t accum_zero_point, - c10::optional output_dtype, - c10::optional binary_attr, - c10::optional binary_alpha, - c10::optional unary_attr, - torch::List> unary_scalars, - c10::optional unary_algorithm) { + std::optional output_dtype, + std::optional binary_attr, + std::optional binary_alpha, + std::optional unary_attr, + torch::List> unary_scalars, + std::optional unary_algorithm) { /*********************************/ /* Checks */ /*********************************/ @@ -1867,17 +1867,17 @@ class QConvoneDNN final { at::Tensor weight, // contains quantized values but not QTensor at::Tensor weight_scales, at::Tensor weight_zero_points, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List dilation, int64_t groups, double output_scale, int64_t output_zero_point, - c10::optional output_dtype, + std::optional output_dtype, c10::string_view attr, - torch::List> scalars, - c10::optional algorithm) { + torch::List> scalars, + std::optional algorithm) { #if AT_MKLDNN_ENABLED() if (act.dim() == 3 || act.dim() == 5) { // Conv1D/3D post op check @@ -1919,19 +1919,19 @@ class QConvoneDNN final { at::Tensor weight, // contains quantized values but not QTensor at::Tensor weight_scales, at::Tensor weight_zero_points, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List dilation, int64_t groups, double output_scale, int64_t output_zero_point, - c10::optional output_dtype, + std::optional output_dtype, c10::string_view binary_attr, - c10::optional alpha, - c10::optional unary_attr, - torch::List> unary_scalars, - c10::optional unary_algorithm) { + std::optional alpha, + std::optional unary_attr, + torch::List> unary_scalars, + std::optional unary_algorithm) { #if AT_MKLDNN_ENABLED() // Conv2D post op check TORCH_CHECK( diff --git a/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp b/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp index 46172f0c199f4..5f76890da2cae 100644 --- a/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp +++ b/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp @@ -28,7 +28,7 @@ c10::intrusive_ptr> PackedConvWeight< kSpatialDim>:: prepack( at::Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -155,7 +155,7 @@ c10::intrusive_ptr> PackedConvWeight< } } - c10::optional bias_contig; + std::optional bias_contig; if (bias.has_value()) { at::Tensor bias_vec = bias.value(); TORCH_CHECK(bias_vec.dim() == 1, "bias should be a vector (1D Tensor)"); @@ -196,7 +196,7 @@ c10::intrusive_ptr> PackedConvWeightsQnnp< kSpatialDim>:: prepack( at::Tensor weight, - c10::optional bias_in, + std::optional bias_in, torch::List stride, torch::List padding, torch::List output_padding, @@ -313,7 +313,7 @@ c10::intrusive_ptr> PackedConvWeightsQnnp< 2>:: prepack( at::Tensor weight, - c10::optional bias_in, + std::optional bias_in, torch::List stride, torch::List padding, torch::List output_padding, @@ -328,7 +328,7 @@ c10::intrusive_ptr> PackedConvWeightsOnednn< kSpatialDim>:: prepack( at::Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -458,7 +458,7 @@ c10::intrusive_ptr> PackedConvWeightsOnednn< packed_weight_p->set_zero_point(wgt_zero_points); std::unique_ptr weight_ptr(packed_weight_p); // Bias - c10::optional onednn_bias{c10::nullopt}; + std::optional 
onednn_bias{c10::nullopt}; if (bias.has_value()) { at::Tensor bias_vec = bias.value(); TORCH_CHECK(bias_vec.dim() == 1, "bias should be a vector (1D Tensor)"); @@ -468,7 +468,7 @@ c10::intrusive_ptr> PackedConvWeightsOnednn< auto bias_desc = ideep::tensor::desc(bias.value().sizes().vec(), dnnl::memory::data_type::f32); ideep::tensor packed_bias; packed_bias.init(bias_desc, bias.value().data_ptr()); - onednn_bias = c10::optional(packed_bias); + onednn_bias = std::optional(packed_bias); } auto ret_ptr = c10::make_intrusive>( PackedConvWeightsOnednn{ @@ -499,7 +499,7 @@ at::Tensor _qconv_prepack_onednn( torch::List padding, torch::List dilation, int64_t groups, - c10::optional> input_shape) { + std::optional> input_shape) { int kSpatialDim = weight.ndimension() - 2; TORCH_CHECK( weight.ndimension() == kSpatialDim + 2, @@ -624,7 +624,7 @@ class QConvPackWeightInt8 final { public: static c10::intrusive_ptr> run_conv( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List dilation, @@ -640,7 +640,7 @@ class QConvPackWeightInt8 final { static c10::intrusive_ptr> run_deconv( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -653,7 +653,7 @@ class QConvPackWeightInt8 final { private: static c10::intrusive_ptr> _run( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -713,7 +713,7 @@ class QConv1dPackWeightInt8 final { public: static c10::intrusive_ptr> run_conv( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List dilation, @@ -725,7 +725,7 @@ class QConv1dPackWeightInt8 final { static c10::intrusive_ptr> run_deconv( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -738,7 +738,7 @@ class QConv1dPackWeightInt8 final { private: static c10::intrusive_ptr> _run( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -814,7 +814,7 @@ class QConvPrepackOneDNN final { torch::List padding, torch::List dilation, int64_t groups, - c10::optional> input_shape) { + std::optional> input_shape) { #if AT_MKLDNN_ENABLED() return _qconv_prepack_onednn( weight, weight_scales, input_scale, input_zero_point, diff --git a/aten/src/ATen/native/quantized/cpu/qconv_unpack_impl.cpp b/aten/src/ATen/native/quantized/cpu/qconv_unpack_impl.cpp index 8af8d62f2f8a9..4f11cc2bc9393 100644 --- a/aten/src/ATen/native/quantized/cpu/qconv_unpack_impl.cpp +++ b/aten/src/ATen/native/quantized/cpu/qconv_unpack_impl.cpp @@ -11,7 +11,7 @@ #ifdef USE_FBGEMM template -std::tuple> PackedConvWeight< +std::tuple> PackedConvWeight< kSpatialDim>::unpack() { auto* packed_weights_p = w.get(); // output channels @@ -90,19 +90,19 @@ std::tuple> PackedConvWeight< at::native::fbgemm_utils::TransposeConvTensorUnpackConversion< kSpatialDim>(unpacked_weights, groups); } - return std::tuple>( + return std::tuple>( unpacked_weights, bias); } -template std::tuple> PackedConvWeight< +template std::tuple> PackedConvWeight< 2>::unpack(); -template std::tuple> PackedConvWeight< +template std::tuple> PackedConvWeight< 3>::unpack(); #endif // USE_FBGEMM #ifdef USE_PYTORCH_QNNPACK template -std::tuple> PackedConvWeightsQnnp< +std::tuple> PackedConvWeightsQnnp< kSpatialDim>::unpack() { TORCH_CHECK( kSpatialDim == 2, @@ -112,25 +112,25 @@ 
std::tuple> PackedConvWeightsQnnp< orig_weight.defined(), "Cannot unpack weights. " "Call at::globalContext()::setReleaseOriginalWeights(false) before packing or loading to enable unpacking."); - return std::tuple>(orig_weight, bias); + return std::tuple>(orig_weight, bias); } -template std::tuple> PackedConvWeightsQnnp< +template std::tuple> PackedConvWeightsQnnp< 2>::unpack(); -template std::tuple> PackedConvWeightsQnnp< +template std::tuple> PackedConvWeightsQnnp< 3>::unpack(); #endif // USE_PYTORCH_QNNPACK #if AT_MKLDNN_ENABLED() template -std::tuple> PackedConvWeightsOnednn< +std::tuple> PackedConvWeightsOnednn< kSpatialDim>::unpack() { - return std::tuple>( + return std::tuple>( orig_weight_.clone(), orig_bias_); } -template std::tuple> PackedConvWeightsOnednn< +template std::tuple> PackedConvWeightsOnednn< 2>::unpack(); -template std::tuple> PackedConvWeightsOnednn< +template std::tuple> PackedConvWeightsOnednn< 3>::unpack(); #endif // #if AT_MKLDNN_ENABLED() diff --git a/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp b/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp index 7e5083057a0ba..8b3f9b8afc8d2 100644 --- a/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp +++ b/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp @@ -38,8 +38,8 @@ at::Tensor& embedding_lookup_fallback_impl( const at::Tensor& weight, const at::Tensor& indices, const at::Tensor& offsets, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, at::Tensor& output, const int64_t block_size, const int64_t output_size, @@ -227,8 +227,8 @@ at::Tensor& embedding_bag_nbit_impl( const at::Tensor& indices, const at::Tensor& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) { TORCH_CHECK(weight.dim() == 2); @@ -399,8 +399,8 @@ at::Tensor& embedding_bag_byte_impl( const at::Tensor& indices, const at::Tensor& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) { TORCH_CHECK(weight.scalar_type() == at::kByte); @@ -558,10 +558,10 @@ at::Tensor& embedding_bag_byte_helper( at::Tensor& output, const at::Tensor& weight, const at::Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) { c10::MaybeOwned offsets; @@ -656,10 +656,10 @@ at::Tensor& _embedding_bag_nbit_helper( const at::Tensor& weight, const int bit_width, const at::Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) { c10::MaybeOwned offsets; @@ -760,10 +760,10 @@ at::Tensor& _embedding_bag_nbit_helper( at::Tensor 
PackedEmbeddingBagWeight::embeddingbag_byte( const at::Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) { auto output = at::empty({0}, packed_w.options().dtype(at::kFloat)); @@ -781,10 +781,10 @@ at::Tensor PackedEmbeddingBagWeight::embeddingbag_byte( at::Tensor PackedEmbeddingBagWeight::embeddingbag_4bit( const at::Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) { if (per_sample_weights_.has_value()) { @@ -819,12 +819,12 @@ Tensor& embedding_bag_byte_rowwise_offsets_out( Tensor& output, const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { return embedding_bag_byte_helper( output, @@ -842,12 +842,12 @@ Tensor& embedding_bag_4bit_rowwise_offsets_out( Tensor& output, const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { if (per_sample_weights_.has_value()) { @@ -877,12 +877,12 @@ static Tensor& embedding_bag_2bit_rowwise_offsets_out( Tensor& output, const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { if (per_sample_weights_.has_value()) { @@ -921,12 +921,12 @@ inline at::Tensor create_empty_from( Tensor embedding_bag_byte_rowwise_offsets( const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { auto output = create_empty_from(weight, at::kFloat); embedding_bag_byte_rowwise_offsets_out( @@ -946,12 +946,12 @@ Tensor embedding_bag_byte_rowwise_offsets( Tensor embedding_bag_4bit_rowwise_offsets( const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool 
pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { auto output = create_empty_from(weight, at::kFloat); embedding_bag_4bit_rowwise_offsets_out( @@ -971,12 +971,12 @@ Tensor embedding_bag_4bit_rowwise_offsets( Tensor embedding_bag_2bit_rowwise_offsets( const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { auto output = create_empty_from(weight, at::kFloat); embedding_bag_2bit_rowwise_offsets_out( @@ -996,12 +996,12 @@ Tensor embedding_bag_2bit_rowwise_offsets( Tensor embedding_bag_byte_rowwise_offsets_meta( const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool /* pruned_weights */, - const c10::optional& /* per_sample_weights_ */, - const c10::optional& /* compressed_indices_mapping */, + const std::optional& /* per_sample_weights_ */, + const std::optional& /* compressed_indices_mapping */, bool include_last_offset) { TORCH_CHECK( indices.dim() == 1 || indices.dim() == 2, @@ -1038,12 +1038,12 @@ class QEmbeddingBag final { static at::Tensor run( const c10::intrusive_ptr& packed_weight, const Tensor& indices, - const c10::optional& offsets, + const std::optional& offsets, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { if (bit_rate == 8) { return packed_weight->embeddingbag_byte( diff --git a/aten/src/ATen/native/quantized/cpu/qembeddingbag.h b/aten/src/ATen/native/quantized/cpu/qembeddingbag.h index 86ed0f530f9c3..644d85fa357ee 100644 --- a/aten/src/ATen/native/quantized/cpu/qembeddingbag.h +++ b/aten/src/ATen/native/quantized/cpu/qembeddingbag.h @@ -8,24 +8,24 @@ Tensor& embedding_bag_byte_rowwise_offsets_out( Tensor& output, const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset); Tensor& embedding_bag_4bit_rowwise_offsets_out( Tensor& output, const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset); Tensor& qembeddingbag_byte_unpack_out(Tensor& output, const Tensor& packed_weight); diff --git a/aten/src/ATen/native/quantized/cpu/qlinear.cpp 
b/aten/src/ATen/native/quantized/cpu/qlinear.cpp index df6df3c35201d..1c180173aab53 100644 --- a/aten/src/ATen/native/quantized/cpu/qlinear.cpp +++ b/aten/src/ATen/native/quantized/cpu/qlinear.cpp @@ -917,17 +917,17 @@ static at::Tensor linear_int8_with_onednn_weight( at::Tensor onednn_weight, // int8 tensor from MkldnnCPU at::Tensor weight_scales, at::Tensor weight_zero_points, - c10::optional bias, // plain tensor + std::optional bias, // plain tensor double output_scale, int64_t output_zero_point, - c10::optional output_dtype, - c10::optional other, // extra input for binary post-op + std::optional output_dtype, + std::optional other, // extra input for binary post-op double other_scale, int64_t other_zero_point, const c10::string_view& binary_post_op, // e.g. "none", "sum", "add" double binary_alpha, const c10::string_view& unary_post_op, // e.g. "none", "relu" - torch::List>& unary_post_op_args, + torch::List>& unary_post_op_args, c10::string_view& unary_post_op_algorithm) { using ideep::tensor; const int64_t dim = input.dim(); @@ -989,7 +989,7 @@ static at::Tensor linear_int8_with_onednn_weight( auto output_size = input.sizes().vec(); output_size[dim - 1] = N; - c10::optional onednn_bias{c10::nullopt}; + std::optional onednn_bias{c10::nullopt}; bool with_bias = bias.has_value(); at::Tensor bias_val_float; if (with_bias) { @@ -1194,15 +1194,15 @@ class QLinearOnednn final { Tensor onednn_weight, // int8 tensor from MkldnnCPU Tensor weight_scales, Tensor weight_zero_points, - c10::optional bias, + std::optional bias, double output_scale, int64_t output_zero_point, - c10::optional output_dtype, + std::optional output_dtype, c10::string_view post_op_name, - torch::List> post_op_args, + torch::List> post_op_args, c10::string_view post_op_algorithm) { #if AT_MKLDNN_ENABLED() - static c10::optional other = c10::nullopt; + static std::optional other = c10::nullopt; static const c10::string_view binary_post_op = "none"; return linear_int8_with_onednn_weight( act, act_scale, act_zero_point, @@ -1223,17 +1223,17 @@ class QLinearOnednn final { Tensor onednn_weight, // int8 tensor from MkldnnCPU Tensor weight_scales, Tensor weight_zero_points, - c10::optional bias, + std::optional bias, double output_scale, int64_t output_zero_point, - c10::optional output_dtype, + std::optional output_dtype, c10::string_view post_op_name, - torch::List> post_op_args, + torch::List> post_op_args, c10::string_view post_op_algorithm) { #if AT_MKLDNN_ENABLED() TORCH_CHECK(act_scale.numel() == 1 && act_zero_point.numel() == 1, "onednn int8 linear: act scale/zp size should be 1"); - static c10::optional other = c10::nullopt; + static std::optional other = c10::nullopt; static const c10::string_view binary_post_op = "none"; return linear_int8_with_onednn_weight( act, act_scale.item().toDouble(), act_zero_point.item().toLong(), @@ -1254,17 +1254,17 @@ class QLinearOnednn final { Tensor onednn_weight, // int8 tensor from MkldnnCPU Tensor weight_scales, Tensor weight_zero_points, - c10::optional bias, + std::optional bias, double output_scale, int64_t output_zero_point, - c10::optional output_dtype, - c10::optional other, // extra input for binary post-op + std::optional output_dtype, + std::optional other, // extra input for binary post-op double other_scale, int64_t other_zero_point, c10::string_view binary_post_op, // e.g. "none", "sum", "add" double binary_alpha, c10::string_view unary_post_op, // e.g. 
"none", "relu" - torch::List> unary_post_op_args, + torch::List> unary_post_op_args, c10::string_view unary_post_op_algorithm) { #if AT_MKLDNN_ENABLED() return linear_int8_with_onednn_weight( @@ -1286,17 +1286,17 @@ class QLinearOnednn final { Tensor onednn_weight, // int8 tensor from MkldnnCPU Tensor weight_scales, Tensor weight_zero_points, - c10::optional bias, + std::optional bias, double output_scale, int64_t output_zero_point, - c10::optional output_dtype, - c10::optional other, // extra input for binary post-op + std::optional output_dtype, + std::optional other, // extra input for binary post-op double other_scale, int64_t other_zero_point, c10::string_view binary_post_op, // e.g. "none", "sum", "add" double binary_alpha, c10::string_view unary_post_op, // e.g. "none", "relu" - torch::List> unary_post_op_args, + torch::List> unary_post_op_args, c10::string_view unary_post_op_algorithm) { #if AT_MKLDNN_ENABLED() TORCH_CHECK(act_scale.numel() == 1 && act_zero_point.numel() == 1, diff --git a/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp b/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp index 935ad081bd908..111990ad4e277 100644 --- a/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp +++ b/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp @@ -483,7 +483,7 @@ at::Tensor& PackedLinearWeightFp16::apply_dynamic_relu_out( return apply_dynamic_impl(input, output); } -void PackedLinearWeightFp16::set_bias(c10::optional bias) { +void PackedLinearWeightFp16::set_bias(std::optional bias) { bias_ = std::move(bias); } diff --git a/aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp b/aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp index a2fb34f90b289..d8427076b5afd 100644 --- a/aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp +++ b/aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp @@ -58,7 +58,7 @@ void calc_col_offsets_transpose( c10::intrusive_ptr PackedLinearWeight::prepack( at::Tensor weight, - c10::optional bias) { + std::optional bias) { TORCH_CHECK( weight.dim() == 2, "The weight tensor for quantized::linear_prepack (fbgemm) should" @@ -102,7 +102,7 @@ c10::intrusive_ptr PackedLinearWeight::prepack( /*col_offsets=*/col_offsets.data(), /*qtype=*/qtype); - c10::optional bias_contig; + std::optional bias_contig; if (bias.has_value()) { at::Tensor bias_vec = bias.value(); TORCH_CHECK(bias_vec.dim() == 1, "bias should be a vector (1D Tensor)"); @@ -132,7 +132,7 @@ c10::intrusive_ptr PackedLinearWeight::prepack( #ifdef USE_PYTORCH_QNNPACK c10::intrusive_ptr PackedLinearWeightsQnnp::prepack( at::Tensor weight, - c10::optional bias_in) { + std::optional bias_in) { TORCH_CHECK( weight.dim() == 2, "quantized::linear_prepack (qnnpack): Weight tensor rank should be == 2"); @@ -181,7 +181,7 @@ c10::intrusive_ptr PackedLinearWeightsQnnp::prepack( c10::intrusive_ptr PackedLinearWeightFp16::prepack( at::Tensor weight, - c10::optional bias) { + std::optional bias) { weight = at::_saturate_weight_to_fp16(weight); @@ -208,7 +208,7 @@ c10::intrusive_ptr PackedLinearWeightFp16::prepack( #if AT_MKLDNN_ENABLED() c10::intrusive_ptr PackedLinearWeightsOnednn::prepack( at::Tensor weight, - c10::optional bias) { + std::optional bias) { TORCH_CHECK( weight.dim() == 2, "The weight tensor for quantized::linear_prepack (onednn) should" @@ -257,7 +257,7 @@ c10::intrusive_ptr PackedLinearWeightsOnednn::prepack( packed_weight_p->set_zero_point(wgt_zero_points); std::unique_ptr weight_ptr(packed_weight_p); // Bias - c10::optional onednn_bias{c10::nullopt}; + std::optional 
onednn_bias{c10::nullopt}; if (bias.has_value()) { auto& b = bias.value(); auto bias_size = b.sizes().vec(); @@ -270,7 +270,7 @@ c10::intrusive_ptr PackedLinearWeightsOnednn::prepack( auto bias_desc = ideep::tensor::desc(bias_size, dnnl::memory::data_type::f32); ideep::tensor packed_bias; packed_bias.init(bias_desc, b.data_ptr()); - onednn_bias = c10::optional(packed_bias); + onednn_bias = std::optional(packed_bias); } auto ret_ptr = c10::make_intrusive( PackedLinearWeightsOnednn{ @@ -283,7 +283,7 @@ c10::intrusive_ptr PackedLinearWeightsOnednn::prepack( inline at::Tensor pack_weight_to_onednn_tensor( const at::Tensor& weight, - c10::optional>& input_shape) { + std::optional>& input_shape) { std::vector w_dims = weight.sizes().vec(); ideep::tensor wei = ideep::tensor({w_dims, dnnl::memory::data_type::s8}, weight.data_ptr()); wei.transpose_(0, 1); // oneDNN requires transposed weight @@ -319,7 +319,7 @@ class QLinearPackWeightInt8 final { public: static c10::intrusive_ptr run( at::Tensor weight, - c10::optional bias) { + std::optional bias) { auto& ctx = at::globalContext(); #ifdef USE_FBGEMM @@ -350,7 +350,7 @@ class QLinearPackWeightFp16 final { public: static c10::intrusive_ptr run( at::Tensor weight, - c10::optional bias) { + std::optional bias) { auto& ctx = at::globalContext(); #ifdef USE_FBGEMM // temporarily convert weight back to fp32, needs to be fixed @@ -387,7 +387,7 @@ class QLinearPackWeightFp16 final { class QLinearPackWeightInt8Legacy final { public: - static Tensor run(at::Tensor weight, c10::optional bias) { + static Tensor run(at::Tensor weight, std::optional bias) { TORCH_CHECK(false, "This model uses an outdated version of quantized.linear_prepack. " "Please re-export your model using the newer definitions in torch.jit.quantized"); @@ -396,7 +396,7 @@ class QLinearPackWeightInt8Legacy final { class QLinearPackWeightFp16Legacy final { public: - static Tensor run(at::Tensor weight, c10::optional bias) { + static Tensor run(at::Tensor weight, std::optional bias) { TORCH_CHECK(false, "This model uses an outdated version of quantized.linear_prepack_fp16. 
" "Please re-export your model using the newer definitions in torch.jit.quantized"); @@ -407,7 +407,7 @@ class QLinearPackWeightInt8Onednn final { public: static at::Tensor run( at::Tensor weight, // Not QTensor - c10::optional> input_shape) { + std::optional> input_shape) { #if AT_MKLDNN_ENABLED() return pack_weight_to_onednn_tensor(weight, input_shape); #else diff --git a/aten/src/ATen/native/quantized/cpu/qnormalization.cpp b/aten/src/ATen/native/quantized/cpu/qnormalization.cpp index b803bdd8aff7a..9de75e80bc4df 100644 --- a/aten/src/ATen/native/quantized/cpu/qnormalization.cpp +++ b/aten/src/ATen/native/quantized/cpu/qnormalization.cpp @@ -135,8 +135,8 @@ TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) { m.impl(TORCH_SELECTIVE_NAME("quantized::layer_norm"), []( Tensor input, std::vector normalized_shape, // because IntArrayRef doesn't work - c10::optional weight, - c10::optional bias, + std::optional weight, + std::optional bias, double eps, double output_scale, int64_t output_zero_point) { @@ -149,8 +149,8 @@ TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) { m.impl(TORCH_SELECTIVE_NAME("quantized::group_norm"), []( Tensor qx, int64_t num_groups, - c10::optional weight, - c10::optional bias, + std::optional weight, + std::optional bias, double eps, double output_scale, int64_t output_zero_point) { @@ -162,8 +162,8 @@ TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) { }); m.impl(TORCH_SELECTIVE_NAME("quantized::instance_norm"), []( Tensor qx, - c10::optional weight, - c10::optional bias, + std::optional weight, + std::optional bias, double eps, double output_scale, int64_t output_zero_point) { diff --git a/aten/src/ATen/native/quantized/cpu/qsoftmax.cpp b/aten/src/ATen/native/quantized/cpu/qsoftmax.cpp index a2d3ed6305fc3..0d764aee90d09 100644 --- a/aten/src/ATen/native/quantized/cpu/qsoftmax.cpp +++ b/aten/src/ATen/native/quantized/cpu/qsoftmax.cpp @@ -44,8 +44,8 @@ Tensor qsoftmax_qnnpack(const Tensor& qx, const int64_t dim) { */ const int64_t last_dim = qx.dim() - 1; - c10::optional> permuted_dims = c10::nullopt; - c10::optional qx_contig = c10::nullopt; + std::optional> permuted_dims = c10::nullopt; + std::optional qx_contig = c10::nullopt; const at::Tensor* qx_contig_ptr = nullptr; if (qx.stride(dim) == 1) { diff --git a/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu b/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu index 3574bfe28f505..de3f1032dbcae 100644 --- a/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu +++ b/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu @@ -90,7 +90,7 @@ __global__ void embedding_bag_nbits_rowwise_offsets_kernel( const PackedTensorAccessor32 offsets, const bool /* pruned_weights */, const PackedTensorAccessor32 per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& compressed_indices_mapping, const bool include_last_offset, PackedTensorAccessor32 output) { static_assert(bits_per_dim == 4 || bits_per_dim == 8, "the current embedding_bag_nbits_rowwise_offsets_kernel only has been tested for 4 and 8 bits per dim"); @@ -192,8 +192,8 @@ at::Tensor& embedding_bag_byte_impl( const at::Tensor& indices, const at::Tensor& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset, bool is_embedding_op) { TORCH_CHECK(weight.is_cuda()); @@ -267,12 +267,12 @@ at::Tensor& embedding_bag_byte_impl( Tensor 
embedding_bag_byte_rowwise_offsets( const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { bool is_embedding_op = false; auto output = create_empty_from(weight, at::kFloat); @@ -375,8 +375,8 @@ at::Tensor& embedding_bag_4bit_impl( const at::Tensor& indices, const at::Tensor& offsets, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { TORCH_CHECK(weight.is_cuda()); TORCH_CHECK(indices.is_cuda()); @@ -449,12 +449,12 @@ at::Tensor& embedding_bag_4bit_impl( Tensor embedding_bag_4bit_rowwise_offsets( const Tensor& weight, const Tensor& indices, - const c10::optional& offsets_in, + const std::optional& offsets_in, const bool /* scale_grad_by_freq */, const int64_t /* mode */, bool pruned_weights, - const c10::optional& per_sample_weights_, - const c10::optional& compressed_indices_mapping, + const std::optional& per_sample_weights_, + const std::optional& compressed_indices_mapping, bool include_last_offset) { auto output = create_empty_from(weight, at::kFloat); diff --git a/aten/src/ATen/native/quantized/cudnn/BinaryOps.cpp b/aten/src/ATen/native/quantized/cudnn/BinaryOps.cpp index a225a86eeb903..07ccc19c48282 100644 --- a/aten/src/ATen/native/quantized/cudnn/BinaryOps.cpp +++ b/aten/src/ATen/native/quantized/cudnn/BinaryOps.cpp @@ -186,7 +186,7 @@ Tensor add(Tensor qa, Tensor qb, double output_scale, int64_t output_zero_point) // relu_op computes // relu( (qa_int8 + qb_int8 * ( qb_scale/qa_scale ) ) ) // output is a fp32 tensor - c10::optional relu_op; + std::optional relu_op; if (kReluFused) { // we use inplace operation here where the output is assigned to the input relu_op.emplace(cudnn_frontend::OperationBuilder(CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR) diff --git a/aten/src/ATen/native/quantized/cudnn/Conv.cpp b/aten/src/ATen/native/quantized/cudnn/Conv.cpp index bb97a69859cb4..606d769fe6eb4 100644 --- a/aten/src/ATen/native/quantized/cudnn/Conv.cpp +++ b/aten/src/ATen/native/quantized/cudnn/Conv.cpp @@ -70,8 +70,8 @@ void PackedConvWeightCudnn::apply_impl_helper(const at::Tensor& qua auto requantize_multiplier = act_scale * weight_scale / output_scale; at::Tensor requantize_multiplier_tensor = cudnn_utils::getRequantMultiplierTensor(requantize_multiplier, kSpatialDim + 2); - c10::optional bias_multiplier_tensor; - c10::optional broadcasted_bias; + std::optional bias_multiplier_tensor; + std::optional broadcasted_bias; if (bias_.has_value()) { // the input bias is a 1-D tensor whose size is the same as the size of the second dimension of quantized_output. // we need to add trailing dimensions in order to properly broadcast bias, otherwise broadcast_to will fail. 
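The comment above notes that the 1-D bias must gain trailing singleton dimensions before broadcast_to, because broadcasting aligns shapes from the trailing side. A minimal sketch of that reshape-then-broadcast step, with illustrative NCHW shapes that are not taken from the patch:

```
#include <ATen/ATen.h>

int main() {
  // Illustrative shapes only: an NCHW activation and a per-channel 1-D bias.
  auto out  = at::zeros({2, 8, 4, 4});   // [N, C, H, W]
  auto bias = at::ones({8});             // [C]

  // A bare [C] bias would be aligned with W and broadcast_to would fail;
  // appending singleton dimensions lines it up with the channel dimension.
  auto b = bias.reshape({8, 1, 1});                 // [C, 1, 1]
  auto broadcasted = b.broadcast_to(out.sizes());   // [N, C, H, W]

  auto result = out + broadcasted;
  TORCH_CHECK(result.sizes() == out.sizes());
  return 0;
}
```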
@@ -154,12 +154,12 @@ void PackedConvWeightCudnn::apply_impl_helper(const at::Tensor& qua .build(); // std::cout << "operator:" << conv_op.describe() << std::endl; - c10::optional bias_mult_op; - c10::optional sum_conv_bias_op; + std::optional bias_mult_op; + std::optional sum_conv_bias_op; if (bias_.has_value()) { // we can't directly assign bias_mult_op because operator= is deleted for cudnn_frontend::Operation; // alternatively, I think we can use std::unique_ptr and dynamically allocate these builder ops - // but here, we chose to do it statically. c10::optional::emplace() enables this approach + // but here, we chose to do it statically. std::optional::emplace() enables this approach // bias_mult_op computes bias_fp32 / (act_scale * w_scale) or bias_fp32 * (1 / (act_scale * w_scale)) // where bias_multiplier = (1 / (act_scale * w_scale)) @@ -188,7 +188,7 @@ void PackedConvWeightCudnn::apply_impl_helper(const at::Tensor& qua // relu_op computes relu(act_int8 * w_int8 + [bias_fp32/(act_scale * w_scale)] // or relu(act_int8 * w_int8) if bias is not present. // output is a fp32 tensor - c10::optional relu_op; + std::optional relu_op; std::shared_ptr tensor2requant_ptr = bias_.has_value() ? sum_conv_bias_op.value().getOutputTensor() : conv_op.getOutputTensor(); if (kReluFused) { // we use inplace operation here where the output is assigned to the input diff --git a/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp b/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp index 44d37f27bf6f6..b1bd94ee7a55c 100644 --- a/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp +++ b/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp @@ -27,7 +27,7 @@ c10::intrusive_ptr> PackedConvWeightCudnn< kSpatialDim>:: prepack( at::Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -116,7 +116,7 @@ c10::intrusive_ptr> PackedConvWeightCudnn< 2>:: prepack( at::Tensor weight, - c10::optional bias_in, + std::optional bias_in, torch::List stride, torch::List padding, torch::List output_padding, @@ -133,7 +133,7 @@ class QConvPackWeightInt8Cudnn final { public: static c10::intrusive_ptr> run_conv( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List dilation, @@ -150,7 +150,7 @@ class QConvPackWeightInt8Cudnn final { private: static c10::intrusive_ptr> _run( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -167,7 +167,7 @@ class QConv1dPackWeightInt8Cudnn final { public: static c10::intrusive_ptr> run_conv( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List dilation, @@ -180,7 +180,7 @@ class QConv1dPackWeightInt8Cudnn final { private: static c10::intrusive_ptr> _run( Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, diff --git a/aten/src/ATen/native/quantized/cudnn/ConvUnpackImpl.cpp b/aten/src/ATen/native/quantized/cudnn/ConvUnpackImpl.cpp index ce5ee36cad4f0..fbb4a1fe94111 100644 --- a/aten/src/ATen/native/quantized/cudnn/ConvUnpackImpl.cpp +++ b/aten/src/ATen/native/quantized/cudnn/ConvUnpackImpl.cpp @@ -11,12 +11,12 @@ #include template -std::tuple> PackedConvWeightCudnn< +std::tuple> PackedConvWeightCudnn< kSpatialDim>::unpack() { - return std::tuple>{maybe_padded_weight_, bias_}; + return std::tuple>{maybe_padded_weight_, bias_}; } -template 
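The comment in the hunk above explains why the builder-produced ops are held in std::optional and created with emplace(): cudnn_frontend::Operation has deleted assignment operators, so optional assignment does not compile, while emplace() only requires a constructor. A small self-contained sketch using a stand-in type (the Operation struct below is illustrative, not the cudnn_frontend API):

```
#include <iostream>
#include <optional>
#include <string>

// Stand-in for a builder-produced type whose assignment operators are
// deleted, as the patch notes for cudnn_frontend::Operation.
struct Operation {
  explicit Operation(std::string desc) : desc_(std::move(desc)) {}
  Operation& operator=(const Operation&) = delete;
  Operation& operator=(Operation&&) = delete;
  const std::string& describe() const { return desc_; }
 private:
  std::string desc_;
};

int main() {
  const bool has_bias = true;  // stands in for bias_.has_value()

  std::optional<Operation> bias_mult_op;  // empty until we know it is needed
  if (has_bias) {
    // bias_mult_op = Operation{"bias multiply"}; would not compile because
    // the assignment operators are deleted; emplace() constructs the value
    // in place inside the optional instead.
    bias_mult_op.emplace("bias multiply");
  }

  if (bias_mult_op.has_value()) {
    std::cout << bias_mult_op->describe() << "\n";
  }
  return 0;
}
```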
std::tuple> PackedConvWeightCudnn< +template std::tuple> PackedConvWeightCudnn< 2>::unpack(); #endif // AT_CUDNN_ENABLED diff --git a/aten/src/ATen/native/quantized/cudnn/Linear.cpp b/aten/src/ATen/native/quantized/cudnn/Linear.cpp index f9333d6fbed7a..d3219592e25bb 100644 --- a/aten/src/ATen/native/quantized/cudnn/Linear.cpp +++ b/aten/src/ATen/native/quantized/cudnn/Linear.cpp @@ -98,8 +98,8 @@ void PackedLinearWeightCudnn::apply_impl_helper(const at::Tensor& quantized_outp auto weight_scale = orig_weight.q_scale(); auto requantize_multiplier = act_scale * weight_scale / output_scale; at::Tensor requantize_multiplier_tensor = cudnn_utils::getRequantMultiplierTensor(requantize_multiplier, quantized_output.dim()); - c10::optional bias_multiplier_tensor; - c10::optional broadcasted_bias; + std::optional bias_multiplier_tensor; + std::optional broadcasted_bias; if (bias_.has_value()) { // the input bias is a 1-D tensor whose size is the same as the size of the last dimension of quantized_output // we need to add trailing dimensions in order to properly broadcast bias, otherwise broadcast_to will fail. @@ -183,12 +183,12 @@ void PackedLinearWeightCudnn::apply_impl_helper(const at::Tensor& quantized_outp .build(); // std::cout << "operator:" << linear_op.describe() << std::endl; - c10::optional bias_mult_op; - c10::optional sum_linear_bias_op; + std::optional bias_mult_op; + std::optional sum_linear_bias_op; if (bias_.has_value()) { // we can't directly assign bias_mult_op because operator= is deleted for cudnn_frontend::Operation; // alternatively, I think we can use std::unique_ptr and dynamically allocate these builder ops - // but here, we chose to do it statically. c10::optional::emplace() enables this approach + // but here, we chose to do it statically. std::optional::emplace() enables this approach // bias_mult_op computes bias_fp32 / (act_scale * w_scale) or bias_fp32 * (1 / (act_scale * w_scale)) // where bias_multiplier = (1 / (act_scale * w_scale)) @@ -222,7 +222,7 @@ void PackedLinearWeightCudnn::apply_impl_helper(const at::Tensor& quantized_outp // relu_op computes relu(act_int8 * w_int8 + [bias_fp32/(act_scale * w_scale)] // or relu(act_int8 * w_int8) if bias is not present. // output is a fp32 tensor - c10::optional relu_op; + std::optional relu_op; std::shared_ptr tensor2requant_ptr = bias_.has_value() ? 
sum_linear_bias_op.value().getOutputTensor() : linear_op.getOutputTensor(); if (kReluFused) { // we use inplace operation here where the output is assigned to the input diff --git a/aten/src/ATen/native/quantized/cudnn/LinearPrepack.cpp b/aten/src/ATen/native/quantized/cudnn/LinearPrepack.cpp index abbb5922f3933..fd7c870e006d1 100644 --- a/aten/src/ATen/native/quantized/cudnn/LinearPrepack.cpp +++ b/aten/src/ATen/native/quantized/cudnn/LinearPrepack.cpp @@ -16,7 +16,7 @@ int register_linear_params(); c10::intrusive_ptr PackedLinearWeightCudnn::prepack( at::Tensor weight, - c10::optional bias) { + std::optional bias) { TORCH_CHECK(weight.qscheme() == c10::kPerTensorAffine, "Unsupported qscheme: ", toString(weight.qscheme())); const int output_channels = weight.size(0); const auto qtype = weight.qscheme(); @@ -42,7 +42,7 @@ class QLinearPackWeightInt8Cudnn final { public: static c10::intrusive_ptr run( at::Tensor weight, - c10::optional bias) { + std::optional bias) { return PackedLinearWeightCudnn::prepack(std::move(weight), std::move(bias)); } }; diff --git a/aten/src/ATen/native/quantized/cudnn/LinearUnpackImpl.cpp b/aten/src/ATen/native/quantized/cudnn/LinearUnpackImpl.cpp index 7200872480efd..40088052cd151 100644 --- a/aten/src/ATen/native/quantized/cudnn/LinearUnpackImpl.cpp +++ b/aten/src/ATen/native/quantized/cudnn/LinearUnpackImpl.cpp @@ -10,8 +10,8 @@ #include -std::tuple> PackedLinearWeightCudnn::unpack() { - return std::tuple>{orig_weight, bias_}; +std::tuple> PackedLinearWeightCudnn::unpack() { + return std::tuple>{orig_weight, bias_}; } #endif // AT_CUDNN_ENABLED diff --git a/aten/src/ATen/native/quantized/cudnn/utils.h b/aten/src/ATen/native/quantized/cudnn/utils.h index 18c891fcaa1c0..fbd10e2ec95e7 100644 --- a/aten/src/ATen/native/quantized/cudnn/utils.h +++ b/aten/src/ATen/native/quantized/cudnn/utils.h @@ -27,7 +27,7 @@ C10_DIAGNOSTIC_POP() struct PackedLinearWeightCudnn : public LinearPackedParamsBase { PackedLinearWeightCudnn( at::Tensor orig_weight, - c10::optional bias, + std::optional bias, c10::QScheme q_scheme) : orig_weight(std::move(orig_weight)), bias_(std::move(bias)), @@ -53,19 +53,19 @@ struct PackedLinearWeightCudnn : public LinearPackedParamsBase { "parameter type"); } - std::tuple> unpack() override; + std::tuple> unpack() override; - c10::optional bias() override { + std::optional bias() override { return bias_; } static c10::intrusive_ptr prepack( at::Tensor weight, - c10::optional bias); + std::optional bias); private: at::Tensor orig_weight; - c10::optional bias_; + std::optional bias_; c10::QScheme q_scheme; template @@ -85,7 +85,7 @@ template struct PackedConvWeightCudnn : public ConvPackedParamsBase { PackedConvWeightCudnn( at::Tensor orig_weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -127,11 +127,11 @@ struct PackedConvWeightCudnn : public ConvPackedParamsBase { TORCH_CHECK(false, "apply_dynamic_relu is currently not reported"); } - std::tuple> unpack() override; + std::tuple> unpack() override; static c10::intrusive_ptr> prepack( at::Tensor weight, - c10::optional bias, + std::optional bias, torch::List stride, torch::List padding, torch::List output_padding, @@ -171,7 +171,7 @@ struct PackedConvWeightCudnn : public ConvPackedParamsBase { // convention "maybe"_padded_weight. 
// TODO: when and if cudnn enables padding in their operators, we can remove padding on our end and rename this to orig_weight_ at::Tensor maybe_padded_weight_; - c10::optional bias_; + std::optional bias_; torch::List stride_; torch::List padding_; torch::List output_padding_; diff --git a/aten/src/ATen/native/quantized/qconv_unpack.cpp b/aten/src/ATen/native/quantized/qconv_unpack.cpp index fe4007c712ce5..1fdc7745cfa2e 100644 --- a/aten/src/ATen/native/quantized/qconv_unpack.cpp +++ b/aten/src/ATen/native/quantized/qconv_unpack.cpp @@ -49,7 +49,7 @@ namespace { template class QConvUnpackWeightsInt8 final { public: - static std::tuple> run( + static std::tuple> run( const c10::intrusive_ptr>& packed_weight) { auto& ctx = at::globalContext(); @@ -85,17 +85,17 @@ class QConvUnpackWeightsInt8 final { class QConv1dUnpackWeightsInt8 final { public: - static std::tuple> run( + static std::tuple> run( const c10::intrusive_ptr>& packed_weight) { auto& ctx = at::globalContext(); at::Tensor weight; - c10::optional bias; + std::optional bias; #ifdef USE_FBGEMM if (ctx.qEngine() == at::QEngine::FBGEMM || ctx.qEngine() == at::QEngine::X86) { std::tie(weight, bias) = packed_weight->unpack(); weight = weight.squeeze_(quant_utils::kConv1dSqueezeDim + 2); - return std::tuple>(weight, bias); + return std::tuple>(weight, bias); } #endif @@ -104,7 +104,7 @@ class QConv1dUnpackWeightsInt8 final { std::tie(weight, bias) = packed_weight->unpack(); at::Tensor new_weight = weight.clone(); new_weight = new_weight.squeeze_(quant_utils::kConv1dSqueezeDim + 2); - return std::tuple>(new_weight, bias); + return std::tuple>(new_weight, bias); } #endif @@ -113,7 +113,7 @@ class QConv1dUnpackWeightsInt8 final { std::tie(weight, bias) = packed_weight->unpack(); at::Tensor new_weight = weight.clone(); new_weight.squeeze_(quant_utils::kConv1dSqueezeDim + 2); - return std::tuple>(new_weight, bias); + return std::tuple>(new_weight, bias); } #endif diff --git a/aten/src/ATen/native/quantized/qlinear_unpack.cpp b/aten/src/ATen/native/quantized/qlinear_unpack.cpp index 19c9890c82e38..85eab571df9e0 100644 --- a/aten/src/ATen/native/quantized/qlinear_unpack.cpp +++ b/aten/src/ATen/native/quantized/qlinear_unpack.cpp @@ -21,7 +21,7 @@ namespace { class QLinearUnpackWeightInt8 final { public: - static std::tuple> run( + static std::tuple> run( const c10::intrusive_ptr& packed_weight) { return packed_weight->unpack(); } @@ -29,7 +29,7 @@ class QLinearUnpackWeightInt8 final { class QLinearUnpackWeightFp16 final { public: - static std::tuple> run( + static std::tuple> run( const c10::intrusive_ptr& packed_weight) { auto& ctx = at::globalContext(); @@ -44,7 +44,7 @@ class QLinearUnpackWeightFp16 final { class QLinearUnpackWeightInt8Legacy final { public: - static std::tuple> run( + static std::tuple> run( const at::Tensor& packed_weight) { TORCH_CHECK(false, "quantized.linear_unpack(Tensor) is unsupported! Please " @@ -55,7 +55,7 @@ class QLinearUnpackWeightInt8Legacy final { class QLinearUnpackWeightFp16Legacy final { public: - static std::tuple> run( + static std::tuple> run( const at::Tensor& packed_weight) { TORCH_CHECK(false, "quantized.linear_unpack(Tensor) is unsupported! 
Please " diff --git a/aten/src/ATen/native/sparse/SoftMax.cpp b/aten/src/ATen/native/sparse/SoftMax.cpp index 883c2b9c4ea95..179db48beacca 100644 --- a/aten/src/ATen/native/sparse/SoftMax.cpp +++ b/aten/src/ATen/native/sparse/SoftMax.cpp @@ -615,7 +615,7 @@ static Tensor _sparse_softmax(const Tensor& input_, const int64_t dim_) { return result; } -Tensor _sparse_softmax(const Tensor& input_, const int64_t dim_, c10::optional dtype) { +Tensor _sparse_softmax(const Tensor& input_, const int64_t dim_, std::optional dtype) { auto result = [&]() { NoNamesGuard guard; if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half && dtype == ScalarType::Float){ @@ -642,7 +642,7 @@ static Tensor _sparse_log_softmax(const Tensor& input_, const int64_t dim_) { return result; } -Tensor _sparse_log_softmax(const Tensor& input_, const int64_t dim_, c10::optional dtype) { +Tensor _sparse_log_softmax(const Tensor& input_, const int64_t dim_, std::optional dtype) { auto result = [&]() { NoNamesGuard guard; if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half && dtype == ScalarType::Float){ diff --git a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h index 8782031c49aa1..fb4a6fe85c05c 100644 --- a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h +++ b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h @@ -133,8 +133,8 @@ void _sparse_binary_op_intersection_kernel_impl( const Tensor& x_, const Tensor& y_, const std::vector& broadcasted_shape, - const c10::optional& x_hash_opt_ = c10::nullopt, - const c10::optional& y_hash_opt_ = c10::nullopt, + const std::optional& x_hash_opt_ = c10::nullopt, + const std::optional& y_hash_opt_ = c10::nullopt, const bool accumulate_matches = true, const bool distributive_with_sum = true ) { @@ -148,7 +148,7 @@ void _sparse_binary_op_intersection_kernel_impl( " to output ", res.scalar_type()); using KernelLauncher = KernelLauncher; - using OptTensor = c10::optional; + using OptTensor = std::optional; // If the op and sum are not distributive, coalesce is required. const auto coalesce_if_not_distributive = [distributive_with_sum](const Tensor& t, const OptTensor& t_hash_opt) -> auto { @@ -423,8 +423,8 @@ void _sparse_binary_op_intersection_kernel_out( Tensor& res, const Tensor& x, const Tensor& y, - const c10::optional& x_hash_opt = c10::nullopt, - const c10::optional& y_hash_opt = c10::nullopt, + const std::optional& x_hash_opt = c10::nullopt, + const std::optional& y_hash_opt = c10::nullopt, // If op distributes with the sum, the arguments are processed as is, // without the calls to coalesce(). const bool distributive_with_sum = true @@ -439,7 +439,7 @@ void _sparse_binary_op_intersection_kernel_out( x._indices().scalar_type() == y._indices().scalar_type(), NAME, "(): expects inputs' indices to be of the same dtype (i.e. 
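The comments in the intersection-kernel hunk above tie the need for coalesce() to whether the binary op distributes over the summation of duplicate entries in an uncoalesced operand. A tiny scalar illustration of that condition (plain C++, arbitrary values):

```
#include <algorithm>
#include <cassert>

int main() {
  // An uncoalesced sparse operand may store two entries a1 and a2 at the
  // same index; the logical value there is a1 + a2.
  const double a1 = 1.0, a2 = 2.0, b = 5.0;

  // Multiplication distributes over that sum, so the kernel can process the
  // duplicates as-is and accumulate the per-entry results afterwards.
  assert((a1 + a2) * b == a1 * b + a2 * b);

  // max does not distribute, so the operand must be coalesced (duplicates
  // summed into one entry) before the op is applied.
  assert(std::max(a1 + a2, b) != std::max(a1, b) + std::max(a2, b));
  return 0;
}
```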
long or int)"); - const auto check_hash_validity = [](const Tensor& t, const c10::optional& t_hash_opt) { + const auto check_hash_validity = [](const Tensor& t, const std::optional& t_hash_opt) { if (!t_hash_opt.has_value()) { return; } diff --git a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp index 2db8c9e9404cc..94a1e3d622355 100644 --- a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp +++ b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp @@ -119,7 +119,7 @@ struct CPUValueSelectionIntersectionKernel { } }; -using OptTensor = c10::optional; +using OptTensor = std::optional; void mul_sparse_sparse_out_cpu_kernel( Tensor& result, diff --git a/aten/src/ATen/native/sparse/SparseCsrTensor.cpp b/aten/src/ATen/native/sparse/SparseCsrTensor.cpp index d1973c43e9ad7..59b048f5d147c 100644 --- a/aten/src/ATen/native/sparse/SparseCsrTensor.cpp +++ b/aten/src/ATen/native/sparse/SparseCsrTensor.cpp @@ -363,10 +363,10 @@ Tensor sparse_compressed_tensor_with_dims( c10::IntArrayRef size, c10::IntArrayRef blocksize, ScalarType index_dtype, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // sparse_compressed_tensor_with_dims is a generalization of empty // that enables the specification of nnz, dense_dim, blocksize, and // index_dtype for sparse compressed tensors. @@ -435,10 +435,10 @@ Tensor _sparse_compressed_tensor_unsafe_symint( const Tensor& plain_indices, const Tensor& values, c10::SymIntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { if (!layout) { AT_ERROR("sparse_compressed_tensor_unsafe expected sparse compressed tensor layout but got none"); } @@ -458,10 +458,10 @@ Tensor _sparse_compressed_tensor_unsafe_template(const Tensor& compressed_indice const Tensor& plain_indices, const Tensor& values, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { Layout layout_ = layout.value_or(required_layout); TORCH_CHECK(layout_ == required_layout, "sparse compressed layout must be ",required_layout, " but got ", layout_); if (at::globalContext().checkSparseTensorInvariants()) { @@ -478,10 +478,10 @@ Tensor _sparse_compressed_tensor_unsafe_template(const Tensor& compressed_indice const Tensor& plain_indices, \ const Tensor& values, \ IntArrayRef size, \ - c10::optional dtype, \ - c10::optional layout, \ - c10::optional device, \ - c10::optional pin_memory) { \ + std::optional dtype, \ + std::optional layout, \ + std::optional device, \ + std::optional pin_memory) { \ return _sparse_compressed_tensor_unsafe_template(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); \ } @@ -554,10 +554,10 @@ Tensor sparse_compressed_tensor( const Tensor& plain_indices, const Tensor& values, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { if (!layout) { AT_ERROR("sparse_compressed_tensor expected sparse compressed tensor 
layout but got none"); @@ -583,10 +583,10 @@ Tensor sparse_compressed_tensor( const Tensor& compressed_indices, const Tensor& plain_indices, const Tensor& values, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { if (!layout) { AT_ERROR("sparse_compressed_tensor expected sparse compressed tensor layout but got none"); @@ -614,28 +614,28 @@ Tensor sparse_compressed_tensor( Tensor sparse_##KIND##_tensor(const Tensor& compressed_indices, \ const Tensor& plain_indices, \ const Tensor& values, \ - c10::optional dtype, \ - c10::optional layout, \ - c10::optional device, \ - c10::optional pin_memory) { \ + std::optional dtype, \ + std::optional layout, \ + std::optional device, \ + std::optional pin_memory) { \ if (layout) { \ TORCH_CHECK(layout.value() == REQUIRED_LAYOUT, "sparse " # KIND " layout must be ", REQUIRED_LAYOUT, " but got ", layout.value()); \ } \ - c10::optional layout_(REQUIRED_LAYOUT); \ + std::optional layout_(REQUIRED_LAYOUT); \ return at::native::sparse_compressed_tensor(compressed_indices, plain_indices, values, dtype, layout_, device, pin_memory); \ } \ Tensor sparse_##KIND##_tensor(const Tensor& compressed_indices, \ const Tensor& plain_indices, \ const Tensor& values, \ IntArrayRef size, \ - c10::optional dtype, \ - c10::optional layout, \ - c10::optional device, \ - c10::optional pin_memory) { \ + std::optional dtype, \ + std::optional layout, \ + std::optional device, \ + std::optional pin_memory) { \ if (layout) { \ TORCH_CHECK(layout.value() == REQUIRED_LAYOUT, "sparse " # KIND " layout must be ", REQUIRED_LAYOUT, " but got ", layout.value()); \ } \ - c10::optional layout_(REQUIRED_LAYOUT); \ + std::optional layout_(REQUIRED_LAYOUT); \ return at::native::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, dtype, layout_, device, pin_memory); \ } @@ -650,11 +650,11 @@ SPARSE_COMPRESSED_TENSOR(bsc, kSparseBsc) // indices. The implementation below is kept for BC. 
Tensor empty_sparse_compressed( IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { check_size_nonnegative(size); TORCH_CHECK(size.size() >= 2, "torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size ", size); @@ -699,7 +699,7 @@ Tensor empty_sparse_compressed( const Tensor& resize_sparse_csr_( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { check_size_nonnegative(size); TORCH_CHECK(size.size() >= 2, "torch.resize_: Only batched sparse CSR matrices are supported, but got size ", size); TORCH_CHECK( @@ -836,7 +836,7 @@ const SparseCsrTensor& resize_as_sparse_compressed_( SparseCsrTensor clone_sparse_compressed( const SparseCsrTensor& self, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { TORCH_CHECK( !optional_memory_format.has_value(), "unsupported memory format option ", @@ -863,11 +863,11 @@ SparseCsrTensor clone_sparse_compressed( Tensor empty_like_sparse_csr( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { TensorOptions options_ = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); TensorOptions options = self.options() diff --git a/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp b/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp index bff9842a2a3ab..ccac30d65a1a7 100644 --- a/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp +++ b/aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp @@ -326,7 +326,7 @@ Tensor& normal_sparse_csr_( Tensor& self, double mean, double std, - c10::optional gen) { + std::optional gen) { return unary_op_inplace(self, &Tensor::normal_, mean, std, gen); } @@ -1000,7 +1000,7 @@ struct Reduction...Op { inline scalar_t identity() const { return ...; } }; -Tensor _sparse_csr_..._cpu(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional dtype) { +Tensor _sparse_csr_..._cpu(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, std::optional dtype) { ... result = reduce_sparse_csr_cpu_template(input_, dims_to_sum, keepdim, Reduction...Op()); ... 
@@ -1336,7 +1336,7 @@ struct ReductionMulOp { } // namespace -Tensor _sparse_csr_sum_cpu(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional dtype) { +Tensor _sparse_csr_sum_cpu(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, std::optional dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = at::sparse_csr::to_type(input, dtype_); Tensor result; @@ -1352,7 +1352,7 @@ Tensor _sparse_csr_sum_cpu(const Tensor& input, IntArrayRef dims_to_sum, bool ke return result; } -Tensor _sparse_csr_prod_cpu(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, c10::optional dtype) { +Tensor _sparse_csr_prod_cpu(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, std::optional dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; diff --git a/aten/src/ATen/native/sparse/SparseFactories.cpp b/aten/src/ATen/native/sparse/SparseFactories.cpp index 6ee92320e12d1..38a59b40c808a 100644 --- a/aten/src/ATen/native/sparse/SparseFactories.cpp +++ b/aten/src/ATen/native/sparse/SparseFactories.cpp @@ -22,7 +22,7 @@ Tensor spdiags( const Tensor& diagonals, const Tensor& offsets, IntArrayRef shape, - c10::optional layout) { + std::optional layout) { auto diagonals_2d = diagonals.dim() == 1 ? diagonals.unsqueeze(0) : diagonals; TORCH_CHECK(diagonals_2d.dim() == 2, "Diagonals must be vector or matrix"); TORCH_CHECK(shape.size() == 2, "Output shape must be 2d"); diff --git a/aten/src/ATen/native/sparse/SparseStubs.h b/aten/src/ATen/native/sparse/SparseStubs.h index 2a3aef5c8bd92..af6df0785fe92 100644 --- a/aten/src/ATen/native/sparse/SparseStubs.h +++ b/aten/src/ATen/native/sparse/SparseStubs.h @@ -13,10 +13,10 @@ namespace native { using mul_sparse_sparse_out_fn = void (*)(Tensor& res, const Tensor& x, const Tensor& y); DECLARE_DISPATCH(mul_sparse_sparse_out_fn, mul_sparse_sparse_out_stub); -using sparse_mask_intersection_out_fn = void (*)(Tensor& res, const Tensor& x, const Tensor& y, const c10::optional& x_hash_opt); +using sparse_mask_intersection_out_fn = void (*)(Tensor& res, const Tensor& x, const Tensor& y, const std::optional& x_hash_opt); DECLARE_DISPATCH(sparse_mask_intersection_out_fn, sparse_mask_intersection_out_stub); -using sparse_mask_projection_out_fn = void (*)(Tensor& res, const Tensor& x, const Tensor& y, const c10::optional& x_hash_opt, bool accumulate_matches); +using sparse_mask_projection_out_fn = void (*)(Tensor& res, const Tensor& x, const Tensor& y, const std::optional& x_hash_opt, bool accumulate_matches); DECLARE_DISPATCH(sparse_mask_projection_out_fn, sparse_mask_projection_out_stub); using flatten_indices_fn = Tensor (*)(const Tensor& indices, IntArrayRef size); diff --git a/aten/src/ATen/native/sparse/SparseTensor.cpp b/aten/src/ATen/native/sparse/SparseTensor.cpp index add7f433731a2..e9f10d964b320 100644 --- a/aten/src/ATen/native/sparse/SparseTensor.cpp +++ b/aten/src/ATen/native/sparse/SparseTensor.cpp @@ -143,10 +143,10 @@ Tensor values_default(const Tensor& self) { /*** Helper methods ***/ static SparseTensor new_sparse( - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { AT_ASSERT(layout.has_value() && *layout == kSparse); DispatchKey dispatch_key; switch (device_or_default(device).type()) { @@ -170,10 +170,10 @@ SparseTensor new_with_dims_sparse( int64_t sparse_dim, int64_t dense_dim, ArrayRef 
size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { SparseTensor self = new_sparse(dtype, layout, device, pin_memory); get_sparse_impl(self)->resize_and_clear_(sparse_dim, dense_dim, size); return self; @@ -185,11 +185,11 @@ SparseTensor new_with_dims_and_tensor_sparse_symint( c10::SymIntArrayRef size, const Tensor& indices, const Tensor& values, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional is_coalesced) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional is_coalesced) { SparseTensor self = new_sparse(dtype, layout, device, pin_memory); auto impl = get_sparse_impl(self); impl->resize_(sparse_dim, dense_dim, size); @@ -228,11 +228,11 @@ SparseTensor new_with_dims_and_tensor_sparse_symint( /** Empty init **/ Tensor empty_sparse( IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { TORCH_CHECK( !pin_memory.has_value() || !*pin_memory, "Only dense CPU tensors can be pinned"); @@ -242,10 +242,10 @@ Tensor empty_sparse( /* Shape init */ Tensor sparse_coo_tensor(IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -268,11 +268,11 @@ static inline Tensor expand_values_if_needed(const Tensor& values) { } // namespace Tensor sparse_coo_tensor(const Tensor& indices, const Tensor& values_, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional is_coalesced) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional is_coalesced) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); @@ -352,7 +352,7 @@ void _validate_sparse_coo_tensor_args( const Tensor& indices, const Tensor& values_, ArrayRef size, - c10::optional is_coalesced_) { + std::optional is_coalesced_) { Tensor values = expand_values_if_needed(values_); bool is_coalesced = is_coalesced_.value_or(false); @@ -425,11 +425,11 @@ void _validate_sparse_coo_tensor_args( // NB: Got rid of the sizes == NULL case Tensor sparse_coo_tensor(const Tensor& indices, const Tensor& values, IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional is_coalesced) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional is_coalesced) { // See [Note: hacky wrapper removal for TensorOptions] TensorOptions options = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); // arg checking @@ -449,11 +449,11 @@ Tensor sparse_coo_tensor(const Tensor& indices, const Tensor& values, IntArrayRe } Tensor 
_sparse_coo_tensor_unsafe(const Tensor& indices, const Tensor& values_, at::IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional is_coalesced) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional is_coalesced) { if (at::globalContext().checkSparseTensorInvariants()) { at::native::_validate_sparse_coo_tensor_args(indices, values_, size, is_coalesced); } @@ -467,11 +467,11 @@ Tensor _sparse_coo_tensor_unsafe(const Tensor& indices, const Tensor& values_, a // _validate_sparse_coo_tensor_args before using the tensor. // NB: Got rid of the size == NULL case Tensor _sparse_coo_tensor_unsafe_symint(const Tensor& indices, const Tensor& values_, c10::SymIntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional is_coalesced) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional is_coalesced) { // See [Note: hacky wrapper removal for TensorOptions] Tensor values = expand_values_if_needed(values_); @@ -495,7 +495,7 @@ Tensor _sparse_coo_tensor_unsafe_symint(const Tensor& indices, const Tensor& val SparseTensor clone_sparse( const SparseTensor& self, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { TORCH_CHECK( !optional_memory_format.has_value(), "unsupported memory format option ", @@ -687,7 +687,7 @@ SparseTensor _coalesce_sparse_cpu(const SparseTensor& self) { DEFINE_DISPATCH(sparse_mask_intersection_out_stub); DEFINE_DISPATCH(sparse_mask_projection_out_stub); -using OptTensor = c10::optional; +using OptTensor = std::optional; static std::tuple sparse_mask_like_prepare_sparse_inputs( const std::string& method_name, @@ -814,11 +814,11 @@ Tensor sparse_mask_projection(const Tensor& t, const Tensor& mask, bool accumula Tensor empty_like_sparse_coo( const Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional optional_memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional optional_memory_format) { TensorOptions options_ = TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory); TORCH_CHECK( diff --git a/aten/src/ATen/native/sparse/SparseTensorMath.cpp b/aten/src/ATen/native/sparse/SparseTensorMath.cpp index a3227df942c45..f058c68579f86 100644 --- a/aten/src/ATen/native/sparse/SparseTensorMath.cpp +++ b/aten/src/ATen/native/sparse/SparseTensorMath.cpp @@ -220,7 +220,7 @@ static SparseTensor& coalesce_(SparseTensor& tensor) { // div(SparseTensor, Scalar) // -------------------------------------------------------------------- -SparseTensor& div_out_sparse_zerodim(const SparseTensor& t, const Tensor& value, c10::optional rounding_mode, SparseTensor& r) { +SparseTensor& div_out_sparse_zerodim(const SparseTensor& t, const Tensor& value, std::optional rounding_mode, SparseTensor& r) { TORCH_CHECK(value.dim() == 0, "Sparse division requires a scalar or ", "zero-dim dense tensor divisor (got shape ", value.sizes(), " for divisor)"); TORCH_CHECK(!value.is_sparse(), "Sparse division requires a scalar or ", @@ -274,7 +274,7 @@ static SparseTensor& div_out_sparse_scalar(const SparseTensor& t, Scalar value, return div_out_sparse_zerodim(t, wrapped_scalar_tensor(value), r); } -Tensor div_sparse(const Tensor& self, const Tensor& 
value, c10::optional rounding_mode) { +Tensor div_sparse(const Tensor& self, const Tensor& value, std::optional rounding_mode) { auto commonDtype = at::result_type(self, value); if (c10::isIntegralType(commonDtype, /*includeBool=*/true) && !rounding_mode.has_value()) { commonDtype = typeMetaToScalarType(at::get_default_dtype()); @@ -283,11 +283,11 @@ Tensor div_sparse(const Tensor& self, const Tensor& value, c10::optional rounding_mode) { +Tensor& div_sparse_(Tensor& self, const Tensor& value, std::optional rounding_mode) { return div_out_sparse_zerodim(self, value, std::move(rounding_mode), self); } -static SparseTensor& div_out_sparse_scalar(const SparseTensor& t, Scalar value, c10::optional rounding_mode, SparseTensor& r) { +static SparseTensor& div_out_sparse_scalar(const SparseTensor& t, Scalar value, std::optional rounding_mode, SparseTensor& r) { return div_out_sparse_zerodim(t, wrapped_scalar_tensor(value), std::move(rounding_mode), r); } diff --git a/aten/src/ATen/native/sparse/SparseUnaryOps.cpp b/aten/src/ATen/native/sparse/SparseUnaryOps.cpp index ce6e3d4eac11b..f5445ba4bd48d 100644 --- a/aten/src/ATen/native/sparse/SparseUnaryOps.cpp +++ b/aten/src/ATen/native/sparse/SparseUnaryOps.cpp @@ -257,16 +257,16 @@ Tensor& threshold_backward_sparse_out( } Tensor nan_to_num_sparse( - const Tensor &self, c10::optional nan, - c10::optional posinf, c10::optional neginf) { + const Tensor &self, std::optional nan, + std::optional posinf, c10::optional neginf) { return coalesced_unary_ufunc( self, [&](const Tensor &t) { return at::nan_to_num(t, nan, posinf, neginf); }); } Tensor& nan_to_num_sparse_out( - const Tensor &self, c10::optional nan, - c10::optional posinf, c10::optional neginf, + const Tensor &self, std::optional nan, + std::optional posinf, c10::optional neginf, Tensor &out) { return coalesced_unary_ufunc_out( self, out, [&](const Tensor &t, Tensor &out) { @@ -274,8 +274,8 @@ Tensor& nan_to_num_sparse_out( }); } Tensor& nan_to_num_sparse_( - Tensor &self, c10::optional nan, - c10::optional posinf, c10::optional neginf) { + Tensor &self, std::optional nan, + std::optional posinf, c10::optional neginf) { TORCH_CHECK(self.is_coalesced(), "nan_to_num_ requires coalesced input"); return nan_to_num_sparse_out(self, nan, posinf, neginf, self); } diff --git a/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu b/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu index 75474e77ea848..1ee5a8b9d2c01 100644 --- a/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu +++ b/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu @@ -389,7 +389,7 @@ struct Reduction...Op { }; -Tensor _sparse_csr_..._cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional dtype) { +Tensor _sparse_csr_..._cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, std::optional dtype) { ... result = reduce_sparse_csr_cuda_template(input_, dims_to_sum, keepdim, Reduction...Op()); ... 
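The div_sparse hunks above accept only a scalar or a zero-dim dense divisor and forward an optional rounding_mode. A hedged usage sketch of that surface through at::div; the calls below are believed to match the public ATen overloads, but they are illustrative rather than taken from the patch:

```
#include <ATen/ATen.h>

int main() {
  // Small sparse COO operand (values are arbitrary).
  auto s = at::eye(3).to_sparse();

  // Allowed: a plain scalar or a zero-dim dense divisor.
  auto a = at::div(s, 2.0);
  auto b = at::div(s, at::scalar_tensor(2.0));

  // The optional rounding_mode is forwarded like the dense overloads.
  auto c = at::div(s, at::scalar_tensor(2.0), /*rounding_mode=*/"floor");

  // Not allowed (hits the TORCH_CHECK quoted above): a full-size divisor.
  // auto d = at::div(s, at::ones({3, 3}));

  TORCH_CHECK(a.is_sparse() && b.is_sparse() && c.is_sparse());
  return 0;
}
```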
@@ -708,7 +708,7 @@ struct ReductionMulOp { } // namespace -Tensor _sparse_csr_sum_cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional dtype) { +Tensor _sparse_csr_sum_cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, std::optional dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = at::sparse_csr::to_type(input, dtype_); Tensor result; @@ -724,7 +724,7 @@ Tensor _sparse_csr_sum_cuda(const Tensor& input, IntArrayRef dims_to_sum, bool k return result; } -Tensor _sparse_csr_prod_cuda(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, c10::optional dtype) { +Tensor _sparse_csr_prod_cuda(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, std::optional dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu index 47ee1568beb1e..01aa11dbdecb5 100644 --- a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredLinear.cu @@ -600,9 +600,9 @@ Tensor two_four_sgemm_dispatch_layouts_bias_activation( // number of checks throughout the code. Tensor _sparse_semi_structured_linear( const Tensor& input, const Tensor& weight, - const Tensor& meta, const c10::optional& bias_opt, - const c10::optional activation_opt, - const c10::optional out_dtype_opt) { + const Tensor& meta, const std::optional& bias_opt, + const std::optional activation_opt, + const std::optional out_dtype_opt) { TORCH_WARN_ONCE("_sparse_semi_structured_linear is deprecated and will be " "removed in a future PyTorch release. Please use " "_sparse_semi_structured_mm/_sparse_semi_structured_addmm " diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredOps.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredOps.cu index 8c05acc66bc92..abd6cf9739c63 100644 --- a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredOps.cu +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredOps.cu @@ -522,8 +522,8 @@ void spgemm_cutlass_dispatch_layouts_tensor_c( // aten._sparse_semi_structured_addmm operators. Tensor sparse_semi_structured_mad_op( const Tensor& mat1, const Tensor& mat1_meta, const Tensor& mat2, - const c10::optional& input_opt, const Scalar& alpha, - const Scalar& beta, const c10::optional out_dtype_opt) { + const std::optional& input_opt, const Scalar& alpha, + const Scalar& beta, const std::optional out_dtype_opt) { #if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) AT_ERROR(__func__, " : CUTLASS not supported"); return Tensor{}; @@ -787,9 +787,9 @@ Tensor sparse_semi_structured_mad_op( // Implementation of aten._sparse_semi_structured_mm operator. 
Tensor _sparse_semi_structured_mm( const Tensor& mat1, const Tensor& mat1_meta, const Tensor& mat2, - const c10::optional out_dtype_opt) { + const std::optional out_dtype_opt) { return sparse_semi_structured_mad_op(mat1, mat1_meta, mat2, - c10::optional(), 1, 0, + std::optional(), 1, 0, out_dtype_opt); } @@ -797,7 +797,7 @@ Tensor _sparse_semi_structured_mm( Tensor _sparse_semi_structured_addmm( const Tensor& input, const Tensor& mat1, const Tensor& mat1_meta, const Tensor& mat2, const Scalar& alpha, const Scalar& beta, - const c10::optional out_dtype_opt) { + const std::optional out_dtype_opt) { return sparse_semi_structured_mad_op(mat1, mat1_meta, mat2, input, alpha, beta, out_dtype_opt); } diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu index fd5a04fa61039..b5382b5b08486 100644 --- a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu @@ -207,7 +207,7 @@ std::tuple sparse_semi_structured_tile_t std::string algorithm) { using KT = KernelTypes; - c10::optional device_guard; + std::optional device_guard; if (!input.is_meta()) { device_guard.emplace(input.device()); } diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu index 023e8f73930fd..2fbbaa0290703 100644 --- a/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu @@ -34,7 +34,7 @@ std::tuple _sparse_semi_structured_apply_typed(Tensor input, Ten if (input.stride(1) != 1) { input = input.contiguous(); } - c10::optional device_guard; + std::optional device_guard; if (!kIsMeta) { device_guard.emplace(input.device()); } diff --git a/aten/src/ATen/native/sparse/cuda/cuSPARSELtOps.cpp b/aten/src/ATen/native/sparse/cuda/cuSPARSELtOps.cpp index c66fbf8f2a93d..384fa2422b247 100644 --- a/aten/src/ATen/native/sparse/cuda/cuSPARSELtOps.cpp +++ b/aten/src/ATen/native/sparse/cuda/cuSPARSELtOps.cpp @@ -101,9 +101,9 @@ at::Tensor _cslt_compress(const Tensor& sparse_input) std::tuple _cslt_sparse_mm_impl( const Tensor& compressed_A, const Tensor& dense_B, - const c10::optional& bias_opt, - const c10::optional& alpha_opt, - const c10::optional out_dtype_opt, + const std::optional& bias_opt, + const std::optional& alpha_opt, + const std::optional out_dtype_opt, bool transpose_result, int alg_id, bool search_alg_id @@ -343,9 +343,9 @@ std::tuple _cslt_sparse_mm_impl( at::Tensor _cslt_sparse_mm( const Tensor& compressed_A, const Tensor& dense_B, - const c10::optional& bias_opt, - const c10::optional& alpha_opt, - const c10::optional out_dtype_opt, + const std::optional& bias_opt, + const std::optional& alpha_opt, + const std::optional out_dtype_opt, bool transpose_result, int64_t alg_id ) @@ -365,9 +365,9 @@ at::Tensor _cslt_sparse_mm( int64_t _cslt_sparse_mm_search( const Tensor& compressed_A, const Tensor& dense_B, - const c10::optional& bias_opt, - const c10::optional& alpha_opt, - const c10::optional out_dtype_opt, + const std::optional& bias_opt, + const std::optional& alpha_opt, + const std::optional out_dtype_opt, bool transpose_result ) { @@ -398,9 +398,9 @@ at::Tensor _cslt_compress(const Tensor& sparse_input){ at::Tensor _cslt_sparse_mm( const Tensor& compressed_A, const Tensor& dense_B, - const c10::optional& bias_opt, - const c10::optional& alpha_opt, - const c10::optional out_dtype, + const std::optional& bias_opt, + 
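_sparse_semi_structured_mm above delegates to the multiply-add op with an empty optional input, alpha = 1 and beta = 0, so the addmm form collapses to a plain matmul. The dense identity it relies on, as a quick check (dense tensors, illustrative shapes):

```
#include <ATen/ATen.h>

int main() {
  auto a = at::rand({4, 3});
  auto b = at::rand({3, 5});
  auto unused_input = at::rand({4, 5});

  // addmm(input, a, b, beta, alpha) computes beta * input + alpha * (a @ b);
  // with beta = 0 and alpha = 1 the input no longer contributes.
  auto via_addmm = at::addmm(unused_input, a, b, /*beta=*/0, /*alpha=*/1);
  TORCH_CHECK(at::allclose(via_addmm, at::mm(a, b)));
  return 0;
}
```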
const std::optional& alpha_opt, + const std::optional out_dtype, bool transpose_result, int64_t alg_id) { @@ -410,9 +410,9 @@ at::Tensor _cslt_sparse_mm( int64_t _cslt_sparse_mm_search( const Tensor& compressed_A, const Tensor& dense_B, - const c10::optional& bias_opt, - const c10::optional& alpha_opt, - const c10::optional out_dtype, + const std::optional& bias_opt, + const std::optional& alpha_opt, + const std::optional out_dtype, bool transpose_result ) { diff --git a/aten/src/ATen/native/transformers/attention.cpp b/aten/src/ATen/native/transformers/attention.cpp index e26de29537954..ede02ab1352f0 100644 --- a/aten/src/ATen/native/transformers/attention.cpp +++ b/aten/src/ATen/native/transformers/attention.cpp @@ -106,9 +106,9 @@ Tensor bmm_nt(const Tensor& a, const Tensor& b) { Tensor masked_softmax( Tensor& attn_scores, - c10::optional attn_mask, + std::optional attn_mask, const Tensor& query, - c10::optional mask_type) { + std::optional mask_type) { if (query.is_nested() && !attn_mask) { return at::_nested_tensor_softmax_with_shape(attn_scores, query); } @@ -267,10 +267,10 @@ std::tuple native_multi_head_attention_cpu( const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, - const c10::optional& mask, + const std::optional& mask, bool need_weights, bool average_attn_weights, - const c10::optional mask_type) { + const std::optional mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] @@ -423,7 +423,7 @@ std::tuple native_multi_head_attention_cpu( } int64_t _fused_sdp_choice_cpp(const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, double dropout_p, bool is_causal, c10::optional scale){ + const std::optional& attn_mask_, double dropout_p, bool is_causal, c10::optional scale){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal}; auto backend = sdp::select_sdp_backend_cpp(kernel_params); if (backend == sdp::SDPBackend::error) { @@ -445,10 +445,10 @@ int64_t _fused_sdp_choice_meta( const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, + const std::optional& attn_mask_, double dropout_p, bool is_causal, - c10::optional scale) { + std::optional scale) { auto query_key_set = query_.key_set(); #if defined(USE_ROCM) bool has_rocm = query_key_set.has(c10::DispatchKey::HIP); @@ -479,10 +479,10 @@ inline void validate_sdpa_input( const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, + const std::optional& attn_mask_, double dropout_p, bool is_causal, - c10::optional scale) { + std::optional scale) { TORCH_CHECK( query_.dtype() == key.dtype() && query_.dtype() == value.dtype(), "Expected query, key, and value to have the same dtype, but got query.dtype: ", @@ -512,7 +512,7 @@ inline void validate_sdpa_input( // the math and memory efficient attn_mask implementation // Args: // attn_mask: attn_mask of shape (B, L, S) or (L, S) or (B, N_heads, L, S) -c10::optional convert_boolean_attn_mask(const c10::optional& attn_mask, caffe2::TypeMeta dtype) { +std::optional convert_boolean_attn_mask(const c10::optional& attn_mask, caffe2::TypeMeta dtype) { // Pass through if(!attn_mask.has_value()){ return c10::nullopt; @@ -598,7 +598,7 @@ at::Tensor post_process_flash_output( } int64_t handle_private_use(const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, double dropout_p, bool is_causal, c10::optional scale){ + const std::optional& attn_mask_, double dropout_p, bool 
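convert_boolean_attn_mask in the hunks above hands the math and memory-efficient paths an additive mask: a boolean mask becomes 0.0 where attention is allowed and -inf where it is masked out, so it can simply be added to the scores before the softmax. A hedged sketch of that idea (to_additive_mask is an illustrative name, not the function's actual body):

```
#include <ATen/ATen.h>
#include <limits>
#include <optional>

// Sketch of the conversion idea: true -> 0.0 (keep), false -> -inf (drop).
std::optional<at::Tensor> to_additive_mask(
    const std::optional<at::Tensor>& attn_mask,
    at::ScalarType dtype) {
  if (!attn_mask.has_value()) {
    return std::nullopt;  // pass through, as in the patch
  }
  if (attn_mask->scalar_type() != at::kBool) {
    return attn_mask;     // already an additive mask
  }
  auto additive = at::zeros_like(*attn_mask, dtype);
  return additive.masked_fill(
      attn_mask->logical_not(),
      -std::numeric_limits<double>::infinity());
}

int main() {
  auto keep = at::rand({2, 2}).gt(0.5);  // boolean (L, S) mask; shape illustrative
  auto m = to_additive_mask(keep, at::kFloat);
  TORCH_CHECK(m.has_value() && m->scalar_type() == at::kFloat);
  return 0;
}
```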
is_causal, c10::optional scale){ int64_t choice_int = static_cast(sdp::SDPBackend::math); try { choice_int = _fused_sdp_choice_stub(query_.device().type(), @@ -643,10 +643,10 @@ Tensor scaled_dot_product_attention( const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, + const std::optional& attn_mask_, double dropout_p, bool is_causal, - c10::optional scale) { + std::optional scale) { validate_sdpa_input(query_, key, value, attn_mask_, dropout_p, is_causal, scale); int64_t choice_int = static_cast(sdp::SDPBackend::math); if (query_.device().type() == DeviceType::CUDA @@ -662,7 +662,7 @@ Tensor scaled_dot_product_attention( } } sdp::SDPBackend backend = static_cast(choice_int); - c10::optional attn_mask = convert_boolean_attn_mask(attn_mask_, query_.dtype()); + std::optional attn_mask = convert_boolean_attn_mask(attn_mask_, query_.dtype()); switch (backend) { case sdp::SDPBackend::cudnn_attention: { bool compute_logsumexp = @@ -719,8 +719,8 @@ Tensor scaled_dot_product_attention( std::tuple _scaled_dot_product_attention_math( const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, double dropout_p, bool is_causal, - const c10::optional& dropout_mask, c10::optional scale) { + const std::optional& attn_mask_, double dropout_p, bool is_causal, + const std::optional& dropout_mask, c10::optional scale) { C10_LOG_API_USAGE_ONCE("torch.sdpa.math_fallback"); if (query_.is_nested() || key.is_nested() || value.is_nested()) { TORCH_CHECK( @@ -779,8 +779,8 @@ _scaled_dot_product_flash_attention_cpu( const Tensor& value, double dropout_p, bool is_causal, - const c10::optional& attn_mask, - c10::optional scale) { + const std::optional& attn_mask, + std::optional scale) { const auto dtype = query.scalar_type(); int64_t batchSize = query.size(0); int64_t qSize = query.size(2); @@ -827,8 +827,8 @@ _scaled_dot_product_flash_attention_cpu_backward( const Tensor& logsumexp, double dropout_p, bool is_causal, - const c10::optional& attn_mask, - c10::optional scale) { + const std::optional& attn_mask, + std::optional scale) { if (!grad_out.defined()) { return std::make_tuple(Tensor{}, Tensor{}, Tensor{}); } @@ -864,7 +864,7 @@ Tensor triton_multi_head_attention( const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, - const c10::optional& mask) { + const std::optional& mask) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] TORCH_CHECK(!mask, "Only causal mask is supported for Triton."); diff --git a/aten/src/ATen/native/transformers/attention.h b/aten/src/ATen/native/transformers/attention.h index 2d2740a92e7dc..0e4a52f445442 100644 --- a/aten/src/ATen/native/transformers/attention.h +++ b/aten/src/ATen/native/transformers/attention.h @@ -9,16 +9,16 @@ namespace at { namespace native { using fused_sdp_choice_fn = int64_t (*)(const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, double dropout_p, bool is_causal, c10::optional scale); + const std::optional& attn_mask_, double dropout_p, bool is_causal, c10::optional scale); DECLARE_DISPATCH(fused_sdp_choice_fn, _fused_sdp_choice_stub); TORCH_API Tensor bmm_nt(const Tensor& a, const Tensor& b); TORCH_API Tensor masked_softmax( Tensor& attn_scores, - c10::optional attn_mask, + std::optional attn_mask, const Tensor& query, - c10::optional mask_type = {}); + std::optional mask_type = {}); using transform_bias_rescale_qkv_fn = void(*)( at::ScalarType type, @@ -53,8 +53,8 @@ using flash_attention_fn = void (*)( 
const Tensor& output, const Tensor& logsumexp, const Tensor& query, const Tensor& key, const Tensor& value, double dropout_p, bool is_causal, - c10::optional attn_mask, - c10::optional scale); + std::optional attn_mask, + std::optional scale); using flash_attention_backward_fn = void (*)( const Tensor& grad_q, const Tensor& grad_k, @@ -62,8 +62,8 @@ using flash_attention_backward_fn = void (*)( const Tensor& query, const Tensor& key, const Tensor& value, const Tensor& out, const Tensor& logsumexp, double dropout_p, bool is_causal, - c10::optional attn_mask, - c10::optional scale); + std::optional attn_mask, + std::optional scale); DECLARE_DISPATCH(flash_attention_fn, flash_attention_kernel); DECLARE_DISPATCH(flash_attention_backward_fn, flash_attention_backward_kernel); diff --git a/aten/src/ATen/native/transformers/cuda/attention.cu b/aten/src/ATen/native/transformers/cuda/attention.cu index dcf451feead7b..e55560791a085 100644 --- a/aten/src/ATen/native/transformers/cuda/attention.cu +++ b/aten/src/ATen/native/transformers/cuda/attention.cu @@ -479,10 +479,10 @@ std::tuple native_multi_head_attention_cuda( const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, - const c10::optional& mask, + const std::optional& mask, bool need_weights, bool average_attn_weights, - const c10::optional mask_type) { + const std::optional mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] @@ -681,7 +681,7 @@ std::tuple scale) { + std::optional scale) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) @@ -733,7 +733,7 @@ std::tuple scale) { + std::optional scale) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention_cudnn"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) @@ -780,11 +780,11 @@ std::tuple _scaled_dot_product_efficient_attenti const Tensor& query, const Tensor& key, const Tensor& value, - const c10::optional& attn_bias, + const std::optional& attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, - c10::optional scale) { + std::optional scale) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) @@ -817,7 +817,7 @@ std::tuple _scaled_dot_product_efficient_attenti } int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value, - const c10::optional& attn_mask_, double dropout_p, bool is_causal, c10::optional scale){ + const std::optional& attn_mask_, double dropout_p, bool is_causal, c10::optional scale){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_, dropout_p, is_causal}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::error) { @@ -834,23 +834,23 @@ _flash_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, - const c10::optional& cumulative_sequence_length_q, - const c10::optional& cumulative_sequence_length_k, + const std::optional& cumulative_sequence_length_q, + const std::optional& cumulative_sequence_length_k, int64_t max_seqlen_batch_q, int64_t max_seqlen_batch_k, double dropout_p, bool is_causal, bool return_debug_mask, - c10::optional scale) { + std::optional scale) { #if defined(USE_FLASH_ATTENTION) const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked(); - c10::optional out = c10::nullopt; + std::optional out = c10::nullopt; // This can be used 
when your sequence length k is not the full extent // of the tensor. This is useful for kv cache scenarios but for now // we will not support in this PR. - c10::optional seqused_k = c10::nullopt; - c10::optional alibi_slopes = c10::nullopt; + std::optional seqused_k = c10::nullopt; + std::optional alibi_slopes = c10::nullopt; // We are going to have two paths: // 1. The standard MHA path for dense tensors @@ -937,23 +937,23 @@ std::tuple _efficient_ const at::Tensor& query, // [b, seqlen, num_heads, K] const at::Tensor& key, // [b, seqlen, num_heads, K] const at::Tensor& value, // [b, seqlen, num_heads, Kv] - const c10::optional& bias, // [b, num_heads, seqlen, seqlen] + const std::optional& bias, // [b, num_heads, seqlen, seqlen] // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b - const c10::optional& seqstart_q, + const std::optional& seqstart_q, // (Mode 1MHK only) [b+1]: cu_seqlen_k[b] contains the // position of the first key token for batch $b - const c10::optional& seqstart_k, + const std::optional& seqstart_k, // (Mode 1MHK only) Maximum sequence length across batches - const c10::optional max_seqlen_q_, - const c10::optional max_seqlen_k_, + const std::optional max_seqlen_q_, + const std::optional max_seqlen_k_, double dropout_p, // attention matrix dropout probability int64_t custom_mask_type, bool compute_logsumexp, - c10::optional scale, - const c10::optional& causal_diagonal, - const c10::optional& seqlen_k, - const c10::optional window_size) { + std::optional scale, + const std::optional& causal_diagonal, + const std::optional& seqlen_k, + const std::optional window_size) { #if defined(USE_MEM_EFF_ATTENTION) // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a // machine that is >= 5.0. 
In practice, this is not a problem but since diff --git a/aten/src/ATen/native/transformers/cuda/attention_backward.cu b/aten/src/ATen/native/transformers/cuda/attention_backward.cu index 0405b6d73329f..78c2d54fdc8a6 100644 --- a/aten/src/ATen/native/transformers/cuda/attention_backward.cu +++ b/aten/src/ATen/native/transformers/cuda/attention_backward.cu @@ -66,22 +66,22 @@ std::tuple _flash_attention_backward( bool is_causal, const Tensor& philox_seed, const Tensor& philox_offset, - c10::optional scale) { + std::optional scale) { #if defined(USE_FLASH_ATTENTION) const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked(); // CUDA code assumes that dout is contiguous auto contiguous_grad_out = grad_out.contiguous(); auto contiguous_out = out.contiguous(); - c10::optional dq{c10::nullopt}; - c10::optional dk{c10::nullopt}; - c10::optional dv{c10::nullopt}; + std::optional dq{c10::nullopt}; + std::optional dk{c10::nullopt}; + std::optional dv{c10::nullopt}; // The kernel computes irregardless we will drop for this functions return Tensor grad_softmax; // Currently unused args: - c10::optional alibi_slopes{c10::nullopt}; + std::optional alibi_slopes{c10::nullopt}; bool determinisitic{false}; auto& ctx = at::globalContext(); @@ -167,7 +167,7 @@ std::tuple _scaled_dot_product_cudnn_attention_backward_ bool is_causal, const Tensor& philox_seed, const Tensor& philox_offset, - c10::optional scale) { + std::optional scale) { const int64_t batch_size = query.size(0); const int64_t num_heads = query.size(1); const int64_t head_dim = query.size(3); @@ -205,14 +205,14 @@ _efficient_attention_backward( const at::Tensor& query, const at::Tensor& key, const at::Tensor& value, - const c10::optional& kernel_bias, // additive attention bias + const std::optional& kernel_bias, // additive attention bias const at::Tensor& out, // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b - const c10::optional& cu_seqlens_q_dummy, + const std::optional& cu_seqlens_q_dummy, // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the // position of the first key token for batch $b - const c10::optional& cu_seqlens_k_dummy, + const std::optional& cu_seqlens_k_dummy, // (Mode 1MHK only) Maximum sequence length across batches int64_t max_seqlen_q, // (Mode 1MHK only) Maximum sequence length across batches @@ -223,9 +223,9 @@ _efficient_attention_backward( const at::Tensor& philox_offset, // offset into random number sequence int64_t custom_mask_type, const bool bias_requires_grad, - const c10::optional scale, - c10::optional num_splits_key, - const c10::optional window_size) { + const std::optional scale, + std::optional num_splits_key, + const std::optional window_size) { #if defined(USE_MEM_EFF_ATTENTION) if (!grad_out_.defined()) { return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{}); @@ -233,8 +233,8 @@ _efficient_attention_backward( // This path is used when we directly call _efficient_attention_forward // from python. // This is needed because SaveVariable automatically converts - // c10::optional to undefined tensor - c10::optional bias, cu_seqlens_q, cu_seqlens_k; + // std::optional to undefined tensor + std::optional bias, cu_seqlens_q, cu_seqlens_k; bias = kernel_bias.has_value() && !kernel_bias->defined() ? c10::nullopt : kernel_bias; cu_seqlens_q = cu_seqlens_q_dummy.has_value() && !cu_seqlens_q_dummy->defined() ? c10::nullopt : cu_seqlens_q_dummy; cu_seqlens_k = cu_seqlens_k_dummy.has_value() && !cu_seqlens_k_dummy->defined() ? 
c10::nullopt : cu_seqlens_k_dummy; @@ -603,7 +603,7 @@ std::tuple _scaled_dot_product_flash_attenti bool is_causal, const at::Tensor& philox_seed, const at::Tensor& philox_offset, - c10::optional scale){ + std::optional scale){ if (!grad_out_.defined()) { return std::make_tuple(Tensor{}, Tensor{}, Tensor{}); } @@ -653,7 +653,7 @@ std::tuple _scaled_dot_product_e double dropout_p, std::array grad_input_mask, bool causal, - c10::optional scale) { + std::optional scale) { if (!grad_out_.defined()) { return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{}); @@ -667,8 +667,8 @@ std::tuple _scaled_dot_product_e Tensor grad_q, grad_k, grad_v, grad_bias; // This is needed because SaveVariable automatically converts - // c10::optional to undefined tensor - c10::optional kernel_bias; + // std::optional to undefined tensor + std::optional kernel_bias; if (attn_bias.defined()) { kernel_bias = attn_bias; } diff --git a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp index 8f6f7a9f357dc..5c7db42368931 100644 --- a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp +++ b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.cpp @@ -322,7 +322,7 @@ void set_params_splitkv(Flash_fwd_params &params, const int batch_size, } } -void set_params_alibi(Flash_fwd_params &params, c10::optional &alibi_slopes_, int batch_size, int num_heads){ +void set_params_alibi(Flash_fwd_params &params, std::optional &alibi_slopes_, int batch_size, int num_heads){ #ifdef FLASHATTENTION_DISABLE_ALIBI TORCH_CHECK(!alibi_slopes_.has_value(), "This flash attention build does not support alibi."); params.alibi_slopes_ptr = nullptr; @@ -346,15 +346,15 @@ std::tuple &out_, // batch_size x seqlen_q x num_heads x head_size - c10::optional &alibi_slopes_, // num_heads or batch_size x num_heads + std::optional &out_, // batch_size x seqlen_q x num_heads x head_size + std::optional &alibi_slopes_, // num_heads or batch_size x num_heads const float p_dropout, const float softmax_scale, bool is_causal, int window_size_left, int window_size_right, const bool return_softmax, - c10::optional gen_) { + std::optional gen_) { auto dprops = at::cuda::getCurrentDeviceProperties(); // bool is_sm75 = dprops->major == 7 && dprops->minor == 5; @@ -532,11 +532,11 @@ std::tuple &out_, // total_q x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i + std::optional &out_, // total_q x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i const at::Tensor &cu_seqlens_q, // b+1 const at::Tensor &cu_seqlens_k, // b+1 - c10::optional &seqused_k, // b. If given, only this many elements of each batch element's keys are used. - c10::optional &alibi_slopes_, // num_heads or b x num_heads + std::optional &seqused_k, // b. If given, only this many elements of each batch element's keys are used.
+ std::optional &alibi_slopes_, // num_heads or b x num_heads int max_seqlen_q, const int max_seqlen_k, const float p_dropout, @@ -546,7 +546,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q int window_size_left, int window_size_right, const bool return_softmax, - c10::optional gen_) { + std::optional gen_) { auto dprops = at::cuda::getCurrentDeviceProperties(); // bool is_sm75 = dprops->major == 7 && dprops->minor == 5; @@ -765,10 +765,10 @@ mha_bwd(const at::Tensor &dout, // batch_size x seqlen_q x num_heads, x head_si const at::Tensor &v, // batch_size x seqlen_k x num_heads_k x head_size const at::Tensor &out, // batch_size x seqlen_q x num_heads x head_size const at::Tensor &softmax_lse, // b x h x seqlen_q - c10::optional &dq_, // batch_size x seqlen_q x num_heads x head_size - c10::optional &dk_, // batch_size x seqlen_k x num_heads_k x head_size - c10::optional &dv_, // batch_size x seqlen_k x num_heads_k x head_size - c10::optional &alibi_slopes_, // num_heads or batch_size x num_heads + std::optional &dq_, // batch_size x seqlen_q x num_heads x head_size + std::optional &dk_, // batch_size x seqlen_k x num_heads_k x head_size + std::optional &dv_, // batch_size x seqlen_k x num_heads_k x head_size + std::optional &alibi_slopes_, // num_heads or batch_size x num_heads const float p_dropout, // probability to drop const float softmax_scale, const bool is_causal, @@ -976,12 +976,12 @@ mha_varlen_bwd(const at::Tensor &dout, // total_q x num_heads, x head_size const at::Tensor &v, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i const at::Tensor &out, // total_q x num_heads x head_size const at::Tensor &softmax_lse, // b x h x s softmax logsumexp - c10::optional &dq_, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i - c10::optional &dk_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i - c10::optional &dv_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i + std::optional &dq_, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i + std::optional &dk_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i + std::optional &dv_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i const at::Tensor &cu_seqlens_q, // b+1 const at::Tensor &cu_seqlens_k, // b+1 - c10::optional &alibi_slopes_, // num_heads or b x num_heads + std::optional &alibi_slopes_, // num_heads or b x num_heads const int max_seqlen_q, const int max_seqlen_k, // max sequence length to choose the kernel const float p_dropout, // probability to drop @@ -1208,15 +1208,15 @@ std::tuple mha_fwd_kvcache(at::Tensor &q, // batch_size x seqlen_q x num_heads x head_size const at::Tensor &kcache, // batch_size_c x seqlen_k x num_heads_k x head_size or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table. const at::Tensor &vcache, // batch_size_c x seqlen_k x num_heads_k x head_size or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table. 
- c10::optional &k_, // batch_size x seqlen_knew x num_heads_k x head_size - c10::optional &v_, // batch_size x seqlen_knew x num_heads_k x head_size - c10::optional &seqlens_k_, // batch_size - c10::optional &rotary_cos_, // seqlen_ro x (rotary_dim / 2) - c10::optional &rotary_sin_, // seqlen_ro x (rotary_dim / 2) - c10::optional &cache_batch_idx_, // indices to index into the KV cache - c10::optional &block_table_, // batch_size x max_num_blocks_per_seq - c10::optional &alibi_slopes_, // num_heads or batch_size x num_heads - c10::optional &out_, // batch_size x seqlen_q x num_heads x head_size + std::optional &k_, // batch_size x seqlen_knew x num_heads_k x head_size + std::optional &v_, // batch_size x seqlen_knew x num_heads_k x head_size + std::optional &seqlens_k_, // batch_size + std::optional &rotary_cos_, // seqlen_ro x (rotary_dim / 2) + std::optional &rotary_sin_, // seqlen_ro x (rotary_dim / 2) + std::optional &cache_batch_idx_, // indices to index into the KV cache + std::optional &block_table_, // batch_size x max_num_blocks_per_seq + std::optional &alibi_slopes_, // num_heads or batch_size x num_heads + std::optional &out_, // batch_size x seqlen_q x num_heads x head_size const float softmax_scale, bool is_causal, int window_size_left, diff --git a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h index 2745b28dca29b..a3aa8aaa7adff 100644 --- a/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h +++ b/aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h @@ -11,25 +11,25 @@ std::tuple &out_, // batch_size x seqlen_q x num_heads x head_size - c10::optional &alibi_slopes_, // num_heads or batch_size x num_heads + std::optional &out_, // batch_size x seqlen_q x num_heads x head_size + std::optional &alibi_slopes_, // num_heads or batch_size x num_heads const float p_dropout, const float softmax_scale, bool is_causal, int window_size_left, int window_size_right, const bool return_softmax, - c10::optional gen_); + std::optional gen_); std::tuple mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i const at::Tensor &k, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i const at::Tensor &v, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i - c10::optional &out_, // total_q x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i + std::optional &out_, // total_q x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i const at::Tensor &cu_seqlens_q, // b+1 const at::Tensor &cu_seqlens_k, // b+1 - c10::optional &seqused_k, // b. If given, only this many elements of each batch element's keys are used. - c10::optional &alibi_slopes_, // num_heads or b x num_heads + std::optional &seqused_k, // b. If given, only this many elements of each batch element's keys are used. 
+ std::optional &alibi_slopes_, // num_heads or b x num_heads int max_seqlen_q, const int max_seqlen_k, const float p_dropout, @@ -39,7 +39,7 @@ mha_varlen_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q int window_size_left, int window_size_right, const bool return_softmax, - c10::optional gen_); + std::optional gen_); std::tuple @@ -49,10 +49,10 @@ mha_bwd(const at::Tensor &dout, // batch_size x seqlen_q x num_heads, x head_si const at::Tensor &v, // batch_size x seqlen_k x num_heads_k x head_size const at::Tensor &out, // batch_size x seqlen_q x num_heads x head_size const at::Tensor &softmax_lse, // b x h x seqlen_q - c10::optional &dq_, // batch_size x seqlen_q x num_heads x head_size - c10::optional &dk_, // batch_size x seqlen_k x num_heads_k x head_size - c10::optional &dv_, // batch_size x seqlen_k x num_heads_k x head_size - c10::optional &alibi_slopes_, // num_heads or batch_size x num_heads + std::optional &dq_, // batch_size x seqlen_q x num_heads x head_size + std::optional &dk_, // batch_size x seqlen_k x num_heads_k x head_size + std::optional &dv_, // batch_size x seqlen_k x num_heads_k x head_size + std::optional &alibi_slopes_, // num_heads or batch_size x num_heads const float p_dropout, // probability to drop const float softmax_scale, const bool is_causal, @@ -69,12 +69,12 @@ mha_varlen_bwd(const at::Tensor &dout, // total_q x num_heads, x head_size const at::Tensor &v, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i const at::Tensor &out, // total_q x num_heads x head_size const at::Tensor &softmax_lse, // b x h x s softmax logsumexp - c10::optional &dq_, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i - c10::optional &dk_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i - c10::optional &dv_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i + std::optional &dq_, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i + std::optional &dk_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i + std::optional &dv_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i const at::Tensor &cu_seqlens_q, // b+1 const at::Tensor &cu_seqlens_k, // b+1 - c10::optional &alibi_slopes_, // num_heads or b x num_heads + std::optional &alibi_slopes_, // num_heads or b x num_heads const int max_seqlen_q, const int max_seqlen_k, // max sequence length to choose the kernel const float p_dropout, // probability to drop diff --git a/aten/src/ATen/native/transformers/sdp_utils_cpp.h b/aten/src/ATen/native/transformers/sdp_utils_cpp.h index 6e15a27fae542..7c56a1f617dbc 100644 --- a/aten/src/ATen/native/transformers/sdp_utils_cpp.h +++ b/aten/src/ATen/native/transformers/sdp_utils_cpp.h @@ -44,7 +44,7 @@ struct sdp_params { at::Tensor query; at::Tensor key; at::Tensor value; - c10::optional attn_mask; + std::optional attn_mask; double dropout; bool is_causal; }; @@ -53,7 +53,7 @@ SDPBackend select_sdp_backend_cpp(sdp_params const& kernel_params); inline c10::SymFloat calculate_scale( const at::Tensor& query, - c10::optional scale) { + std::optional scale) { const auto softmax_scale = scale.has_value() ? 
scale.value() : (c10::SymFloat(1.0) / (c10::SymFloat(query.sym_size(-1)).sqrt())); diff --git a/aten/src/ATen/native/transformers/transformer.cpp b/aten/src/ATen/native/transformers/transformer.cpp index 4f64c95b204b2..b551100555675 100644 --- a/aten/src/ATen/native/transformers/transformer.cpp +++ b/aten/src/ATen/native/transformers/transformer.cpp @@ -27,7 +27,7 @@ Tensor linear_for_ffn( const Tensor& bias, const Tensor& mat1, const Tensor& mat2, - c10::optional use_gelu) { + std::optional use_gelu) { if (mat1.is_nested()) { return NestedTensor_times_Tensor_plus_Tensor_addmm( bias, mat1, mat2.t(), 1, 1, use_gelu); @@ -91,8 +91,8 @@ Tensor transformer_encoder_layer_forward( const Tensor& ffn_bias_1, const Tensor& ffn_weight_2, const Tensor& ffn_bias_2, - const c10::optional& mask, - const c10::optional mask_type) { + const std::optional& mask, + const std::optional mask_type) { { const Tensor& check_for_empty = src.is_nested() ? get_nested_tensor_impl(src)->get_buffer() : src; if (check_for_empty.numel() == 0) { diff --git a/aten/src/ATen/native/utils/Factory.cpp b/aten/src/ATen/native/utils/Factory.cpp index ea6be4e017552..28ef6477e3335 100644 --- a/aten/src/ATen/native/utils/Factory.cpp +++ b/aten/src/ATen/native/utils/Factory.cpp @@ -12,7 +12,7 @@ Tensor empty_with_tail_padding( const IntArrayRef size, const caffe2::TypeMeta dtype, const c10::MemoryFormat memory_format, - c10::optional maybe_names) { + std::optional maybe_names) { auto* const allocator_ptr = c10::GetDefaultMobileCPUAllocator(); const int64_t nelements = c10::multiply_integers(size); size_t size_bytes = nelements * dtype.itemsize(); diff --git a/aten/src/ATen/native/utils/Factory.h b/aten/src/ATen/native/utils/Factory.h index bd153aaa67529..b0302417cdce0 100644 --- a/aten/src/ATen/native/utils/Factory.h +++ b/aten/src/ATen/native/utils/Factory.h @@ -17,7 +17,7 @@ at::Tensor empty_with_tail_padding( IntArrayRef size, const caffe2::TypeMeta dtype, c10::MemoryFormat memory_format, - c10::optional maybe_names); + std::optional maybe_names); } // namespace mobile } // namespace native diff --git a/aten/src/ATen/native/vulkan/ops/Batchnorm.cpp b/aten/src/ATen/native/vulkan/ops/Batchnorm.cpp index 3f583ddc3c4ae..e12e69c4ebec2 100644 --- a/aten/src/ATen/native/vulkan/ops/Batchnorm.cpp +++ b/aten/src/ATen/native/vulkan/ops/Batchnorm.cpp @@ -73,10 +73,10 @@ using namespace api::utils; Tensor batch_norm( const at::Tensor& input_arg, - const c10::optional& weight_opt /* optional */, - const c10::optional& bias_opt /* optional */, - const c10::optional& running_mean_opt /* optional */, - const c10::optional& running_var_opt /* optional */, + const std::optional& weight_opt /* optional */, + const std::optional& bias_opt /* optional */, + const std::optional& running_mean_opt /* optional */, + const std::optional& running_var_opt /* optional */, bool training, double /* momentum, not used in eval mode */, double eps, @@ -104,10 +104,10 @@ TORCH_LIBRARY_IMPL(aten, Vulkan, m) { } // namespace BatchNormPackedContext::BatchNormPackedContext( - const c10::optional& weight_opt, - const c10::optional& bias_opt, - const c10::optional& running_mean_opt, - const c10::optional& running_var_opt, + const std::optional& weight_opt, + const std::optional& bias_opt, + const std::optional& running_mean_opt, + const std::optional& running_var_opt, double eps) : unpacked_{c10::AnyType::get()} { packed_.reserve(ListArgs::kNumArgs); @@ -181,10 +181,10 @@ BatchNormPackedContext BatchNormPackedContext::pack( } c10::intrusive_ptr create_batchnorm_context( - 
c10::optional&& weight_opt, - c10::optional&& bias_opt, - c10::optional&& running_mean_opt, - c10::optional&& running_var_opt, + std::optional&& weight_opt, + std::optional&& bias_opt, + std::optional&& running_mean_opt, + std::optional&& running_var_opt, bool training, double /* momentum */, double eps, diff --git a/aten/src/ATen/native/vulkan/ops/Batchnorm.h b/aten/src/ATen/native/vulkan/ops/Batchnorm.h index 6afaeb6f243b3..4108b0d4e3201 100644 --- a/aten/src/ATen/native/vulkan/ops/Batchnorm.h +++ b/aten/src/ATen/native/vulkan/ops/Batchnorm.h @@ -18,10 +18,10 @@ class BatchNormPackedContext final : virtual public VulkanPackedContext, public: BatchNormPackedContext( - const c10::optional& weight_opt, - const c10::optional& bias_opt, - const c10::optional& running_mean_opt, - const c10::optional& running_var_opt, + const std::optional& weight_opt, + const std::optional& bias_opt, + const std::optional& running_mean_opt, + const std::optional& running_var_opt, double eps); /* @@ -47,10 +47,10 @@ class BatchNormPackedContext final : virtual public VulkanPackedContext, }; c10::intrusive_ptr create_batchnorm_context( - c10::optional&& weight_opt, - c10::optional&& bias_opt, - c10::optional&& running_mean_opt, - c10::optional&& running_var_opt, + std::optional&& weight_opt, + std::optional&& bias_opt, + std::optional&& running_mean_opt, + std::optional&& running_var_opt, bool training, double /* momentum */, double eps, diff --git a/aten/src/ATen/native/vulkan/ops/BinaryOp.cpp b/aten/src/ATen/native/vulkan/ops/BinaryOp.cpp index c08363a17f8eb..e1445f40ac5f8 100644 --- a/aten/src/ATen/native/vulkan/ops/BinaryOp.cpp +++ b/aten/src/ATen/native/vulkan/ops/BinaryOp.cpp @@ -15,7 +15,7 @@ using namespace api::utils; Tensor binary_op_scalar( const Tensor& self_arg, const Scalar& other, - const c10::optional& alpha_arg, + const std::optional& alpha_arg, const api::ShaderInfo& shader_descriptor) { api::Context* const context = api::context(); @@ -102,7 +102,7 @@ Tensor binary_op_preprocess_other_arg(const Tensor& other_arg) { Tensor& binary_op_scalar_( Tensor& self_arg, const Scalar& other, - const c10::optional& alpha_arg, + const std::optional& alpha_arg, const api::ShaderInfo& shader_descriptor) { TORCH_CHECK( self_arg.is_vulkan(), @@ -152,7 +152,7 @@ Tensor& binary_op_scalar_( Tensor binary_op_tensor( const Tensor& self_arg, const Tensor& other_arg, - const c10::optional& alpha_arg, + const std::optional& alpha_arg, const api::ShaderInfo& shader_descriptor) { utils::is_broadcastable(self_arg, other_arg); api::Context* const context = api::context(); @@ -313,7 +313,7 @@ Tensor quantized_binary_op_tensor( Tensor& binary_op_tensor_( Tensor& self_arg, const Tensor& other_arg, - const c10::optional& alpha_arg, + const std::optional& alpha_arg, const api::ShaderInfo& shader_descriptor) { TORCH_CHECK( get_dim(self_arg) >= get_dim(other_arg) && @@ -389,12 +389,12 @@ Tensor add_scalar( const Scalar& other, const Scalar& alpha) { return binary_op_scalar( - self_arg, other, c10::optional(alpha), VK_KERNEL(add_scalar)); + self_arg, other, std::optional(alpha), VK_KERNEL(add_scalar)); } Tensor& add_scalar_(Tensor& self, const Scalar& other, const Scalar& alpha) { return binary_op_scalar_( - self, other, c10::optional(alpha), VK_KERNEL(add_scalar_inplace)); + self, other, std::optional(alpha), VK_KERNEL(add_scalar_inplace)); } Tensor quantized_add( @@ -438,7 +438,7 @@ Tensor add_tensor( const Tensor& other_arg, const Scalar& alpha) { return binary_op_tensor( - self_arg, other_arg, c10::optional(alpha), 
VK_KERNEL(add)); + self_arg, other_arg, std::optional(alpha), VK_KERNEL(add)); } Tensor& add_tensor_( @@ -446,7 +446,7 @@ Tensor& add_tensor_( const Tensor& other_arg, const Scalar& alpha) { return binary_op_tensor_( - self, other_arg, c10::optional(alpha), VK_KERNEL(add_inplace)); + self, other_arg, std::optional(alpha), VK_KERNEL(add_inplace)); } Tensor sub_scalar( @@ -456,7 +456,7 @@ Tensor sub_scalar( return binary_op_scalar( self_arg, other, - c10::optional(-1 * alpha.to()), + std::optional(-1 * alpha.to()), VK_KERNEL(add_scalar)); } @@ -464,7 +464,7 @@ Tensor& sub_scalar_(Tensor& self, const Scalar& other, const Scalar& alpha) { return binary_op_scalar_( self, other, - c10::optional(-1 * alpha.to()), + std::optional(-1 * alpha.to()), VK_KERNEL(add_scalar_inplace)); } @@ -473,7 +473,7 @@ Tensor sub_tensor( const Tensor& other_arg, const Scalar& alpha) { return binary_op_tensor( - self_arg, other_arg, c10::optional(alpha), VK_KERNEL(sub)); + self_arg, other_arg, std::optional(alpha), VK_KERNEL(sub)); } Tensor& sub_tensor_( @@ -481,34 +481,34 @@ Tensor& sub_tensor_( const Tensor& other_arg, const Scalar& alpha) { return binary_op_tensor_( - self, other_arg, c10::optional(alpha), VK_KERNEL(sub_inplace)); + self, other_arg, std::optional(alpha), VK_KERNEL(sub_inplace)); } Tensor mul_scalar(const Tensor& self_arg, const Scalar& other) { return binary_op_scalar( - self_arg, other, c10::optional(), VK_KERNEL(mul_scalar)); + self_arg, other, std::optional(), VK_KERNEL(mul_scalar)); } Tensor& mul_scalar_(Tensor& self, const Scalar& other) { return binary_op_scalar_( - self, other, c10::optional(), VK_KERNEL(mul_scalar_inplace)); + self, other, std::optional(), VK_KERNEL(mul_scalar_inplace)); } Tensor mul_tensor(const Tensor& self_arg, const Tensor& other_arg) { return binary_op_tensor( - self_arg, other_arg, c10::optional(), VK_KERNEL(mul)); + self_arg, other_arg, std::optional(), VK_KERNEL(mul)); } Tensor& mul_tensor_(Tensor& self, const Tensor& other_arg) { return binary_op_tensor_( - self, other_arg, c10::optional(), VK_KERNEL(mul_inplace)); + self, other_arg, std::optional(), VK_KERNEL(mul_inplace)); } Tensor div_scalar(const Tensor& self_arg, const Scalar& other) { return binary_op_scalar( self_arg, 1.0 / other.to(), - c10::optional(), + std::optional(), VK_KERNEL(mul_scalar)); } @@ -516,45 +516,45 @@ Tensor& div_scalar_(Tensor& self, const Scalar& other) { return binary_op_scalar_( self, 1.0 / other.to(), - c10::optional(), + std::optional(), VK_KERNEL(mul_scalar_inplace)); } Tensor div_tensor(const Tensor& self_arg, const Tensor& other_arg) { return binary_op_tensor( - self_arg, other_arg, c10::optional(), VK_KERNEL(div)); + self_arg, other_arg, std::optional(), VK_KERNEL(div)); } Tensor& div_tensor_(Tensor& self, const Tensor& other_arg) { return binary_op_tensor_( - self, other_arg, c10::optional(), VK_KERNEL(div_inplace)); + self, other_arg, std::optional(), VK_KERNEL(div_inplace)); } Tensor pow(const Tensor& self, const Tensor& other) { - return binary_op_tensor(self, other, c10::optional(), VK_KERNEL(pow)); + return binary_op_tensor(self, other, std::optional(), VK_KERNEL(pow)); } Tensor& pow_(Tensor& self, const Tensor& other) { return binary_op_tensor_( - self, other, c10::optional(), VK_KERNEL(pow_inplace)); + self, other, std::optional(), VK_KERNEL(pow_inplace)); } Tensor pow_tensor_scalar(const Tensor& self, const Scalar& other) { return binary_op_scalar( - self, other, c10::optional(), VK_KERNEL(pow_tensor_scalar)); + self, other, std::optional(), 
VK_KERNEL(pow_tensor_scalar)); } Tensor& pow_tensor_scalar_(Tensor& self, const Scalar& other) { return binary_op_scalar_( self, other, - c10::optional(), + std::optional(), VK_KERNEL(pow_tensor_scalar_inplace)); } Tensor pow_scalar_tensor(const Scalar& self, const Tensor& other) { return binary_op_scalar( - other, self, c10::optional(), VK_KERNEL(pow_scalar_tensor)); + other, self, std::optional(), VK_KERNEL(pow_scalar_tensor)); } Tensor floor_divide_scalar(const Tensor& self, const Scalar& other) { @@ -563,7 +563,7 @@ Tensor floor_divide_scalar(const Tensor& self, const Scalar& other) { return binary_op_scalar( self, 1.0 / other.to(), - c10::optional(), + std::optional(), VK_KERNEL(floor_mul_scalar)); } @@ -573,20 +573,20 @@ Tensor& floor_divide_scalar_(Tensor& self, const Scalar& other) { return binary_op_scalar_( self, 1.0 / other.to(), - c10::optional(), + std::optional(), VK_KERNEL(floor_mul_scalar_inplace)); } Tensor floor_divide_tensor(const Tensor& self, const Tensor& other) { return binary_op_tensor( - self, other, c10::optional(), VK_KERNEL(floor_divide)); + self, other, std::optional(), VK_KERNEL(floor_divide)); } Tensor& floor_divide_tensor_(Tensor& self, const Tensor& other_arg) { return binary_op_tensor_( self, other_arg, - c10::optional(), + std::optional(), VK_KERNEL(floor_divide_inplace)); } diff --git a/aten/src/ATen/native/vulkan/ops/Clamp.cpp b/aten/src/ATen/native/vulkan/ops/Clamp.cpp index 3cc4dd3d3c4bc..e336b01323666 100644 --- a/aten/src/ATen/native/vulkan/ops/Clamp.cpp +++ b/aten/src/ATen/native/vulkan/ops/Clamp.cpp @@ -11,8 +11,8 @@ using namespace api::utils; Tensor _clamp( const Tensor& self_arg, - const c10::optional& min, - const c10::optional& max, + const std::optional& min, + const std::optional& max, const api::ShaderInfo& shader_descriptor) { TORCH_CHECK(min || max, "At least one of 'min' or 'max' must not be None"); @@ -96,15 +96,15 @@ Tensor _clamp( Tensor clamp( const Tensor& self_arg, - const c10::optional& min, - const c10::optional& max) { + const std::optional& min, + const std::optional& max) { return _clamp(self_arg, min, max, VK_KERNEL(clamp)); } Tensor& _clamp_( Tensor& self_arg, - const c10::optional& min, - const c10::optional& max, + const std::optional& min, + const std::optional& max, const api::ShaderInfo& shader_descriptor) { TORCH_CHECK(min || max, "At least one of 'min' or 'max' must not be None"); @@ -186,8 +186,8 @@ Tensor threshold( Tensor& clamp_( Tensor& self, - const c10::optional& min, - const c10::optional& max) { + const std::optional& min, + const std::optional& max) { return _clamp_(self, min, max, VK_KERNEL(clamp_)); } diff --git a/aten/src/ATen/native/vulkan/ops/Clone.cpp b/aten/src/ATen/native/vulkan/ops/Clone.cpp index 2601d785ddb52..3e9e611717257 100644 --- a/aten/src/ATen/native/vulkan/ops/Clone.cpp +++ b/aten/src/ATen/native/vulkan/ops/Clone.cpp @@ -16,7 +16,7 @@ namespace { Tensor clone( const Tensor& src, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { auto memory_format = optional_memory_format.value_or(MemoryFormat::Preserve); TORCH_CHECK( (c10::MemoryFormat::Preserve == memory_format) || diff --git a/aten/src/ATen/native/vulkan/ops/Common.h b/aten/src/ATen/native/vulkan/ops/Common.h index 83cb45b163a2a..c74483f793c52 100644 --- a/aten/src/ATen/native/vulkan/ops/Common.h +++ b/aten/src/ATen/native/vulkan/ops/Common.h @@ -76,18 +76,18 @@ uint32_t get_dim(const vTensor& v_in) { return get_dim(v_in.sizes()); } -inline c10::optional get_optional_tensor( +inline std::optional 
get_optional_tensor( const c10::impl::GenericList& gen_list, const uint32_t idx) { return gen_list.get(idx).isTensor() ? gen_list.get(idx).toTensor() - : c10::optional(); + : std::optional(); } -inline c10::optional get_optional_scalar( +inline std::optional get_optional_scalar( const c10::impl::GenericList& gen_list, const uint32_t idx) { return gen_list.get(idx).isScalar() ? gen_list.get(idx).toScalar() - : c10::optional(); + : std::optional(); } inline float roundevenf(float v) { diff --git a/aten/src/ATen/native/vulkan/ops/Convolution.cpp b/aten/src/ATen/native/vulkan/ops/Convolution.cpp index 01dccac003011..f210c253800b1 100644 --- a/aten/src/ATen/native/vulkan/ops/Convolution.cpp +++ b/aten/src/ATen/native/vulkan/ops/Convolution.cpp @@ -245,7 +245,7 @@ at::Tensor rearrange_weights_2d(const Tensor& weight_in, bool tconv) { * taking each texel and arranging them along the x axis. */ at::Tensor rearrange_bias( - const c10::optional& bias_in, + const std::optional& bias_in, const at::Tensor& weight_in, bool tconv) { // If optional is empty, just return zeros @@ -543,7 +543,7 @@ vTensor pack_weights( } vTensor pack_biases( - const c10::optional& bias, + const std::optional& bias, const Tensor& weight, const bool transposed, const bool quantized) { @@ -629,7 +629,7 @@ bool weight_valid(const Tensor& weight, const bool quantized) { } bool bias_valid( - const c10::optional& bias, + const std::optional& bias, const Tensor& weight, const bool transposed, const bool quantized) { @@ -656,7 +656,7 @@ bool bias_valid( bool available( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef stride, const IntArrayRef padding, const IntArrayRef dilation, @@ -664,8 +664,8 @@ bool available( const bool quantized, const IntArrayRef /* output_padding */, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { if (!weight_valid(weight, quantized)) { return false; } @@ -765,7 +765,7 @@ static inline std::vector get_conv_transpose_output_size( Tensor convolution( const Tensor& input, const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef stride, const IntArrayRef padding, const IntArrayRef dilation, @@ -790,7 +790,7 @@ Tensor convolution( Tensor quantized_convolution( const Tensor& input, const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef stride, const IntArrayRef padding, const IntArrayRef dilation, @@ -865,7 +865,7 @@ vTensor pack_weights_using_width_packing(const Tensor& weight_arg) { Tensor run_conv1d_context_impl( const Tensor& input_arg, const Tensor& weight_arg, - const c10::optional& bias_arg_opt, + const std::optional& bias_arg_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, @@ -962,7 +962,7 @@ Tensor run_conv1d_context_impl( Conv2dPackedContext::Conv2dPackedContext( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef stride_arg, const IntArrayRef padding_arg, const IntArrayRef dilation_arg, @@ -970,8 +970,8 @@ Conv2dPackedContext::Conv2dPackedContext( const bool quantized, const IntArrayRef output_padding_arg, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) + const std::optional& output_min, + const std::optional& output_max) : unpacked_{c10::AnyType::get()} { const auto stride = expand_param_if_needed(stride_arg, "stride", 2); const auto padding = 
expand_param_if_needed(padding_arg, "padding", 2); @@ -1058,13 +1058,13 @@ Conv2dPackedContext Conv2dPackedContext::pack(c10::impl::GenericList unpacked) { c10::intrusive_ptr create_conv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return c10::make_intrusive(Conv2dPackedContext( weight, bias, @@ -1081,14 +1081,14 @@ c10::intrusive_ptr create_conv2d_context( c10::intrusive_ptr create_tconv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& output_padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return c10::make_intrusive(Conv2dPackedContext( weight, bias, @@ -1105,13 +1105,13 @@ c10::intrusive_ptr create_tconv2d_context( c10::intrusive_ptr create_qconv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return c10::make_intrusive(Conv2dPackedContext( weight, bias, @@ -1128,14 +1128,14 @@ c10::intrusive_ptr create_qconv2d_context( c10::intrusive_ptr create_qtconv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& output_padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return c10::make_intrusive(Conv2dPackedContext( weight, bias, @@ -1294,7 +1294,7 @@ Tensor run_qconv2d_context( Tensor quantized_conv2d( const Tensor& input, const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, @@ -1321,15 +1321,15 @@ Conv2dOpContext::Conv2dOpContext(Conv2dPackedContext conv_context) Conv2dOpContext Conv2dOpContext::create( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef stride_arg, const IntArrayRef padding_arg, const IntArrayRef dilation_arg, const bool transposed, const IntArrayRef output_padding_arg, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return Conv2dOpContext{Conv2dPackedContext( weight, bias, @@ -1367,13 +1367,13 @@ Conv2dOpContext::State Conv2dOpContext::unpack() const { c10::intrusive_ptr conv2d_clamp_prepack( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return c10::make_intrusive(Conv2dOpContext::create( std::move(weight), std::move(bias), @@ -1395,7 +1395,7 @@ Tensor conv2d_clamp_run( Conv1dPackedContext::Conv1dPackedContext( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const 
IntArrayRef stride_arg, const IntArrayRef padding_arg, const IntArrayRef dilation_arg, @@ -1435,7 +1435,7 @@ Conv1dPackedContext Conv1dPackedContext::pack(c10::impl::GenericList unpacked) { c10::intrusive_ptr create_conv1d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, @@ -1447,7 +1447,7 @@ c10::intrusive_ptr create_conv1d_context( Tensor convolution1d( const Tensor& input, const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef stride, const IntArrayRef padding, const IntArrayRef dilation, @@ -1464,7 +1464,7 @@ Tensor run_conv1d_context( const c10::intrusive_ptr& context) { const Tensor weight = context->get_val(Conv1dPackedContext::Packed::Weight).toTensor(); - const c10::optional& bias_opt = + const std::optional& bias_opt = context->get_val(Conv1dPackedContext::Packed::Bias).toTensor(); const auto stride = context->get_val(Conv1dPackedContext::Packed::Stride).toIntVector(); diff --git a/aten/src/ATen/native/vulkan/ops/Convolution.h b/aten/src/ATen/native/vulkan/ops/Convolution.h index 1d51190b8cab5..84ace9526bbfc 100644 --- a/aten/src/ATen/native/vulkan/ops/Convolution.h +++ b/aten/src/ATen/native/vulkan/ops/Convolution.h @@ -21,7 +21,7 @@ namespace conv2d { Tensor rearrange_weights_dw(const Tensor& weight_in); Tensor rearrange_weights_2d(const Tensor& weight_in, bool tconv); Tensor rearrange_bias( - const c10::optional& bias_in, + const std::optional& bias_in, const at::Tensor& weight_in, bool tconv); @@ -60,7 +60,7 @@ class Conv2dPackedContext final : virtual public VulkanPackedContext, public: Conv2dPackedContext( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef stride_arg, const IntArrayRef padding_arg, const IntArrayRef dilation_arg, @@ -68,8 +68,8 @@ class Conv2dPackedContext final : virtual public VulkanPackedContext, const bool quantized, const IntArrayRef output_padding_arg, const int64_t groups, - const c10::optional& output_min = c10::nullopt, - const c10::optional& output_max = c10::nullopt); + const std::optional& output_min = c10::nullopt, + const std::optional& output_max = c10::nullopt); /* * Assigns a name to each index in the unpacked list. 
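Note on the Vulkan convolution hunks above: the prepack/create-context signatures keep `= c10::nullopt` as the default initializer for the now-`std::optional` `output_min`/`output_max` parameters, which still compiles because the `c10` names are aliases over the `std` ones. A minimal standalone sketch of why the textual swap is drop-in safe — the `c10` shim and `clamp_or_passthrough` below are illustrative stand-ins, not PyTorch code:

```
#include <iostream>
#include <optional>

// Assumed shape of the compatibility layer: c10::optional / c10::nullopt are
// nothing more than aliases for the std:: equivalents.
namespace c10 {
template <class T>
using optional = std::optional<T>;
using std::nullopt;
}

// A conv2d-clamp-style signature after the codemod: defaults written as
// c10::nullopt still bind to std::optional parameters.
double clamp_or_passthrough(double v,
                            const std::optional<double>& output_min = c10::nullopt,
                            const std::optional<double>& output_max = c10::nullopt) {
  if (output_min && v < *output_min) return *output_min;
  if (output_max && v > *output_max) return *output_max;
  return v;
}

int main() {
  std::cout << clamp_or_passthrough(5.0) << "\n";           // 5 (no clamp bounds)
  std::cout << clamp_or_passthrough(5.0, 0.0, 4.0) << "\n"; // 4 (clamped to max)
}
```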
@@ -127,13 +127,13 @@ class Conv2dPackedContext final : virtual public VulkanPackedContext, c10::intrusive_ptr create_conv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min = c10::nullopt, - const c10::optional& output_max = c10::nullopt); + const std::optional& output_min = c10::nullopt, + const std::optional& output_max = c10::nullopt); Tensor run_conv2d_context( const Tensor& input, @@ -141,14 +141,14 @@ Tensor run_conv2d_context( c10::intrusive_ptr create_tconv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& output_padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min = c10::nullopt, - const c10::optional& output_max = c10::nullopt); + const std::optional& output_min = c10::nullopt, + const std::optional& output_max = c10::nullopt); Tensor run_tconv2d_context( const Tensor& input, @@ -156,13 +156,13 @@ Tensor run_tconv2d_context( c10::intrusive_ptr create_qconv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min = c10::nullopt, - const c10::optional& output_max = c10::nullopt); + const std::optional& output_min = c10::nullopt, + const std::optional& output_max = c10::nullopt); Tensor run_qconv2d_context( const Tensor& input_arg, @@ -172,39 +172,39 @@ Tensor run_qconv2d_context( c10::intrusive_ptr create_qtconv2d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& output_padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min = c10::nullopt, - const c10::optional& output_max = c10::nullopt); + const std::optional& output_min = c10::nullopt, + const std::optional& output_max = c10::nullopt); // Backwards compatibility class Conv2dOpContext final : public torch::jit::CustomClassHolder { public: static Conv2dOpContext create( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, - const c10::optional& output_min = c10::nullopt, - const c10::optional& output_max = c10::nullopt); + const std::optional& output_min = c10::nullopt, + const std::optional& output_max = c10::nullopt); using State = std::tuple< Tensor, - c10::optional, + std::optional, std::vector, std::vector, std::vector, int64_t, - c10::optional, - c10::optional>; + std::optional, + std::optional>; Tensor run(const Tensor& input) const; State unpack() const; @@ -220,13 +220,13 @@ Tensor conv2d_clamp_run( c10::intrusive_ptr conv2d_clamp_prepack( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, const int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max); + const std::optional& output_min, + const std::optional& output_max); class Conv1dPackedContext final : virtual public VulkanPackedContext, public torch::jit::CustomClassHolder { @@ -237,7 +237,7 @@ class Conv1dPackedContext final : virtual public VulkanPackedContext, public: Conv1dPackedContext( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef 
stride_arg, const IntArrayRef padding_arg, const IntArrayRef dilation_arg, @@ -287,7 +287,7 @@ class Conv1dPackedContext final : virtual public VulkanPackedContext, c10::intrusive_ptr create_conv1d_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& stride, std::vector&& padding, std::vector&& dilation, diff --git a/aten/src/ATen/native/vulkan/ops/Factory.cpp b/aten/src/ATen/native/vulkan/ops/Factory.cpp index b746868c238fd..afe82caed8f19 100644 --- a/aten/src/ATen/native/vulkan/ops/Factory.cpp +++ b/aten/src/ATen/native/vulkan/ops/Factory.cpp @@ -8,10 +8,10 @@ namespace ops { Tensor _empty_affine_quantized( const IntArrayRef sizes, - const c10::optional dtype, - const c10::optional layout, - const c10::optional device, - const c10::optional pin_memory, + const std::optional dtype, + const std::optional layout, + const std::optional device, + const std::optional pin_memory, const double scale, const int64_t zero_point, const optional memory_format) { @@ -30,10 +30,10 @@ Tensor _empty_affine_quantized( Tensor empty_memory_format( const IntArrayRef sizes, - const c10::optional dtype, - const c10::optional layout, - const c10::optional device, - const c10::optional pin_memory, + const std::optional dtype, + const std::optional layout, + const std::optional device, + const std::optional pin_memory, const optional memory_format) { api::StorageType storage_type = api::StorageType::TEXTURE_3D; return convert(vTensor{ diff --git a/aten/src/ATen/native/vulkan/ops/Factory.h b/aten/src/ATen/native/vulkan/ops/Factory.h index 9dee6307bb85c..9839ba2d84319 100644 --- a/aten/src/ATen/native/vulkan/ops/Factory.h +++ b/aten/src/ATen/native/vulkan/ops/Factory.h @@ -7,10 +7,10 @@ namespace ops { Tensor _empty_affine_quantized( const IntArrayRef sizes, - const c10::optional dtype, - const c10::optional layout, - const c10::optional device, - const c10::optional pin_memory, + const std::optional dtype, + const std::optional layout, + const std::optional device, + const std::optional pin_memory, const double scale, const int64_t zero_point, const optional memory_format); diff --git a/aten/src/ATen/native/vulkan/ops/Layernorm.cpp b/aten/src/ATen/native/vulkan/ops/Layernorm.cpp index cdca77f95fcaf..6b6a4b866c700 100644 --- a/aten/src/ATen/native/vulkan/ops/Layernorm.cpp +++ b/aten/src/ATen/native/vulkan/ops/Layernorm.cpp @@ -19,8 +19,8 @@ namespace vulkan { namespace ops { LayernormPackedContext::LayernormPackedContext( - const c10::optional& weight, - const c10::optional& bias, + const std::optional& weight, + const std::optional& bias, double eps) : unpacked_{c10::AnyType::get()} { packed_.reserve(ListArgs::kNumArgs); @@ -48,8 +48,8 @@ LayernormPackedContext LayernormPackedContext::pack( } c10::intrusive_ptr create_layernorm_context( - c10::optional&& weight, - c10::optional&& bias, + std::optional&& weight, + std::optional&& bias, double eps) { return c10::make_intrusive( LayernormPackedContext(weight, bias, eps)); @@ -61,10 +61,10 @@ Tensor run_layernorm_context( const c10::intrusive_ptr& layernorm_context) { const Tensor input = input_arg.is_vulkan() ? 
input_arg : input_arg.vulkan(); - const c10::optional& weight_opt = + const std::optional& weight_opt = layernorm_context->get_val(LayernormPackedContext::ListArgs::kWeight) .toTensor(); - const c10::optional& bias_opt = + const std::optional& bias_opt = layernorm_context->get_val(LayernormPackedContext::ListArgs::kBias) .toTensor(); const float eps = api::utils::safe_downcast( @@ -81,8 +81,8 @@ Tensor run_layernorm_context( Tensor layer_norm( const at::Tensor& input_arg, IntArrayRef normalized_shape, - const c10::optional& weight_opt /* optional */, - const c10::optional& bias_opt /* optional */, + const std::optional& weight_opt /* optional */, + const std::optional& bias_opt /* optional */, double eps, bool /* cudnn_enable, deprecated */) { return run_layernorm_context( diff --git a/aten/src/ATen/native/vulkan/ops/Layernorm.h b/aten/src/ATen/native/vulkan/ops/Layernorm.h index 39518bf63bc9f..881fd6ba9b36c 100644 --- a/aten/src/ATen/native/vulkan/ops/Layernorm.h +++ b/aten/src/ATen/native/vulkan/ops/Layernorm.h @@ -18,8 +18,8 @@ class LayernormPackedContext final : virtual public VulkanPackedContext, public: LayernormPackedContext( - const c10::optional& weight, - const c10::optional& bias, + const std::optional& weight, + const std::optional& bias, double eps); /* @@ -43,8 +43,8 @@ class LayernormPackedContext final : virtual public VulkanPackedContext, }; c10::intrusive_ptr create_layernorm_context( - c10::optional&& weight, - c10::optional&& bias, + std::optional&& weight, + std::optional&& bias, double eps); Tensor run_layernorm_context( diff --git a/aten/src/ATen/native/vulkan/ops/Mm.cpp b/aten/src/ATen/native/vulkan/ops/Mm.cpp index e5893e8172875..c4f4d6d0a6342 100644 --- a/aten/src/ATen/native/vulkan/ops/Mm.cpp +++ b/aten/src/ATen/native/vulkan/ops/Mm.cpp @@ -149,7 +149,7 @@ vTensor pack_weights(const Tensor& weight_arg, const bool use_batch = false) { vTensor pack_biases( const Tensor& weight_arg, - const c10::optional& bias_arg, + const std::optional& bias_arg, const bool use_batch = false) { if (bias_arg) { Tensor bias = *bias_arg; @@ -166,7 +166,7 @@ vTensor pack_biases( // removed in the future. 
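The `pack_biases` change here (like `rearrange_bias` earlier in the diff) follows a common pattern for these optional bias tensors: use the bias if the caller provided one, otherwise fall back to zeros. A rough, self-contained sketch of that fallback — hypothetical helper name and `std::vector` stand-in, not the actual Vulkan/ATen code:

```
#include <cstddef>
#include <optional>
#include <vector>

// Hypothetical "bias or zeros" helper: the real code operates on at::Tensor /
// vTensor, but the optional-handling shape is the same.
std::vector<float> bias_or_zeros(const std::optional<std::vector<float>>& bias,
                                 std::size_t out_channels) {
  if (bias.has_value()) {
    return *bias;                                 // caller supplied a bias
  }
  return std::vector<float>(out_channels, 0.0f);  // otherwise, zero-filled
}

int main() {
  auto a = bias_or_zeros(std::nullopt, 4);              // size 4, all zeros
  auto b = bias_or_zeros(std::vector<float>{1, 2}, 4);  // the provided {1, 2}
  return (a.size() == 4 && b.size() == 2) ? 0 : 1;
}
```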
vTensor pack_biases_quantized_weights( const Tensor& weight_arg, - const c10::optional& bias_arg, + const std::optional& bias_arg, const bool use_batch = false) { TORCH_CHECK( weight_arg.is_quantized(), @@ -291,7 +291,7 @@ vTensor pack_biases_quantized_weights( bool available_check_with_batch( const Tensor& weight, - const c10::optional& bias) { + const std::optional& bias) { const bool weight_available = (3 == weight.ndimension()) && (weight.size(Layout::BatchMatrices::batch) > 0) && (weight.size(Layout::BatchMatrices::height) > 0) && @@ -345,7 +345,7 @@ bool available_check_with_batch( bool available( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const bool use_batch = false) { if (!api::available()) { return false; @@ -897,7 +897,7 @@ Tensor mm(const Tensor& mat1_arg, const Tensor& mat2_arg) { 1.0f, 1.0f, c10::make_intrusive( - LinearPackedContext(mat2_arg, c10::optional())), + LinearPackedContext(mat2_arg, std::optional())), false, 0, 0); @@ -909,7 +909,7 @@ Tensor bmm(const Tensor& mat1_arg, const Tensor& mat2_arg) { 1.0f, 1.0f, c10::make_intrusive(LinearPackedContext( - mat2_arg, c10::optional(), true /*use batch*/))); + mat2_arg, std::optional(), true /*use batch*/))); } Tensor baddbmm( @@ -941,7 +941,7 @@ TORCH_LIBRARY_IMPL(aten, Vulkan, m) { LinearPackedContext::LinearPackedContext( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const bool use_batch) : unpacked_{c10::AnyType::get()} { TORCH_CHECK( @@ -974,7 +974,7 @@ LinearPackedContext LinearPackedContext::pack(c10::impl::GenericList unpacked) { c10::intrusive_ptr create_linear_context( Tensor&& weight, - c10::optional&& bias) { + std::optional&& bias) { return c10::make_intrusive( LinearPackedContext(weight, bias)); } diff --git a/aten/src/ATen/native/vulkan/ops/Mm.h b/aten/src/ATen/native/vulkan/ops/Mm.h index b4fcb31bc315c..99862913a65a0 100644 --- a/aten/src/ATen/native/vulkan/ops/Mm.h +++ b/aten/src/ATen/native/vulkan/ops/Mm.h @@ -61,7 +61,7 @@ class LinearPackedContext final : virtual public VulkanPackedContext, public: LinearPackedContext( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const bool use_batch = false); /* @@ -97,7 +97,7 @@ class LinearPackedContext final : virtual public VulkanPackedContext, c10::intrusive_ptr create_linear_context( Tensor&& weight, - c10::optional&& bias); + std::optional&& bias); Tensor run_linear_context( const Tensor& input, diff --git a/aten/src/ATen/native/vulkan/ops/NativeLayerNorm.cpp b/aten/src/ATen/native/vulkan/ops/NativeLayerNorm.cpp index ffeb8c27c52b5..94d155cc2f647 100644 --- a/aten/src/ATen/native/vulkan/ops/NativeLayerNorm.cpp +++ b/aten/src/ATen/native/vulkan/ops/NativeLayerNorm.cpp @@ -12,8 +12,8 @@ using namespace api::utils; void _check_layer_norm_inputs( const at::Tensor& input, IntArrayRef normalized_shape, - const c10::optional& weight /* optional */, - const c10::optional& bias /* optional */) { + const std::optional& weight /* optional */, + const std::optional& bias /* optional */) { const auto normalized_ndim = normalized_shape.size(); TORCH_CHECK( normalized_ndim >= 1, @@ -55,8 +55,8 @@ void _check_layer_norm_inputs( std::tuple native_layer_norm( const at::Tensor& input_arg, IntArrayRef normalized_shape, - const c10::optional& weight_opt /* optional */, - const c10::optional& bias_opt /* optional */, + const std::optional& weight_opt /* optional */, + const std::optional& bias_opt /* optional */, double eps) { _check_layer_norm_inputs(input_arg, normalized_shape, 
weight_opt, bias_opt); diff --git a/aten/src/ATen/native/vulkan/ops/Pool.cpp b/aten/src/ATen/native/vulkan/ops/Pool.cpp index fab4f05b4a98b..8730cf660a43b 100644 --- a/aten/src/ATen/native/vulkan/ops/Pool.cpp +++ b/aten/src/ATen/native/vulkan/ops/Pool.cpp @@ -232,7 +232,7 @@ Tensor avg_pool2d( const IntArrayRef padding_arg, const bool ceil_mode, const bool /* count_include_pad */, - const c10::optional /* divisor_override */) { + const std::optional /* divisor_override */) { return pool2d( self_arg, kernel_arg, diff --git a/aten/src/ATen/native/vulkan/ops/QuantizedFunctions.h b/aten/src/ATen/native/vulkan/ops/QuantizedFunctions.h index b22a3aa05b819..d72ad00321043 100644 --- a/aten/src/ATen/native/vulkan/ops/QuantizedFunctions.h +++ b/aten/src/ATen/native/vulkan/ops/QuantizedFunctions.h @@ -52,7 +52,7 @@ Tensor quantized_div( Tensor quantized_conv2d( const Tensor& input_, const Tensor& weight, - const c10::optional& bias_opt, + const std::optional& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, @@ -63,8 +63,8 @@ Tensor quantized_conv2d( Tensor quantized_upsample_nearest2d( const Tensor& input_arg, const IntArrayRef output_sizes, - const c10::optional scales_h, - const c10::optional scales_w); + const std::optional scales_h, + const std::optional scales_w); } // namespace ops } // namespace vulkan diff --git a/aten/src/ATen/native/vulkan/ops/Random.cpp b/aten/src/ATen/native/vulkan/ops/Random.cpp index c266b10417039..3103f7fe6f58d 100644 --- a/aten/src/ATen/native/vulkan/ops/Random.cpp +++ b/aten/src/ATen/native/vulkan/ops/Random.cpp @@ -16,7 +16,7 @@ Tensor& uniform_( Tensor& self, const double from, const double to, - const c10::optional /* not implemented */) { + const std::optional /* not implemented */) { TORCH_CHECK( self.is_vulkan(), "Vulkan: In-place operator is only supported on Vulkan tensors."); @@ -59,11 +59,11 @@ Tensor& uniform_( Tensor rand_like( const at::Tensor& input_arg, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */) { + const std::optional /* not implemented */, + const std::optional /* not implemented */, + const std::optional /* not implemented */, + const std::optional /* not implemented */, + const std::optional /* not implemented */) { // Returns a tensor with the same size as input that is filled with random // numbers from a uniform distribution on the interval [0,1). 
To match the CPU // implementation, we simplify the range to [0,1] and tolerate the small @@ -75,7 +75,7 @@ Tensor& normal_( Tensor& self, const double mean, const double std, - const c10::optional /* not implemented */) { + const std::optional /* not implemented */) { TORCH_CHECK( self.is_vulkan(), "Vulkan: In-place operator is only supported on Vulkan tensors."); @@ -120,11 +120,11 @@ Tensor& normal_( Tensor randn_like( const at::Tensor& input_arg, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */, - const c10::optional /* not implemented */) { + const std::optional /* not implemented */, + const std::optional /* not implemented */, + const std::optional /* not implemented */, + const std::optional /* not implemented */, + const std::optional /* not implemented */) { // Returns a tensor with the same size as input that is filled with random // numbers from a normal distribution with mean 0 and standard deviation 1. return input_arg.clone().detach().normal_(0.0, 1.0); diff --git a/aten/src/ATen/native/vulkan/ops/Slice.cpp b/aten/src/ATen/native/vulkan/ops/Slice.cpp index 7d7721bcb7b15..dad391e9a5ddd 100644 --- a/aten/src/ATen/native/vulkan/ops/Slice.cpp +++ b/aten/src/ATen/native/vulkan/ops/Slice.cpp @@ -232,8 +232,8 @@ Tensor slice_height( Tensor slice( const Tensor& self, int64_t dim, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, const int64_t step) { TORCH_CHECK(step > 0, "slice step must be positive"); auto nDims = safe_downcast(self.dim()); diff --git a/aten/src/ATen/native/vulkan/ops/Sum.cpp b/aten/src/ATen/native/vulkan/ops/Sum.cpp index 56eed26448dd5..6d8331caff215 100644 --- a/aten/src/ATen/native/vulkan/ops/Sum.cpp +++ b/aten/src/ATen/native/vulkan/ops/Sum.cpp @@ -132,7 +132,7 @@ Tensor sum_dim_IntList( return self; } -Tensor sum(const Tensor& self, const c10::optional dtype) { +Tensor sum(const Tensor& self, const std::optional dtype) { std::vector dims; for (int64_t d = 0; d < self.dim(); d++) { // If any dimension has zero elements, we will shortcut to a zero-dim. 
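Several of the Vulkan ops touched in the hunks above (slice, sum, the random fills) take their `std::optional` arguments purely as "absent means use a default" knobs. As a rough illustration of that pattern — a hypothetical `normalize_slice_bounds` helper under assumed slicing semantics, not the actual Vulkan slice kernel:

```
#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>

// Hypothetical helper: resolve optional slice bounds the way aten-style
// slicing usually does — a missing start defaults to 0, a missing end
// defaults to the dimension size, and negative indices count from the end.
std::pair<int64_t, int64_t> normalize_slice_bounds(int64_t dim_size,
                                                   std::optional<int64_t> start,
                                                   std::optional<int64_t> end) {
  int64_t s = start.value_or(0);
  int64_t e = end.value_or(dim_size);
  if (s < 0) s += dim_size;
  if (e < 0) e += dim_size;
  s = std::clamp<int64_t>(s, 0, dim_size);
  e = std::clamp<int64_t>(e, s, dim_size);
  return {s, e};
}

int main() {
  auto [s1, e1] = normalize_slice_bounds(10, std::nullopt, std::nullopt);  // [0, 10)
  auto [s2, e2] = normalize_slice_bounds(10, -3, std::nullopt);            // [7, 10)
  return (s1 == 0 && e1 == 10 && s2 == 7 && e2 == 10) ? 0 : 1;
}
```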
diff --git a/aten/src/ATen/native/vulkan/ops/Upsample.cpp b/aten/src/ATen/native/vulkan/ops/Upsample.cpp index 776d1e79ce705..7e3a2ead2d632 100644 --- a/aten/src/ATen/native/vulkan/ops/Upsample.cpp +++ b/aten/src/ATen/native/vulkan/ops/Upsample.cpp @@ -12,8 +12,8 @@ using namespace api::utils; Tensor upsample_nearest2d( const Tensor& input_arg, const IntArrayRef output_sizes, - const c10::optional scales_h, - const c10::optional scales_w) { + const std::optional scales_h, + const std::optional scales_w) { api::Context* const context = api::context(); TORCH_CHECK( @@ -98,8 +98,8 @@ Tensor upsample_bilinear2d( const Tensor& input_arg, const IntArrayRef output_sizes, bool align_corners, - const c10::optional scales_h, - const c10::optional scales_w) { + const std::optional scales_h, + const std::optional scales_w) { api::Context* const context = api::context(); TORCH_CHECK( diff --git a/aten/src/ATen/native/vulkan/ops/Zero.cpp b/aten/src/ATen/native/vulkan/ops/Zero.cpp index 5ceaae07cdc3e..fc903ad3f1e19 100644 --- a/aten/src/ATen/native/vulkan/ops/Zero.cpp +++ b/aten/src/ATen/native/vulkan/ops/Zero.cpp @@ -43,10 +43,10 @@ Tensor& zero_(at::Tensor& self) { Tensor zeros( const IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_CHECK(size.size() <= 4, "Vulkan zeros supports up to 4d tensors"); // Get the global Vulkan context diff --git a/aten/src/ATen/native/vulkan/ops/cumsum.cpp b/aten/src/ATen/native/vulkan/ops/cumsum.cpp index c0e8a0c09362d..e6537fcc5acd5 100644 --- a/aten/src/ATen/native/vulkan/ops/cumsum.cpp +++ b/aten/src/ATen/native/vulkan/ops/cumsum.cpp @@ -87,7 +87,7 @@ void set_cumsum_kernel_params( Tensor cumsum( const at::Tensor& input_arg, const int64_t dim_arg, - const c10::optional dtype) { + const std::optional dtype) { TORCH_CHECK( input_arg.dim() >= 1 && input_arg.dim() <= 4, "Vulkan cumsum expects 1 <= input dimension <= 4, Tensor input dimensions ", diff --git a/aten/src/ATen/native/xnnpack/Convolution.cpp b/aten/src/ATen/native/xnnpack/Convolution.cpp index aaf42ea3ed3d3..504c6a363816c 100644 --- a/aten/src/ATen/native/xnnpack/Convolution.cpp +++ b/aten/src/ATen/native/xnnpack/Convolution.cpp @@ -170,7 +170,7 @@ const Tensor reorder_weights_for_transpose_conv(const Tensor& weight_nhwc, ContextConv2D create( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef padding, const IntArrayRef output_padding, const IntArrayRef stride, @@ -396,13 +396,13 @@ Tensor run( c10::intrusive_ptr createConv2dClampPrePackOpContext( Tensor weight, - c10::optional bias, + std::optional bias, std::vector stride, std::vector padding, std::vector dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return xnnpack::XNNPackConv2dOpContext::create_context( std::move(weight), std::move(bias), @@ -417,14 +417,14 @@ c10::intrusive_ptr c10::intrusive_ptr createConv2dTransposeClampPrePackOpContext( Tensor weight, - c10::optional bias, + std::optional bias, std::vector stride, std::vector padding, std::vector output_padding, std::vector dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { return xnnpack::XNNPackTransposeConv2dOpContext::create_context( 
std::move(weight), std::move(bias), diff --git a/aten/src/ATen/native/xnnpack/Convolution.h b/aten/src/ATen/native/xnnpack/Convolution.h index 0df4a6bcd483d..0ec3f01f36bb6 100644 --- a/aten/src/ATen/native/xnnpack/Convolution.h +++ b/aten/src/ATen/native/xnnpack/Convolution.h @@ -12,25 +12,25 @@ namespace internal::convolution2d { c10::intrusive_ptr createConv2dClampPrePackOpContext( Tensor weight, - c10::optional bias, + std::optional bias, std::vector stride, std::vector padding, std::vector dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max); + const std::optional& output_min, + const std::optional& output_max); c10::intrusive_ptr createConv2dTransposeClampPrePackOpContext( Tensor weight, - c10::optional bias, + std::optional bias, std::vector stride, std::vector padding, std::vector output_padding, std::vector dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max); + const std::optional& output_min, + const std::optional& output_max); Tensor conv2d_clamp_run( const Tensor& input, @@ -45,7 +45,7 @@ Tensor conv2d_transpose_clamp_run( ContextConv2D create( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const IntArrayRef padding, const IntArrayRef output_padding, const IntArrayRef stride, diff --git a/aten/src/ATen/native/xnnpack/Linear.cpp b/aten/src/ATen/native/xnnpack/Linear.cpp index dcab40ec17cfd..b1f4936625828 100644 --- a/aten/src/ATen/native/xnnpack/Linear.cpp +++ b/aten/src/ATen/native/xnnpack/Linear.cpp @@ -14,7 +14,7 @@ namespace { // TODO: Decouple and improve error handling and messages. bool available( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const float output_min, const float output_max) { // XNNPACK @@ -65,7 +65,7 @@ Tensor create_and_run( ContextLinear create( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const float output_min, const float output_max) { const Tensor weight_contig = weight.contiguous(); @@ -173,9 +173,9 @@ Tensor run( c10::intrusive_ptr createLinearClampPrePackOpContext( Tensor weight, - c10::optional bias, - const c10::optional& output_min, - const c10::optional& output_max) { + std::optional bias, + const std::optional& output_min, + const std::optional& output_max) { return xnnpack::XNNPackLinearOpContext::create_context( std::move(weight), std::move(bias), output_min, output_max); } diff --git a/aten/src/ATen/native/xnnpack/Linear.h b/aten/src/ATen/native/xnnpack/Linear.h index 32c9d93bf4533..9a16918ca0a99 100644 --- a/aten/src/ATen/native/xnnpack/Linear.h +++ b/aten/src/ATen/native/xnnpack/Linear.h @@ -11,9 +11,9 @@ namespace internal::linear { c10::intrusive_ptr createLinearClampPrePackOpContext( Tensor weight, - c10::optional bias, - const c10::optional& output_min, - const c10::optional& output_max); + std::optional bias, + const std::optional& output_min, + const std::optional& output_max); Tensor linear_clamp_run(const Tensor& input, const c10::intrusive_ptr& op_context); @@ -22,7 +22,7 @@ unpack_prepacked_sizes_linear(const IValue& ivalue); ContextLinear create( const Tensor& weight, - const c10::optional& bias, + const std::optional& bias, const float output_min, const float output_max); diff --git a/aten/src/ATen/native/xnnpack/OpContext.cpp b/aten/src/ATen/native/xnnpack/OpContext.cpp index 07f926cd8add5..71c40d1dccd7b 100644 --- a/aten/src/ATen/native/xnnpack/OpContext.cpp +++ b/aten/src/ATen/native/xnnpack/OpContext.cpp @@ -10,9 +10,9 @@ namespace 
at::native::xnnpack { c10::intrusive_ptr XNNPackLinearOpContext::create_context( at::Tensor&& weight, - c10::optional&& bias, - const c10::optional& output_min, - const c10::optional& output_max) { + std::optional&& bias, + const std::optional& output_min, + const std::optional& output_max) { auto linear_op_context = c10::make_intrusive( std::move(weight), @@ -46,13 +46,13 @@ Tensor XNNPackLinearOpContext::run(const Tensor& input) { c10::intrusive_ptr XNNPackConv2dOpContext::create_context(at::Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& stride, std::vector&& dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { auto op_context = xnnpack::internal::convolution2d::create( weight, @@ -89,14 +89,14 @@ XNNPackConv2dOpContext::create_context(at::Tensor&& weight, c10::intrusive_ptr XNNPackTransposeConv2dOpContext::create_context(at::Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& output_padding, std::vector&& stride, std::vector&& dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max) { + const std::optional& output_min, + const std::optional& output_max) { auto op_context = xnnpack::internal::convolution2d::create( weight, diff --git a/aten/src/ATen/native/xnnpack/OpContext.h b/aten/src/ATen/native/xnnpack/OpContext.h index eecc8b11fad13..0aec38b102ff5 100644 --- a/aten/src/ATen/native/xnnpack/OpContext.h +++ b/aten/src/ATen/native/xnnpack/OpContext.h @@ -10,37 +10,37 @@ namespace at::native::xnnpack { using SerializationTypeLinearPrePack = std::tuple< Tensor, - c10::optional, - c10::optional, - c10::optional>; + std::optional, + std::optional, + std::optional>; using SerializationTypeConv2dPrePack = std::tuple< Tensor, - c10::optional, + std::optional, std::vector, std::vector, std::vector, int64_t, - c10::optional, - c10::optional>; + std::optional, + std::optional>; using SerializationTypeTransposeConv2dPrePack = std::tuple< Tensor, - c10::optional, + std::optional, std::vector, std::vector, std::vector, std::vector, int64_t, - c10::optional, - c10::optional>; + std::optional, + std::optional>; class LinearOpContext : public torch::jit::CustomClassHolder { protected: Tensor orig_weight_; - c10::optional orig_bias_; - c10::optional output_min_; - c10::optional output_max_; + std::optional orig_bias_; + std::optional output_min_; + std::optional output_max_; bool orig_weight_and_bias_freed_; public: @@ -60,9 +60,9 @@ class XNNPackLinearOpContext final : public LinearOpContext { public: XNNPackLinearOpContext( Tensor&& weight, - c10::optional&& bias, - const c10::optional& min, - const c10::optional& max, + std::optional&& bias, + const std::optional& min, + const std::optional& max, ContextLinear&& op_context) : op_context_(std::move(op_context)) { orig_weight_ = std::move(weight); @@ -77,21 +77,21 @@ class XNNPackLinearOpContext final : public LinearOpContext { static c10::intrusive_ptr create_context( Tensor&& weight, - c10::optional&& bias, - const c10::optional& output_min, - const c10::optional& output_max); + std::optional&& bias, + const std::optional& output_min, + const std::optional& output_max); }; class Conv2dOpContext : public torch::jit::CustomClassHolder { protected: Tensor orig_weight_; - c10::optional orig_bias_; + std::optional orig_bias_; std::vector stride_; std::vector padding_; std::vector dilation_; int64_t 
groups_; - c10::optional output_min_; - c10::optional output_max_; + std::optional output_min_; + std::optional output_max_; bool orig_weight_and_bias_freed_; public: @@ -115,14 +115,14 @@ class Conv2dOpContext : public torch::jit::CustomClassHolder { class TransposeConv2dOpContext : public torch::jit::CustomClassHolder { protected: Tensor orig_weight_; - c10::optional orig_bias_; + std::optional orig_bias_; std::vector stride_; std::vector padding_; std::vector output_padding_; std::vector dilation_; int64_t groups_; - c10::optional output_min_; - c10::optional output_max_; + std::optional output_min_; + std::optional output_max_; bool orig_weight_and_bias_freed_; public: @@ -158,13 +158,13 @@ class XNNPackConv2dOpContext final : public Conv2dOpContext { public: XNNPackConv2dOpContext( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& stride, std::vector&& dilation, uint64_t groups, - const c10::optional& min, - const c10::optional& max, + const std::optional& min, + const std::optional& max, ContextConv2D&& op_context) : op_context_(std::move(op_context)) { orig_weight_ = std::move(weight); @@ -183,13 +183,13 @@ class XNNPackConv2dOpContext final : public Conv2dOpContext { static c10::intrusive_ptr create_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& stride, std::vector&& dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max); + const std::optional& output_min, + const std::optional& output_max); }; class XNNPackTransposeConv2dOpContext final : public TransposeConv2dOpContext { @@ -206,14 +206,14 @@ class XNNPackTransposeConv2dOpContext final : public TransposeConv2dOpContext { public: XNNPackTransposeConv2dOpContext( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& output_padding, std::vector&& stride, std::vector&& dilation, uint64_t groups, - const c10::optional& min, - const c10::optional& max, + const std::optional& min, + const std::optional& max, ContextConv2D&& op_context) : op_context_(std::move(op_context)) { orig_weight_ = std::move(weight); @@ -233,14 +233,14 @@ class XNNPackTransposeConv2dOpContext final : public TransposeConv2dOpContext { static c10::intrusive_ptr create_context( Tensor&& weight, - c10::optional&& bias, + std::optional&& bias, std::vector&& padding, std::vector&& output_padding, std::vector&& stride, std::vector&& dilation, int64_t groups, - const c10::optional& output_min, - const c10::optional& output_max); + const std::optional& output_min, + const std::optional& output_max); }; } // namespace at::native::xnnpack diff --git a/aten/src/ATen/ops/from_blob.h b/aten/src/ATen/ops/from_blob.h index 8ebc01a922029..88089092c1fd7 100644 --- a/aten/src/ATen/ops/from_blob.h +++ b/aten/src/ATen/ops/from_blob.h @@ -31,7 +31,7 @@ class TORCH_API TensorMaker { return *this; } - TensorMaker& storage_offset(c10::optional value) noexcept { + TensorMaker& storage_offset(std::optional value) noexcept { storage_offset_ = value; return *this; @@ -50,7 +50,7 @@ class TORCH_API TensorMaker { return *this; } - TensorMaker& target_device(c10::optional value) noexcept { + TensorMaker& target_device(std::optional value) noexcept { device_ = value; return *this; @@ -91,10 +91,10 @@ class TORCH_API TensorMaker { void* data_; IntArrayRef sizes_; OptionalIntArrayRef strides_{}; - c10::optional storage_offset_{}; + std::optional storage_offset_{}; std::function deleter_{}; 
std::unique_ptr ctx_{nullptr, detail::noopDelete}; - c10::optional device_{}; + std::optional device_{}; TensorOptions opts_{}; bool resizeable_{}; c10::Allocator* allocator_{}; @@ -110,7 +110,7 @@ inline Tensor from_blob( IntArrayRef strides, const std::function& deleter, const TensorOptions& options = {}, - const c10::optional target_device = c10::nullopt) { + const std::optional target_device = c10::nullopt) { return for_blob(data, sizes) .strides(strides) .deleter(deleter) @@ -126,7 +126,7 @@ inline Tensor from_blob( int64_t storage_offset, const std::function& deleter, const TensorOptions& options = {}, - const c10::optional target_device = c10::nullopt) { + const std::optional target_device = c10::nullopt) { return for_blob(data, sizes) .strides(strides) .storage_offset(storage_offset) @@ -141,7 +141,7 @@ inline Tensor from_blob( IntArrayRef sizes, std::function deleter, const TensorOptions& options = {}, - const c10::optional target_device = c10::nullopt) { + const std::optional target_device = c10::nullopt) { return for_blob(data, sizes) .deleter(std::move(deleter)) .options(options) diff --git a/aten/src/ATen/record_function.cpp b/aten/src/ATen/record_function.cpp index bc3a0ba517483..04743ff256ece 100644 --- a/aten/src/ATen/record_function.cpp +++ b/aten/src/ATen/record_function.cpp @@ -43,7 +43,7 @@ RecordFunctionCallbacks::iterator findCallback( return std::find_if(entries.begin(), entries.end(), match_handle); } -c10::optional extractCallback( +std::optional extractCallback( RecordFunctionCallbacks& entries, CallbackHandle handle) { auto it = findCallback(entries, handle); @@ -132,7 +132,7 @@ class CacheEntry { // The caller is expected to check `GlobalCallbackManager::get().version()' // and call CacheEntry::update() if necessary. StepCallbacks getActiveCallbacks(); - c10::optional getActiveCallbacksUnlessEmpty(); + std::optional getActiveCallbacksUnlessEmpty(); // Full rebuild. (E.g. 
during registration) void update(const std::vector& callbacks); @@ -174,7 +174,7 @@ class LocalCallbackManager { public: const RecordFunctionTLS& getTLS() const; StepCallbacks getActiveCallbacks(const RecordScope scope); - c10::optional getActiveCallbacksUnlessEmpty(const RecordScope scope); + std::optional getActiveCallbacksUnlessEmpty(const RecordScope scope); void setTLS(const RecordFunctionTLS& tls); void seed(uint32_t seed); @@ -310,7 +310,7 @@ StepCallbacks CacheEntry::getActiveCallbacks() { return active_callbacks_; } -c10::optional CacheEntry::getActiveCallbacksUnlessEmpty() { +std::optional CacheEntry::getActiveCallbacksUnlessEmpty() { getActiveCallbacksImpl(); if (C10_LIKELY(active_callbacks_.empty())) { return c10::nullopt; @@ -397,7 +397,7 @@ StepCallbacks LocalCallbackManager::getActiveCallbacks( return active_callbacks_[static_cast(scope)].getActiveCallbacks(); } -c10::optional LocalCallbackManager::getActiveCallbacksUnlessEmpty( +std::optional LocalCallbackManager::getActiveCallbacksUnlessEmpty( const RecordScope scope) { rebuildActiveCallbacksIfNeeded(); return active_callbacks_[static_cast(scope)].getActiveCallbacksUnlessEmpty(); @@ -585,25 +585,25 @@ size_t RecordFunction::num_outputs() const { fn_); } -c10::optional RecordFunction::operator_name() const { +std::optional RecordFunction::operator_name() const { return std::visit( c10::overloaded( - [&](const std::string&) -> c10::optional { + [&](const std::string&) -> std::optional { return c10::nullopt; }, - [](const schema_ref_t schema) -> c10::optional { + [](const schema_ref_t schema) -> std::optional { return schema.get().operator_name(); }), fn_); } -c10::optional RecordFunction::operator_schema() const { +std::optional RecordFunction::operator_schema() const { return std::visit( c10::overloaded( - [&](const std::string&) -> c10::optional { + [&](const std::string&) -> std::optional { return c10::nullopt; }, - [](const schema_ref_t schema) -> c10::optional { + [](const schema_ref_t schema) -> std::optional { return schema.get(); }), fn_); @@ -613,7 +613,7 @@ StepCallbacks getStepCallbacks(RecordScope scope) { return LocalCallbackManager::get().getActiveCallbacks(scope); } -c10::optional getStepCallbacksUnlessEmpty(RecordScope scope) { +std::optional getStepCallbacksUnlessEmpty(RecordScope scope) { return LocalCallbackManager::get().getActiveCallbacksUnlessEmpty(scope); } diff --git a/aten/src/ATen/record_function.h b/aten/src/ATen/record_function.h index c6f79289e6c21..014260fb220f8 100644 --- a/aten/src/ATen/record_function.h +++ b/aten/src/ATen/record_function.h @@ -433,10 +433,10 @@ struct TORCH_API RecordFunction { return handle_; } - c10::optional operator_name() const; + std::optional operator_name() const; // This method returns a copy of the FunctionSchema and can be expensive. 
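operator_name() and operator_schema() above return a std::optional from std::visit over the fn_ variant, with c10::overloaded selecting a lambda per alternative. A self-contained sketch of that shape using only the standard library and hypothetical names (FnId, numeric_id), with a hand-rolled overloaded helper standing in for c10::overloaded:

```
#include <iostream>
#include <optional>
#include <string>
#include <variant>

// Classic overload-set helper; plays the role c10::overloaded plays in the patch.
template <class... Ts>
struct overloaded : Ts... {
  using Ts::operator()...;
};
template <class... Ts>
overloaded(Ts...) -> overloaded<Ts...>;

using FnId = std::variant<std::string, int>;  // hypothetical stand-in for fn_

std::optional<int> numeric_id(const FnId& fn) {
  return std::visit(
      overloaded{
          [](const std::string&) -> std::optional<int> { return std::nullopt; },
          [](int id) -> std::optional<int> { return id; }},
      fn);
}

int main() {
  std::cout << numeric_id(FnId{42}).value_or(-1) << "\n";                 // 42
  std::cout << numeric_id(FnId{std::string{"op"}}).value_or(-1) << "\n";  // -1
}
```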
- c10::optional operator_schema() const; + std::optional operator_schema() const; void setHandle(RecordFunctionHandle handle) { handle_ = handle; @@ -521,7 +521,7 @@ struct TORCH_API RecordFunction { TORCH_API StepCallbacks getStepCallbacks(RecordScope scope); -TORCH_API c10::optional getStepCallbacksUnlessEmpty( +TORCH_API std::optional getStepCallbacksUnlessEmpty( RecordScope scope); namespace detail { diff --git a/aten/src/ATen/templates/RegisterBackendSelect.cpp b/aten/src/ATen/templates/RegisterBackendSelect.cpp index dcb5986ab69ed..3586e44da999b 100644 --- a/aten/src/ATen/templates/RegisterBackendSelect.cpp +++ b/aten/src/ATen/templates/RegisterBackendSelect.cpp @@ -23,7 +23,7 @@ namespace { ${backend_select_method_definitions} -bool is_pinned(const Tensor& self, c10::optional device) { +bool is_pinned(const Tensor& self, std::optional device) { // Only CPU tensors can be pinned if (!self.is_cpu()) { return false; @@ -33,7 +33,7 @@ bool is_pinned(const Tensor& self, c10::optional device) { return at::_ops::is_pinned::redispatch(_dk, self, device); } -at::Tensor _pin_memory(const Tensor& self, c10::optional device) { +at::Tensor _pin_memory(const Tensor& self, std::optional device) { TORCH_CHECK(self.device().is_cpu(), "cannot pin '", self.toString(), "' only dense CPU tensors can be pinned"); DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA))); if (self.is_nested()) { diff --git a/aten/src/ATen/templates/RegisterFunctionalization.cpp b/aten/src/ATen/templates/RegisterFunctionalization.cpp index fabc12a03fa9f..74d02be9f93d3 100644 --- a/aten/src/ATen/templates/RegisterFunctionalization.cpp +++ b/aten/src/ATen/templates/RegisterFunctionalization.cpp @@ -60,7 +60,7 @@ inline Tensor to_meta(const Tensor& t) { /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt); } -inline c10::optional to_meta(const c10::optional& t) { +inline std::optional to_meta(const c10::optional& t) { if (t.has_value()) { return c10::make_optional(to_meta(*t)); } diff --git a/aten/src/ATen/templates/TensorBody.h b/aten/src/ATen/templates/TensorBody.h index 010f12d4cfbce..1515442dd1f94 100644 --- a/aten/src/ATen/templates/TensorBody.h +++ b/aten/src/ATen/templates/TensorBody.h @@ -398,7 +398,7 @@ class TORCH_API Tensor: public TensorBase { /// // f requires grad, has no operation creating it /// @endcode - /// \fn void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; + /// \fn void backward(const Tensor & gradient={}, std::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; /// /// Computes the gradient of current tensor with respect to graph leaves. /// @@ -433,7 +433,7 @@ class TORCH_API Tensor: public TensorBase { /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients). /// It is an implementation detail on which the user should not rely. /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. 
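The backward() declaration that follows the doc comment above takes retain_graph and inputs as optionals, so leaving them unset is distinguishable from passing an explicit value. A minimal libtorch usage sketch, not taken from the patch:

```
#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::ones({2, 2}, torch::requires_grad());
  auto y = (x * x + 3).sum();
  // gradient = {} and retain_graph = nullopt are the defaults whose spelling
  // changes in this hunk; the call site itself is unchanged.
  y.backward();
  std::cout << x.grad() << "\n";  // d/dx sum(x*x + 3) = 2x, i.e. all 2s here
  return 0;
}
```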
- void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const { + void backward(const Tensor & gradient={}, std::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const { // NB: Adding this wrapper to _backward here because we'd like our // 'backwards' api to accept the 'inputs' argument optionally. Since code gen // currently does not support optional of TensorList our approach is to replace @@ -626,7 +626,7 @@ class TORCH_API Tensor: public TensorBase { return TensorBase::data(); } - void _backward(TensorList inputs, const c10::optional& gradient, c10::optional keep_graph, bool create_graph) const; + void _backward(TensorList inputs, const std::optional& gradient, c10::optional keep_graph, bool create_graph) const; const Tensor& requires_grad_(bool _requires_grad=true) const { TensorBase::requires_grad_(_requires_grad); @@ -737,7 +737,7 @@ struct ExclusivelyOwnedTraits { namespace at { inline c10::MaybeOwned borrow_from_optional_tensor( - const c10::optional& opt) { + const std::optional& opt) { return opt.has_value() ? c10::MaybeOwned::borrowed(*opt) : c10::MaybeOwned::owned(std::in_place); diff --git a/aten/src/ATen/test/cpu_rng_test.cpp b/aten/src/ATen/test/cpu_rng_test.cpp index ebc3eee12f3f6..593d78d47887f 100644 --- a/aten/src/ATen/test/cpu_rng_test.cpp +++ b/aten/src/ATen/test/cpu_rng_test.cpp @@ -22,10 +22,10 @@ struct TestCPUGenerator : public c10::GeneratorImpl { ~TestCPUGenerator() override = default; uint32_t random() { return value_; } uint64_t random64() { return value_; } - c10::optional next_float_normal_sample() { return next_float_normal_sample_; } - c10::optional next_double_normal_sample() { return next_double_normal_sample_; } - void set_next_float_normal_sample(c10::optional randn) { next_float_normal_sample_ = randn; } - void set_next_double_normal_sample(c10::optional randn) { next_double_normal_sample_ = randn; } + std::optional next_float_normal_sample() { return next_float_normal_sample_; } + std::optional next_double_normal_sample() { return next_double_normal_sample_; } + void set_next_float_normal_sample(std::optional randn) { next_float_normal_sample_ = randn; } + void set_next_double_normal_sample(std::optional randn) { next_double_normal_sample_ = randn; } void set_current_seed(uint64_t seed) override { throw std::runtime_error("not implemented"); } void set_offset(uint64_t offset) override { throw std::runtime_error("not implemented"); } uint64_t get_offset() const override { throw std::runtime_error("not implemented"); } @@ -38,95 +38,95 @@ struct TestCPUGenerator : public c10::GeneratorImpl { static DeviceType device_type() { return DeviceType::CPU; } uint64_t value_; - c10::optional next_float_normal_sample_; - c10::optional next_double_normal_sample_; + std::optional next_float_normal_sample_; + std::optional next_double_normal_sample_; }; // ==================================================== Random ======================================================== -Tensor& random_(Tensor& self, c10::optional generator) { +Tensor& random_(Tensor& self, std::optional generator) { return at::native::templates::random_impl(self, generator); } -Tensor& random_from_to(Tensor& self, int64_t from, optional to, c10::optional generator) { +Tensor& random_from_to(Tensor& self, int64_t from, optional to, std::optional generator) { return at::native::templates::random_from_to_impl(self, from, to, generator); } -Tensor& random_to(Tensor& 
self, int64_t to, c10::optional generator) { +Tensor& random_to(Tensor& self, int64_t to, std::optional generator) { return random_from_to(self, 0, to, generator); } // ==================================================== Normal ======================================================== -Tensor& normal_(Tensor& self, double mean, double std, c10::optional gen) { +Tensor& normal_(Tensor& self, double mean, double std, std::optional gen) { return at::native::templates::normal_impl_(self, mean, std, gen); } -Tensor& normal_Tensor_float_out(const Tensor& mean, double std, c10::optional gen, Tensor& output) { +Tensor& normal_Tensor_float_out(const Tensor& mean, double std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, gen); } -Tensor& normal_float_Tensor_out(double mean, const Tensor& std, c10::optional gen, Tensor& output) { +Tensor& normal_float_Tensor_out(double mean, const Tensor& std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, gen); } -Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, c10::optional gen, Tensor& output) { +Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, std::optional gen, Tensor& output) { return at::native::templates::normal_out_impl(output, mean, std, gen); } -Tensor normal_Tensor_float(const Tensor& mean, double std, c10::optional gen) { +Tensor normal_Tensor_float(const Tensor& mean, double std, std::optional gen) { return at::native::templates::normal_impl(mean, std, gen); } -Tensor normal_float_Tensor(double mean, const Tensor& std, c10::optional gen) { +Tensor normal_float_Tensor(double mean, const Tensor& std, std::optional gen) { return at::native::templates::normal_impl(mean, std, gen); } -Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, c10::optional gen) { +Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, std::optional gen) { return at::native::templates::normal_impl(mean, std, gen); } // ==================================================== Uniform ======================================================= -Tensor& uniform_(Tensor& self, double from, double to, c10::optional generator) { +Tensor& uniform_(Tensor& self, double from, double to, std::optional generator) { return at::native::templates::uniform_impl_(self, from, to, generator); } // ==================================================== Cauchy ======================================================== -Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional generator) { +Tensor& cauchy_(Tensor& self, double median, double sigma, std::optional generator) { return at::native::templates::cauchy_impl_(self, median, sigma, generator); } // ================================================== LogNormal ======================================================= -Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional gen) { +Tensor& log_normal_(Tensor& self, double mean, double std, std::optional gen) { return at::native::templates::log_normal_impl_(self, mean, std, gen); } // ================================================== Geometric ======================================================= -Tensor& geometric_(Tensor& self, double p, c10::optional gen) { +Tensor& geometric_(Tensor& self, double p, std::optional gen) { return at::native::templates::geometric_impl_(self, p, gen); } // ================================================== Exponential 
===================================================== -Tensor& exponential_(Tensor& self, double lambda, c10::optional gen) { +Tensor& exponential_(Tensor& self, double lambda, std::optional gen) { return at::native::templates::exponential_impl_(self, lambda, gen); } // ================================================== Bernoulli ======================================================= -Tensor& bernoulli_Tensor(Tensor& self, const Tensor& p_, c10::optional gen) { +Tensor& bernoulli_Tensor(Tensor& self, const Tensor& p_, std::optional gen) { return at::native::templates::bernoulli_impl_(self, p_, gen); } -Tensor& bernoulli_float(Tensor& self, double p, c10::optional gen) { +Tensor& bernoulli_float(Tensor& self, double p, std::optional gen) { return at::native::templates::bernoulli_impl_(self, p, gen); } -Tensor& bernoulli_out(const Tensor& self, c10::optional gen, Tensor& result) { +Tensor& bernoulli_out(const Tensor& self, std::optional gen, Tensor& result) { return at::native::templates::bernoulli_out_impl(result, self, gen); } diff --git a/aten/src/ATen/test/cuda_distributions_test.cu b/aten/src/ATen/test/cuda_distributions_test.cu index 82d3d7777bc23..dcb5c9cc19cf0 100644 --- a/aten/src/ATen/test/cuda_distributions_test.cu +++ b/aten/src/ATen/test/cuda_distributions_test.cu @@ -173,7 +173,7 @@ TEST(RandomPermutationTest, TestIslandShuffle) { bool shuffled2 = false; for (int i = 0; i < 100; i++) { cudaDeviceSynchronize(); - c10::optional gen = c10::nullopt; + std::optional gen = c10::nullopt; randperm_handle_duplicate_keys(keys, values, 8, 5, gen); cudaDeviceSynchronize(); std::vector slice1 = {values[0], values[1], values[2]}; diff --git a/aten/src/ATen/test/cuda_optional_test.cu b/aten/src/ATen/test/cuda_optional_test.cu index b35180d921e9f..be51a4cbe8c97 100644 --- a/aten/src/ATen/test/cuda_optional_test.cu +++ b/aten/src/ATen/test/cuda_optional_test.cu @@ -11,8 +11,8 @@ using namespace at; // optional in cuda files TEST(OptionalTest, OptionalTestCUDA) { if (!at::cuda::is_available()) return; - c10::optional trivially_destructible; - c10::optional> non_trivially_destructible; + std::optional trivially_destructible; + std::optional> non_trivially_destructible; ASSERT_FALSE(trivially_destructible.has_value()); ASSERT_FALSE(non_trivially_destructible.has_value()); diff --git a/aten/src/ATen/test/cuda_stream_test.cpp b/aten/src/ATen/test/cuda_stream_test.cpp index 77100482b5955..b6b3bf7f9e7de 100644 --- a/aten/src/ATen/test/cuda_stream_test.cpp +++ b/aten/src/ATen/test/cuda_stream_test.cpp @@ -408,7 +408,7 @@ TEST(TestStream, ExternalMultiThreadTest) { std::promise aToBProm; std::promise bToAProm; - c10::optional foundStream; + std::optional foundStream; std::thread threadA([&]() { at::cuda::CUDAGuard device_guard(0); diff --git a/aten/src/ATen/test/extension_backend_test.cpp b/aten/src/ATen/test/extension_backend_test.cpp index 4be68b1d0a710..3b2345f347d63 100644 --- a/aten/src/ATen/test/extension_backend_test.cpp +++ b/aten/src/ATen/test/extension_backend_test.cpp @@ -15,8 +15,8 @@ using namespace at; static int test_int; -Tensor empty_override(SymIntArrayRef size, c10::optional dtype, c10::optional layout, - c10::optional device, c10::optional pin_memory, c10::optional optional_memory_format) { +Tensor empty_override(SymIntArrayRef size, std::optional dtype, c10::optional layout, + std::optional device, c10::optional pin_memory, c10::optional optional_memory_format) { test_int = 1; auto tensor_impl = c10::make_intrusive( Storage( @@ -39,10 +39,10 @@ Tensor add_override(const Tensor & a, 
const Tensor & b , const Scalar& c) { Tensor empty_strided_override( IntArrayRef size, IntArrayRef stride, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return empty_override(fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, c10::nullopt); } diff --git a/aten/src/ATen/test/operator_name_test.cpp b/aten/src/ATen/test/operator_name_test.cpp index 6d074572dd748..f670a434cb638 100644 --- a/aten/src/ATen/test/operator_name_test.cpp +++ b/aten/src/ATen/test/operator_name_test.cpp @@ -9,7 +9,7 @@ TEST(OperatorNameTest, SetNamespaceIfNotSetWithoutExistingNamespace) { EXPECT_TRUE(result); EXPECT_EQ(testName.name, "ns::operator"); EXPECT_EQ(testName.overload_name, "operator.overload"); - EXPECT_EQ(testName.getNamespace(), c10::optional("ns")); + EXPECT_EQ(testName.getNamespace(), std::optional("ns")); } TEST(OperatorNameTest, SetNamespaceIfNotSetWithExistingNamespace) { @@ -18,5 +18,5 @@ TEST(OperatorNameTest, SetNamespaceIfNotSetWithExistingNamespace) { EXPECT_FALSE(result); EXPECT_EQ(namespacedName.name, "already_namespaced::operator"); EXPECT_EQ(namespacedName.overload_name, "operator.overload"); - EXPECT_EQ(namespacedName.getNamespace(), c10::optional("already_namespaced")); + EXPECT_EQ(namespacedName.getNamespace(), std::optional("already_namespaced")); } diff --git a/aten/src/ATen/test/rng_test.h b/aten/src/ATen/test/rng_test.h index df04d340893fb..82b9c6d5a836e 100644 --- a/aten/src/ATen/test/rng_test.h +++ b/aten/src/ATen/test/rng_test.h @@ -68,14 +68,14 @@ void test_random_from_to(const at::Device& device) { constexpr auto uint64_max_val = std::numeric_limits::max(); std::vector froms; - std::vector> tos; + std::vector<::std::optional> tos; if constexpr (::std::is_same_v) { froms = { 0L }; tos = { 1L, - static_cast>(c10::nullopt) + static_cast<::std::optional>(c10::nullopt) }; } else if constexpr (::std::is_signed_v) { constexpr int64_t min_from = _min_from(); @@ -86,11 +86,11 @@ void test_random_from_to(const at::Device& device) { 42L }; tos = { - c10::optional(-42L), - c10::optional(0L), - c10::optional(42L), - c10::optional(max_to), - static_cast>(c10::nullopt) + ::std::optional(-42L), + ::std::optional(0L), + ::std::optional(42L), + ::std::optional(max_to), + static_cast<::std::optional>(c10::nullopt) }; } else { froms = { @@ -98,9 +98,9 @@ void test_random_from_to(const at::Device& device) { 42L }; tos = { - c10::optional(42L), - c10::optional(max_to), - static_cast>(c10::nullopt) + ::std::optional(42L), + ::std::optional(max_to), + static_cast<::std::optional>(c10::nullopt) }; } @@ -116,7 +116,7 @@ void test_random_from_to(const at::Device& device) { bool from_to_case_covered = false; bool from_case_covered = false; for (const int64_t from : froms) { - for (const c10::optional to : tos) { + for (const ::std::optional to : tos) { if (!to.has_value() || from < *to) { for (const uint64_t val : vals) { auto gen = at::make_generator(val); diff --git a/aten/src/ATen/test/type_test.cpp b/aten/src/ATen/test/type_test.cpp index 3ea64a4da2124..955d60c586c0f 100644 --- a/aten/src/ATen/test/type_test.cpp +++ b/aten/src/ATen/test/type_test.cpp @@ -9,7 +9,7 @@ namespace c10 { TEST(TypeCustomPrinter, Basic) { TypePrinter printer = - [](const Type& t) -> c10::optional { + [](const Type& t) -> std::optional { if (auto tensorType = t.cast()) { return "CustomTensor"; } @@ -29,7 +29,7 @@ TEST(TypeCustomPrinter, Basic) { 
TEST(TypeCustomPrinter, ContainedTypes) { TypePrinter printer = - [](const Type& t) -> c10::optional { + [](const Type& t) -> std::optional { if (auto tensorType = t.cast()) { return "CustomTensor"; } @@ -53,7 +53,7 @@ TEST(TypeCustomPrinter, ContainedTypes) { TEST(TypeCustomPrinter, NamedTuples) { TypePrinter printer = - [](const Type& t) -> c10::optional { + [](const Type& t) -> std::optional { if (auto tupleType = t.cast()) { // Rewrite only NamedTuples if (tupleType->name()) { diff --git a/aten/src/ATen/test/vulkan_api_test.cpp b/aten/src/ATen/test/vulkan_api_test.cpp index 5b6a31e0b5147..687691a370bf4 100644 --- a/aten/src/ATen/test/vulkan_api_test.cpp +++ b/aten/src/ATen/test/vulkan_api_test.cpp @@ -177,8 +177,8 @@ static void gen_all_subsets( static void slice_test( const std::vector& size, int64_t dim, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, int64_t step) { // Arrange const auto in_cpu = at::rand(size, at::device(at::kCPU).dtype(at::kFloat)); @@ -212,7 +212,7 @@ static void slice_tests(const std::unordered_map>& } } -static void clone_test(const std::vector& size, c10::optional optional_memory_format) { +static void clone_test(const std::vector& size, std::optional optional_memory_format) { // Arrange const auto in_cpu = at::rand(size, at::device(at::kCPU).dtype(at::kFloat)); const auto in_vulkan = in_cpu.vulkan(); @@ -249,7 +249,7 @@ inline std::vector callOpByName( const char* func_name, const char* overload_name, Args... args) { - const c10::optional op_handle = + const std::optional op_handle = c10::Dispatcher::singleton().findSchema({func_name, overload_name}); assert(op_handle.has_value()); return callOpByHandle(op_handle.value(), std::forward(args)...); @@ -7120,7 +7120,7 @@ TEST_F(VulkanAPITest, zeros) { TEST_F(VulkanAPITest, clone_success) { // Arrange - std::multimap, std::vector> mem2sizes { + std::multimap, std::vector> mem2sizes { {c10::MemoryFormat::Preserve, {2, 3, 5, 161}}, // 4D tensors with MemoryFormat::Preserve {c10::MemoryFormat::Contiguous, {2, 3, 5, 161}}, // 4D tensors with MemoryFormat::Contiguous {{}, {2, 3, 5, 161}}, // 4D tensors with null diff --git a/aten/src/ATen/test/vulkan_quantized_api_test.cpp b/aten/src/ATen/test/vulkan_quantized_api_test.cpp index 031154de17f85..cf243d5ce50c9 100644 --- a/aten/src/ATen/test/vulkan_quantized_api_test.cpp +++ b/aten/src/ATen/test/vulkan_quantized_api_test.cpp @@ -136,7 +136,7 @@ inline std::vector callOpByName( const char* func_name, const char* overload_name, Args... 
args) { - const c10::optional op_handle = + const std::optional op_handle = c10::Dispatcher::singleton().findSchema({func_name, overload_name}); assert(op_handle.has_value()); return callOpByHandle(op_handle.value(), std::forward(args)...); diff --git a/aten/src/ATen/xpu/CachingHostAllocator.cpp b/aten/src/ATen/xpu/CachingHostAllocator.cpp index 13cd1b6124a9b..332114a8715b7 100644 --- a/aten/src/ATen/xpu/CachingHostAllocator.cpp +++ b/aten/src/ATen/xpu/CachingHostAllocator.cpp @@ -20,7 +20,7 @@ struct XPUCachingHostAllocatorImpl } void record_stream( - c10::optional>& events, + std::optional>& events, XPUStream stream) override { XPUEvent event; event.record(stream); diff --git a/binaries/compare_models_torch.cc b/binaries/compare_models_torch.cc index 5e90445560bc7..c8338fe546a59 100644 --- a/binaries/compare_models_torch.cc +++ b/binaries/compare_models_torch.cc @@ -305,7 +305,7 @@ int main(int argc, char** argv) { torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard(false); c10::CPUCachingAllocator caching_allocator; - c10::optional caching_allocator_guard; + std::optional caching_allocator_guard; if (FLAGS_use_caching_allocator) { caching_allocator_guard.emplace(&caching_allocator); } diff --git a/binaries/speed_benchmark_torch.cc b/binaries/speed_benchmark_torch.cc index b2c521e569b16..00b17ddd47488 100644 --- a/binaries/speed_benchmark_torch.cc +++ b/binaries/speed_benchmark_torch.cc @@ -294,7 +294,7 @@ int main(int argc, char** argv) { } c10::CPUCachingAllocator caching_allocator; - c10::optional caching_allocator_guard; + std::optional caching_allocator_guard; if (FLAGS_use_caching_allocator) { caching_allocator_guard.emplace(&caching_allocator); } diff --git a/c10/core/ConstantSymNodeImpl.h b/c10/core/ConstantSymNodeImpl.h index 4df1d1010f807..3c0fb66f7469f 100644 --- a/c10/core/ConstantSymNodeImpl.h +++ b/c10/core/ConstantSymNodeImpl.h @@ -69,14 +69,14 @@ class C10_API ConstantSymNodeImpl : public SymNodeImpl { return ::std::get(value_) ? 
"true" : "false"; } } - c10::optional constant_int() override { + std::optional constant_int() override { if constexpr (is_int_()) { return ::std::get(value_); } else { return c10::nullopt; } } - c10::optional constant_bool() override { + std::optional constant_bool() override { if constexpr (is_bool_()) { return ::std::get(value_); } else { diff --git a/c10/core/StorageImpl.cpp b/c10/core/StorageImpl.cpp index dc36064ddca4e..9dd6f5f431316 100644 --- a/c10/core/StorageImpl.cpp +++ b/c10/core/StorageImpl.cpp @@ -68,7 +68,7 @@ c10::intrusive_ptr make_storage_impl( c10::DataPtr data_ptr, c10::Allocator* allocator, bool resizable, - c10::optional device_opt) { + std::optional device_opt) { // This will be non-nullptr only when there is a custom StorageImpl // constructor for the given device c10::StorageImplCreateHelper fptr = nullptr; diff --git a/c10/core/StorageImpl.h b/c10/core/StorageImpl.h index 4ee9f62e620f5..abe6218fbc941 100644 --- a/c10/core/StorageImpl.h +++ b/c10/core/StorageImpl.h @@ -325,6 +325,6 @@ C10_API c10::intrusive_ptr make_storage_impl( c10::DataPtr data_ptr, c10::Allocator* allocator, bool resizable, - c10::optional device_opt); + std::optional device_opt); } // namespace c10 diff --git a/c10/core/SymBool.h b/c10/core/SymBool.h index cf984611e2340..9f9f141293a37 100644 --- a/c10/core/SymBool.h +++ b/c10/core/SymBool.h @@ -34,7 +34,7 @@ class C10_API SymBool { SymNode wrap_node(const SymNode& base) const; bool expect_bool() const { - c10::optional c = maybe_as_bool(); + std::optional c = maybe_as_bool(); TORCH_CHECK(c.has_value()); return *c; } @@ -66,7 +66,7 @@ class C10_API SymBool { return data_; } - c10::optional maybe_as_bool() const { + std::optional maybe_as_bool() const { if (!is_heap_allocated()) { return c10::make_optional(data_); } diff --git a/c10/core/SymInt.h b/c10/core/SymInt.h index 79ce4054b8640..025c351334a01 100644 --- a/c10/core/SymInt.h +++ b/c10/core/SymInt.h @@ -229,7 +229,7 @@ class C10_API SymInt { return data_; } - c10::optional maybe_as_int() const { + std::optional maybe_as_int() const { if (!is_heap_allocated()) { return c10::make_optional(data_); } diff --git a/c10/core/SymIntArrayRef.h b/c10/core/SymIntArrayRef.h index 76137aa47bdbb..760f4ba4e79a2 100644 --- a/c10/core/SymIntArrayRef.h +++ b/c10/core/SymIntArrayRef.h @@ -19,7 +19,7 @@ inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) { // allocate another buffer and write the integers into it. If you need it, // we can do it. But I don't think you need it. 
-inline c10::optional asIntArrayRefSlowOpt( +inline std::optional asIntArrayRefSlowOpt( c10::SymIntArrayRef ar) { for (const c10::SymInt& sci : ar) { if (sci.is_heap_allocated()) { diff --git a/c10/core/SymNodeImpl.h b/c10/core/SymNodeImpl.h index b99d0302dc4c5..9ffab5065109e 100644 --- a/c10/core/SymNodeImpl.h +++ b/c10/core/SymNodeImpl.h @@ -188,19 +188,19 @@ class C10_API SymNodeImpl : public c10::intrusive_ptr_target { virtual std::string str() { TORCH_CHECK(false, "NYI"); }; - virtual c10::optional nested_int() { + virtual std::optional nested_int() { return c10::nullopt; } - virtual c10::optional nested_int_coeff() { + virtual std::optional nested_int_coeff() { return c10::nullopt; } - virtual c10::optional constant_int() { + virtual std::optional constant_int() { return c10::nullopt; } - virtual c10::optional constant_bool() { + virtual std::optional constant_bool() { return c10::nullopt; } - virtual c10::optional maybe_as_int() { + virtual std::optional maybe_as_int() { return c10::nullopt; } virtual bool is_constant() { diff --git a/c10/core/SymbolicShapeMeta.cpp b/c10/core/SymbolicShapeMeta.cpp index 04b2f8da832f4..62b03d36ec71c 100644 --- a/c10/core/SymbolicShapeMeta.cpp +++ b/c10/core/SymbolicShapeMeta.cpp @@ -28,7 +28,7 @@ SymbolicShapeMeta::SymbolicShapeMeta(const SymbolicShapeMeta& other) } // base, sizes, strides -static c10::optional< +static std::optional< std::tuple, std::vector>> normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) { // Look for a SymNode to dispatch on diff --git a/c10/core/TensorImpl.cpp b/c10/core/TensorImpl.cpp index 320dc7796877e..47f83c78e5789 100644 --- a/c10/core/TensorImpl.cpp +++ b/c10/core/TensorImpl.cpp @@ -127,7 +127,7 @@ TensorImpl::TensorImpl( TensorImpl::TensorImpl( DispatchKeySet key_set, const caffe2::TypeMeta data_type, - c10::optional device_opt) + std::optional device_opt) : TensorImpl({}, key_set, data_type, device_opt) {} // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) @@ -135,7 +135,7 @@ TensorImpl::TensorImpl( Storage&& storage, DispatchKeySet key_set, const caffe2::TypeMeta data_type, - c10::optional device_opt) + std::optional device_opt) : storage_(std::move(storage)), numel_(0), @@ -846,7 +846,7 @@ static void clone_symvec(SymIntArrayRef src, SymDimVector& dst) { void TensorImpl::set_sizes_and_strides( c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, - c10::optional storage_offset) { + std::optional storage_offset) { auto int_sizes = asIntArrayRefSlowOpt(sizes); auto int_strides = asIntArrayRefSlowOpt(strides); if (int_sizes && int_strides && diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h index 3a74c8936297e..e49a66c916ffb 100644 --- a/c10/core/TensorImpl.h +++ b/c10/core/TensorImpl.h @@ -233,8 +233,8 @@ struct C10_API ExtraMeta { std::unique_ptr symbolic_shape_meta_ = nullptr; std::unique_ptr named_tensor_meta_ = nullptr; intrusive_ptr backend_meta_ = nullptr; - c10::optional custom_data_ptr_error_msg_ = c10::nullopt; - c10::optional custom_storage_error_msg_ = c10::nullopt; + std::optional custom_data_ptr_error_msg_ = c10::nullopt; + std::optional custom_storage_error_msg_ = c10::nullopt; ExtraMeta() = default; ExtraMeta(const ExtraMeta& other) { @@ -260,8 +260,8 @@ struct C10_API ExtraMeta { std::unique_ptr symbolic_shape_meta, std::unique_ptr named_tensor_meta, intrusive_ptr backend_meta, - c10::optional custom_data_ptr_error_msg = c10::nullopt, - c10::optional custom_storage_access_error_msg = c10::nullopt) + std::optional custom_data_ptr_error_msg = c10::nullopt, + 
std::optional custom_storage_access_error_msg = c10::nullopt) : symbolic_shape_meta_(std::move(symbolic_shape_meta)), named_tensor_meta_(std::move(named_tensor_meta)), backend_meta_(std::move(backend_meta)), @@ -528,7 +528,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { TensorImpl( DispatchKeySet, const caffe2::TypeMeta data_type, - c10::optional device_opt); + std::optional device_opt); // Legacy constructors so I don't have to go update call sites. // TODO: When Variable is added, delete these constructors @@ -543,7 +543,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { TensorImpl( DispatchKey dispatch_key, const caffe2::TypeMeta data_type, - c10::optional device_opt) + std::optional device_opt) : TensorImpl(DispatchKeySet(dispatch_key), data_type, device_opt) {} private: @@ -555,7 +555,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { Storage&& storage, DispatchKeySet, const caffe2::TypeMeta data_type, - c10::optional); + std::optional); public: TensorImpl(const TensorImpl&) = delete; @@ -1253,7 +1253,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { protected: c10::Device device_default() const { TORCH_CHECK(device_opt_.has_value(), "tensor does not have a device"); - // See NOTE [c10::optional operator usage in CUDA] + // See NOTE [std::optional operator usage in CUDA] return *device_opt_; } @@ -1687,7 +1687,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { } void release_storage_and_set_meta_custom_data_ptr_error_msg_( - c10::optional s) { + std::optional s) { storage_ = {}; set_storage_access_should_throw(); get_extra_meta().custom_data_ptr_error_msg_ = s; @@ -1737,7 +1737,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { void set_sizes_and_strides( c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, - c10::optional storage_offset = c10::nullopt); + std::optional storage_offset = c10::nullopt); // This is renamed to avoid breaking overload BC void generic_set_sizes_contiguous(c10::SymIntArrayRef sizes); void generic_set_sizes_contiguous(c10::IntArrayRef sizes) { @@ -1834,7 +1834,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { void set_sizes_and_strides( IntArrayRef new_size, IntArrayRef new_stride, - c10::optional storage_offset = c10::nullopt) { + std::optional storage_offset = c10::nullopt) { TORCH_CHECK( allow_tensor_metadata_change(), "set_sizes_and_strides ", @@ -2129,10 +2129,10 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { } private: - // See NOTE [c10::optional operator usage in CUDA] + // See NOTE [std::optional operator usage in CUDA] // We probably don't want to expose this publicly until // the note is addressed. - c10::optional device_opt() const { + std::optional device_opt() const { return device_opt_; } @@ -2146,7 +2146,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { TORCH_CHECK( device_opt_.has_value(), "device_type cannot be run on undefined Tensor"); - // See NOTE [c10::optional operator usage in CUDA] + // See NOTE [std::optional operator usage in CUDA] return (*device_opt_).type(); } @@ -2875,7 +2875,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { // agree with the type meta in storage caffe2::TypeMeta data_type_; - // NOTE [c10::optional operator usage in CUDA] + // NOTE [std::optional operator usage in CUDA] // Our optional definition doesn't compile in .cu file if `value()` or // `operator->` are used. Instead, we always use `operator*`. 
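A small illustration of the NOTE above: in .cu translation units the codebase checks has_value() and then dereferences with operator*, avoiding value() and operator->. Sketch with a hypothetical helper, not taken from the patch:

```
#include <optional>

// Imagined to live in a .cu file; shows the *opt-after-has_value() style only.
inline int device_index_or_zero(const std::optional<int>& idx) {
  if (!idx.has_value()) {
    return 0;
  }
  return *idx;  // dereference with operator*; value() and operator-> are avoided
}
```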
// See https://github.com/pytorch/pytorch/issues/18496 for more info. @@ -2887,7 +2887,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { // // INVARIANT: device_opt_ is only nullopt for undefined tensors // (which do not have a device.) - c10::optional device_opt_; + std::optional device_opt_; // default member initializers for bit-fields only available with -std=c++2a // or -std=gnu++2a diff --git a/c10/core/TensorOptions.h b/c10/core/TensorOptions.h index 765f474702ef7..d99005d3d28f8 100644 --- a/c10/core/TensorOptions.h +++ b/c10/core/TensorOptions.h @@ -24,28 +24,28 @@ namespace c10 { DispatchKey computeDispatchKey( - c10::optional dtype, - c10::optional layout, - c10::optional device); + std::optional dtype, + std::optional layout, + std::optional device); -inline ScalarType dtype_or_default(c10::optional dtype) { +inline ScalarType dtype_or_default(std::optional dtype) { return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); }); } inline caffe2::TypeMeta dtype_or_default( - c10::optional dtype) { + std::optional dtype) { return value_or_else(dtype, [] { return get_default_dtype(); }); } -inline Layout layout_or_default(c10::optional layout) { +inline Layout layout_or_default(std::optional layout) { return layout.value_or(kStrided); } -inline Device device_or_default(c10::optional device) { +inline Device device_or_default(std::optional device) { return value_or_else(device, [] { return Device(kCPU); }); } -inline bool pinned_memory_or_default(c10::optional pinned_memory) { +inline bool pinned_memory_or_default(std::optional pinned_memory) { return pinned_memory.value_or(false); } @@ -193,19 +193,19 @@ struct C10_API TensorOptions { /// Return a copy of `TensorOptions` with `device` set to the given one, or /// cleared if `device` is `nullopt`. C10_NODISCARD TensorOptions - device(c10::optional device) const noexcept { + device(std::optional device) const noexcept { TensorOptions r = *this; r.set_device(device); return r; } /// Return a copy of `TensorOptions` with `device` set to the given one. - /// (This overload ensures that variadic template c10::optional constructor + /// (This overload ensures that variadic template std::optional constructor /// for Device work correctly.) template C10_NODISCARD TensorOptions device(Args&&... args) const noexcept { return device( - c10::optional(std::in_place, std::forward(args)...)); + std::optional(std::in_place, std::forward(args)...)); } /// Return a copy of `TensorOptions`, but with device set to CUDA, and the @@ -220,7 +220,7 @@ struct C10_API TensorOptions { /// Return a copy of `TensorOptions` with `dtype` set to the given one. C10_NODISCARD TensorOptions - dtype(c10::optional dtype) const noexcept { + dtype(std::optional dtype) const noexcept { TensorOptions r = *this; r.set_dtype(dtype); return r; @@ -228,7 +228,7 @@ struct C10_API TensorOptions { // legacy function to support ScalarType C10_NODISCARD TensorOptions - dtype(c10::optional dtype) const noexcept { + dtype(std::optional dtype) const noexcept { TensorOptions r = *this; r.set_dtype(dtype); return r; @@ -244,7 +244,7 @@ struct C10_API TensorOptions { /// Sets the layout of the `TensorOptions`. C10_NODISCARD TensorOptions - layout(c10::optional layout) const noexcept { + layout(std::optional layout) const noexcept { TensorOptions r = *this; r.set_layout(layout); return r; @@ -252,7 +252,7 @@ struct C10_API TensorOptions { /// Sets the `requires_grad` property of the `TensorOptions`. 
C10_NODISCARD TensorOptions - requires_grad(c10::optional requires_grad) const noexcept { + requires_grad(std::optional requires_grad) const noexcept { TensorOptions r = *this; r.set_requires_grad(requires_grad); return r; @@ -260,7 +260,7 @@ struct C10_API TensorOptions { /// Sets the `pinned_memory` property on the `TensorOptions`. C10_NODISCARD TensorOptions - pinned_memory(c10::optional pinned_memory) const noexcept { + pinned_memory(std::optional pinned_memory) const noexcept { TensorOptions r = *this; r.set_pinned_memory(pinned_memory); return r; @@ -268,7 +268,7 @@ struct C10_API TensorOptions { /// Sets the `memory_format` property on `TensorOptions`. C10_NODISCARD TensorOptions - memory_format(c10::optional memory_format) const noexcept { + memory_format(std::optional memory_format) const noexcept { TensorOptions r = *this; r.set_memory_format(memory_format); return r; @@ -286,7 +286,7 @@ struct C10_API TensorOptions { /// Returns the device of the `TensorOptions`, or `c10::nullopt` if /// device is not specified. - c10::optional device_opt() const noexcept { + std::optional device_opt() const noexcept { return has_device_ ? c10::make_optional(device_) : c10::nullopt; } @@ -307,7 +307,7 @@ struct C10_API TensorOptions { /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if /// device is not specified. - c10::optional dtype_opt() const noexcept { + std::optional dtype_opt() const noexcept { return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt; } @@ -323,7 +323,7 @@ struct C10_API TensorOptions { /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if /// layout is not specified. - c10::optional layout_opt() const noexcept { + std::optional layout_opt() const noexcept { return has_layout_ ? c10::make_optional(layout_) : c10::nullopt; } @@ -339,7 +339,7 @@ struct C10_API TensorOptions { /// Returns the `requires_grad` property of the `TensorOptions`, or /// `c10::nullopt` if `requires_grad` is not specified. - c10::optional requires_grad_opt() const noexcept { + std::optional requires_grad_opt() const noexcept { return has_requires_grad_ ? c10::make_optional(requires_grad_) : c10::nullopt; } @@ -379,7 +379,7 @@ struct C10_API TensorOptions { /// Returns the `pinned_memory` property of the `TensorOptions`, or /// `c10::nullopt` if `pinned_memory` is not specified. - c10::optional pinned_memory_opt() const noexcept { + std::optional pinned_memory_opt() const noexcept { return has_pinned_memory_ ? c10::make_optional(pinned_memory_) : c10::nullopt; } @@ -394,7 +394,7 @@ struct C10_API TensorOptions { /// Returns the `memory_layout` property of `TensorOptions, or /// `c10::nullopt` if `memory_format` is not specified. - c10::optional memory_format_opt() const noexcept { + std::optional memory_format_opt() const noexcept { return has_memory_format_ ? c10::make_optional(memory_format_) : c10::nullopt; } @@ -435,7 +435,7 @@ struct C10_API TensorOptions { // TODO remove after TensorOptions rationalization TensorOptions merge_memory_format( - c10::optional optional_memory_format) const noexcept { + std::optional optional_memory_format) const noexcept { TensorOptions merged = *this; if (optional_memory_format.has_value()) { merged.set_memory_format(*optional_memory_format); @@ -466,7 +466,7 @@ struct C10_API TensorOptions { // on temporaries.) /// Mutably set the device of `TensorOptions`. 
- void set_device(c10::optional device) & noexcept { + void set_device(std::optional device) & noexcept { if (device) { device_ = *device; has_device_ = true; @@ -476,7 +476,7 @@ struct C10_API TensorOptions { } /// Mutably set the dtype of `TensorOptions`. - void set_dtype(c10::optional dtype) & noexcept { + void set_dtype(std::optional dtype) & noexcept { if (dtype) { dtype_ = *dtype; has_dtype_ = true; @@ -486,7 +486,7 @@ struct C10_API TensorOptions { } // legacy function to support ScalarType - void set_dtype(c10::optional dtype) & noexcept { + void set_dtype(std::optional dtype) & noexcept { if (dtype) { dtype_ = scalarTypeToTypeMeta(*dtype); has_dtype_ = true; @@ -496,7 +496,7 @@ struct C10_API TensorOptions { } /// Mutably set the layout of `TensorOptions`. - void set_layout(c10::optional layout) & noexcept { + void set_layout(std::optional layout) & noexcept { if (layout) { layout_ = *layout; has_layout_ = true; @@ -506,7 +506,7 @@ struct C10_API TensorOptions { } /// Mutably set the `requires_grad` property of `TensorOptions`. - void set_requires_grad(c10::optional requires_grad) & noexcept { + void set_requires_grad(std::optional requires_grad) & noexcept { if (requires_grad) { requires_grad_ = *requires_grad; has_requires_grad_ = true; @@ -516,7 +516,7 @@ struct C10_API TensorOptions { } /// Mutably set the `pinned_memory` property of `TensorOptions`. - void set_pinned_memory(c10::optional pinned_memory) & noexcept { + void set_pinned_memory(std::optional pinned_memory) & noexcept { if (pinned_memory) { pinned_memory_ = *pinned_memory; has_pinned_memory_ = true; @@ -526,7 +526,7 @@ struct C10_API TensorOptions { } /// Mutably set the `memory_Format` property of `TensorOptions`. - void set_memory_format(c10::optional memory_format) & noexcept { + void set_memory_format(std::optional memory_format) & noexcept { if (memory_format) { memory_format_ = *memory_format; has_memory_format_ = true; @@ -544,7 +544,7 @@ struct C10_API TensorOptions { // // TODO: MemoryFormat is not implemented in this way - // NB: We didn't use c10::optional here, because then we can't pack + // NB: We didn't use std::optional here, because then we can't pack // the has_***_ boolean fields. Device device_ = at::kCPU; // 16-bit @@ -632,9 +632,9 @@ inline std::string toString(const TensorOptions& options) { // This is intended to be a centralized location by which we can determine // what an appropriate DispatchKey for a tensor is. inline DispatchKey computeDispatchKey( - c10::optional dtype, - c10::optional layout, - c10::optional device) { + std::optional dtype, + std::optional layout, + std::optional device) { const auto layout_ = layout_or_default(layout); const auto device_ = device_or_default(device); switch (layout_) { diff --git a/c10/core/impl/PyObjectSlot.h b/c10/core/impl/PyObjectSlot.h index b3a4b85f05e8e..518b0e63e4921 100644 --- a/c10/core/impl/PyObjectSlot.h +++ b/c10/core/impl/PyObjectSlot.h @@ -93,8 +93,8 @@ struct C10_API PyObjectSlot { // be properly treated as a nonhermetic PyObject. 
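The `NB` comment in the `TensorOptions` hunk above explains why the class keeps raw members plus `has_*_` bit-fields instead of `std::optional` members: each optional carries its own engaged flag, which cannot be packed. A rough, implementation-defined illustration of the size difference, with simplified payload types that are not the real layout:

```
#include <cstdint>
#include <iostream>
#include <optional>

// Packed variant: payloads are stored directly and the "was it set?" flags
// are one-bit bit-fields, so several flags can share a single byte.
struct PackedOptions {
  int16_t device;        // stand-in for the 16-bit Device payload
  int8_t layout;         // stand-in for the 8-bit Layout payload
  bool has_device_ : 1;  // flags pack together
  bool has_layout_ : 1;
};

// std::optional variant: every optional carries its own engaged bool, which
// cannot share storage with neighbouring fields, so the struct grows.
struct OptionalOptions {
  std::optional<int16_t> device;
  std::optional<int8_t> layout;
};

int main() {
  // Exact numbers are implementation-defined; the packed form is smaller on
  // typical ABIs (e.g. 4 vs 6 bytes here).
  std::cout << sizeof(PackedOptions) << " vs " << sizeof(OptionalOptions) << '\n';
  return 0;
}
```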
// // NB: this lives in header so that we can avoid actually creating the - // c10::optional - c10::optional check_pyobj( + // std::optional + std::optional check_pyobj( PyInterpreter* self_interpreter, bool ignore_hermetic_tls = false) const { // Note [Memory ordering on Python interpreter tag] diff --git a/c10/core/impl/TorchDispatchModeTLS.cpp b/c10/core/impl/TorchDispatchModeTLS.cpp index e558a70522aca..f1847cb005b4c 100644 --- a/c10/core/impl/TorchDispatchModeTLS.cpp +++ b/c10/core/impl/TorchDispatchModeTLS.cpp @@ -121,7 +121,7 @@ int64_t TorchDispatchModeTLS::stack_len() { return stack_len + infra_modes_len; } -const c10::optional> +const std::optional> TorchDispatchModeTLS::get_mode(TorchDispatchModeKey mode_key) { return torchDispatchModeState.infra_modes_[static_cast(mode_key)]; } @@ -145,7 +145,7 @@ void TorchDispatchModeTLS::set_mode( torchDispatchModeState.infra_modes_[static_cast(mode_key)] = mode; } -const c10::optional> +const std::optional> TorchDispatchModeTLS::unset_mode(TorchDispatchModeKey mode_key) { auto out = torchDispatchModeState.infra_modes_[static_cast(mode_key)]; torchDispatchModeState.infra_modes_[static_cast(mode_key)] = diff --git a/c10/core/impl/TorchDispatchModeTLS.h b/c10/core/impl/TorchDispatchModeTLS.h index d9ac8d8449b49..7179d52c35162 100644 --- a/c10/core/impl/TorchDispatchModeTLS.h +++ b/c10/core/impl/TorchDispatchModeTLS.h @@ -35,9 +35,9 @@ struct C10_API TorchDispatchModeTLS { int64_t idx); static int64_t stack_len(); - static const c10::optional> + static const std::optional> get_mode(TorchDispatchModeKey mode_key); - static const c10::optional> + static const std::optional> unset_mode(TorchDispatchModeKey mode_key); static void set_mode( const std::shared_ptr& mode, @@ -55,7 +55,7 @@ struct C10_API TorchDispatchModeTLS { // However, we only allow a single FakeTensorMode onto the stack at a time // (Pushing additional FakeTensorModes onto the stack is a no-op) std::array< - c10::optional>, + std::optional>, static_cast(TorchDispatchModeKey::NUM_MODE_KEYS)> infra_modes_; }; diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp index 8af2c41dfab7e..f3a0ca3ff73ad 100644 --- a/c10/cuda/CUDACachingAllocator.cpp +++ b/c10/cuda/CUDACachingAllocator.cpp @@ -550,7 +550,7 @@ struct ExpandableSegment { CUdeviceptr ptr_{}; size_t max_handles_{0}; size_t segment_size_; - std::vector> handles_; + std::vector> handles_; // devices on which this memory should be mapped in addition // to the device where the physical memory lives (device_). 
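The `TorchDispatchModeTLS` hunks above keep per-key infra modes in a `std::array` of `std::optional<std::shared_ptr<...>>`, with `get_mode`/`unset_mode` handing the optional slot back to the caller. A hedged sketch of that slot-table pattern with generic names, not the real TLS class:

```
#include <array>
#include <cassert>
#include <cstddef>
#include <memory>
#include <optional>
#include <string>

// Hypothetical slot table: one optional shared_ptr per mode key, where
// std::nullopt means "no mode of this kind is currently set".
enum class ModeKey { FAKE = 0, PROXY = 1, NUM_KEYS = 2 };

struct ModeSlots {
  std::optional<std::shared_ptr<std::string>> get(ModeKey key) const {
    return slots_[static_cast<std::size_t>(key)];
  }

  void set(ModeKey key, std::shared_ptr<std::string> mode) {
    slots_[static_cast<std::size_t>(key)] = std::move(mode);
  }

  // Clear the slot and hand the previous occupant (if any) back to the caller.
  std::optional<std::shared_ptr<std::string>> unset(ModeKey key) {
    auto out = slots_[static_cast<std::size_t>(key)];
    slots_[static_cast<std::size_t>(key)] = std::nullopt;
    return out;
  }

 private:
  std::array<std::optional<std::shared_ptr<std::string>>,
             static_cast<std::size_t>(ModeKey::NUM_KEYS)>
      slots_;
};

int main() {
  ModeSlots slots;
  assert(!slots.get(ModeKey::FAKE).has_value());
  slots.set(ModeKey::FAKE, std::make_shared<std::string>("fake_mode"));
  auto prev = slots.unset(ModeKey::FAKE);
  assert(prev.has_value() && *prev.value() == "fake_mode");
  assert(!slots.get(ModeKey::FAKE).has_value());
  return 0;
}
```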
std::vector peers_; diff --git a/c10/cuda/CUDAFunctions.cpp b/c10/cuda/CUDAFunctions.cpp index 652f222385465..2b53eb4d7c7cb 100644 --- a/c10/cuda/CUDAFunctions.cpp +++ b/c10/cuda/CUDAFunctions.cpp @@ -151,7 +151,7 @@ void warn_or_error_on_sync() { } } -c10::optional getDeviceIndexWithPrimaryContext() { +std::optional getDeviceIndexWithPrimaryContext() { // check current device first auto current_device_index = current_device(); if (current_device_index >= 0) { diff --git a/c10/cuda/CUDAFunctions.h b/c10/cuda/CUDAFunctions.h index 72fdfc6fd692f..192fafbad10f4 100644 --- a/c10/cuda/CUDAFunctions.h +++ b/c10/cuda/CUDAFunctions.h @@ -111,6 +111,6 @@ C10_CUDA_API void __inline__ stream_synchronize(cudaStream_t stream) { } C10_CUDA_API bool hasPrimaryContext(DeviceIndex device_index); -C10_CUDA_API c10::optional getDeviceIndexWithPrimaryContext(); +C10_CUDA_API std::optional getDeviceIndexWithPrimaryContext(); } // namespace c10::cuda diff --git a/c10/cuda/impl/CUDAGuardImpl.h b/c10/cuda/impl/CUDAGuardImpl.h index 113f896c6fa29..ec50c8152b33e 100644 --- a/c10/cuda/impl/CUDAGuardImpl.h +++ b/c10/cuda/impl/CUDAGuardImpl.h @@ -40,7 +40,7 @@ struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface { C10_CUDA_CHECK(c10::cuda::GetDevice(&device)); return Device(DeviceType::CUDA, device); } - c10::optional uncheckedGetDevice() const noexcept { + std::optional uncheckedGetDevice() const noexcept { DeviceIndex device{-1}; const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device)); C10_CUDA_CHECK_WARN(err); diff --git a/c10/test/util/optional_test.cpp b/c10/test/util/optional_test.cpp index f17cc61c51b1c..f95fc864d812c 100644 --- a/c10/test/util/optional_test.cpp +++ b/c10/test/util/optional_test.cpp @@ -22,7 +22,7 @@ using testing::Not; template class OptionalTest : public ::testing::Test { public: - using optional = c10::optional; + using optional = std::optional; }; template @@ -96,10 +96,10 @@ TYPED_TEST(OptionalTest, Initialized) { } } -class SelfCompareTest : public testing::TestWithParam> {}; +class SelfCompareTest : public testing::TestWithParam> {}; TEST_P(SelfCompareTest, SelfCompare) { - c10::optional x = GetParam(); + std::optional x = GetParam(); EXPECT_THAT(x, Eq(x)); EXPECT_THAT(x, Le(x)); EXPECT_THAT(x, Ge(x)); @@ -118,7 +118,7 @@ INSTANTIATE_TEST_SUITE_P( testing::Values(c10::make_optional(2))); TEST(OptionalTest, Nullopt) { - c10::optional x = 2; + std::optional x = 2; EXPECT_THAT(c10::nullopt, Not(Eq(x))); EXPECT_THAT(x, Not(Eq(c10::nullopt))); @@ -142,17 +142,17 @@ TEST(OptionalTest, Nullopt) { // Ensure comparisons work... using CmpTestTypes = testing::Types< // between two optionals - std::pair, c10::optional>, + std::pair, c10::optional>, // between an optional and a value - std::pair, int>, + std::pair, int>, // between a value and an optional - std::pair>, + std::pair>, // between an optional and a differently typed value - std::pair, long>, + std::pair, long>, // between a differently typed value and an optional - std::pair>>; + std::pair>>; template class CmpTest : public testing::Test {}; TYPED_TEST_SUITE(CmpTest, CmpTestTypes); diff --git a/c10/util/ArrayRef.h b/c10/util/ArrayRef.h index 59ea43f8d959c..2a56e60832993 100644 --- a/c10/util/ArrayRef.h +++ b/c10/util/ArrayRef.h @@ -61,7 +61,7 @@ class ArrayRef final { void debugCheckNullptrInvariant() { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( Data != nullptr || Length == 0, - "created ArrayRef with nullptr and non-zero length! 
c10::optional relies on this being illegal"); + "created ArrayRef with nullptr and non-zero length! std::optional relies on this being illegal"); } public: diff --git a/c10/util/Backtrace.cpp b/c10/util/Backtrace.cpp index deeb1fee3e7ab..7d0fedbb335a2 100644 --- a/c10/util/Backtrace.cpp +++ b/c10/util/Backtrace.cpp @@ -134,7 +134,7 @@ bool is_python_frame(const FrameInformation& frame) { (frame.object_file.find("libpython") != std::string::npos); } -c10::optional parse_frame_information( +std::optional parse_frame_information( const std::string& frame_string) { FrameInformation frame; diff --git a/c10/util/OptionalArrayRef.h b/c10/util/OptionalArrayRef.h index 2c2b88722d4d7..98237bba92f56 100644 --- a/c10/util/OptionalArrayRef.h +++ b/c10/util/OptionalArrayRef.h @@ -1,11 +1,11 @@ // This file defines OptionalArrayRef, a class that has almost the same -// exact functionality as c10::optional>, except that its +// exact functionality as std::optional>, except that its // converting constructor fixes a dangling pointer issue. // -// The implicit converting constructor of both c10::optional> and +// The implicit converting constructor of both std::optional> and // std::optional> can cause the underlying ArrayRef to store // a dangling pointer. OptionalArrayRef prevents this by wrapping -// a c10::optional> and fixing the constructor implementation. +// a std::optional> and fixing the constructor implementation. // // See https://github.com/pytorch/pytorch/issues/63645 for more on this. diff --git a/c10/xpu/test/impl/XPUStreamTest.cpp b/c10/xpu/test/impl/XPUStreamTest.cpp index 16f6e20c2163e..01a1dbb62621b 100644 --- a/c10/xpu/test/impl/XPUStreamTest.cpp +++ b/c10/xpu/test/impl/XPUStreamTest.cpp @@ -82,7 +82,7 @@ TEST(XPUStreamTest, StreamBehavior) { EXPECT_NE(stream.device_index(), c10::xpu::current_device()); } -void thread_fun(c10::optional& cur_thread_stream) { +void thread_fun(std::optional& cur_thread_stream) { auto new_stream = c10::xpu::getStreamFromPool(); c10::xpu::setCurrentXPUStream(new_stream); cur_thread_stream = {c10::xpu::getCurrentXPUStream()}; @@ -94,7 +94,7 @@ TEST(XPUStreamTest, MultithreadStreamBehavior) { if (!has_xpu()) { return; } - c10::optional s0, s1; + std::optional s0, s1; std::thread t0{thread_fun, std::ref(s0)}; std::thread t1{thread_fun, std::ref(s1)}; diff --git a/caffe2/core/context.h b/caffe2/core/context.h index 36fd4e400fe8c..eb46f78f8b0d9 100644 --- a/caffe2/core/context.h +++ b/caffe2/core/context.h @@ -63,23 +63,23 @@ class TORCH_API CPUContext final : public BaseContext { return (static_cast(random1) << 32) | random2; } - c10::optional next_float_normal_sample() { + std::optional next_float_normal_sample() { return next_float_normal_sample_; } - c10::optional next_double_normal_sample() { + std::optional next_double_normal_sample() { return next_double_normal_sample_; } - void set_next_float_normal_sample(c10::optional randn) { + void set_next_float_normal_sample(std::optional randn) { next_float_normal_sample_ = randn; } - void set_next_double_normal_sample(c10::optional randn) { + void set_next_double_normal_sample(std::optional randn) { next_double_normal_sample_ = randn; } private: at::mt19937 engine_; - c10::optional next_float_normal_sample_; - c10::optional next_double_normal_sample_; + std::optional next_float_normal_sample_; + std::optional next_double_normal_sample_; }; #else typedef std::mt19937 rand_gen_type; diff --git a/caffe2/core/export_c10_op_to_caffe2.h b/caffe2/core/export_c10_op_to_caffe2.h index b8bbfda84a50e..f03da90c1b861 100644 
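The rewritten `OptionalArrayRef.h` comment above is one of the few places in this patch with real semantics behind the rename: the converting constructor of `std::optional<ArrayRef<T>>` can capture a view into a temporary. A hedged, self-contained illustration using a minimal view type standing in for `c10::ArrayRef`:

```
#include <cstddef>
#include <optional>
#include <vector>

// Minimal non-owning view standing in for c10::ArrayRef.
struct IntView {
  const int* data = nullptr;
  std::size_t size = 0;
  /*implicit*/ IntView(const std::vector<int>& v) : data(v.data()), size(v.size()) {}
};

std::optional<IntView> make_view() {
  // std::optional's converting constructor builds a view into the temporary
  // vector, which is destroyed at the end of the full expression -- the
  // returned optional holds a dangling pointer.
  return std::optional<IntView>(std::vector<int>{1, 2, 3});
}

int main() {
  auto view = make_view();
  // Dereferencing view->data here would be undefined behavior. Per the
  // comment above, OptionalArrayRef avoids this by fixing the converting
  // constructor instead of exposing std::optional<ArrayRef<T>> directly.
  (void)view;
  return 0;
}
```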
--- a/caffe2/core/export_c10_op_to_caffe2.h +++ b/caffe2/core/export_c10_op_to_caffe2.h @@ -185,7 +185,7 @@ class C10OperatorWrapper final : public Operator { template IValue get_nontensor_argument_( const std::string& name, - const c10::optional& default_value) { + const std::optional& default_value) { if (default_value.has_value()) { return this->template GetSingleArgument(name, default_value->to()); } else { diff --git a/caffe2/core/export_caffe2_op_to_c10.h b/caffe2/core/export_caffe2_op_to_c10.h index 216d3833648bf..7e803e545e212 100644 --- a/caffe2/core/export_caffe2_op_to_c10.h +++ b/caffe2/core/export_caffe2_op_to_c10.h @@ -126,7 +126,7 @@ void call_caffe2_op_from_c10( inline FunctionSchema make_function_schema_for_c10( const char* schema_str, - c10::optional optional_alias_analysis_kind) { + std::optional optional_alias_analysis_kind) { #if !defined(EXPOSE_C2_OPS) && \ (defined(CAFFE2_IS_XPLAT_BUILD) || defined(C10_MOBILE)) throw std::logic_error( diff --git a/caffe2/core/operator.cc b/caffe2/core/operator.cc index a978cfd164ce8..7cf1ef909f18b 100644 --- a/caffe2/core/operator.cc +++ b/caffe2/core/operator.cc @@ -825,7 +825,7 @@ std::function GetOperatorLogger() { return OperatorLogger; } -c10::optional OperatorBase::argumentIndexWithName( +std::optional OperatorBase::argumentIndexWithName( c10::string_view name) const { #if defined(EXPOSE_C2_OPS) || \ !defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE) diff --git a/caffe2/core/operator.h b/caffe2/core/operator.h index 0dbf31e5932b0..3277357b4f34c 100644 --- a/caffe2/core/operator.h +++ b/caffe2/core/operator.h @@ -605,7 +605,7 @@ class TORCH_API OperatorBase : public Observable { std::string type_; vector inputs_; vector outputs_; - // Preferably use c10::optional, but nvcc doesn't work + // Preferably use std::optional, but nvcc doesn't work #if defined(EXPOSE_C2_OPS) || \ !defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE) std::unique_ptr fn_schema_; @@ -649,7 +649,7 @@ class TORCH_API OperatorBase : public Observable { } } - c10::optional argumentIndexWithName(c10::string_view name) const; + std::optional argumentIndexWithName(c10::string_view name) const; // An event used by asynchronous execution. 
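`OperatorBase::argumentIndexWithName` in the caffe2 hunk above is the usual "lookup that may fail" shape: return an optional index rather than a sentinel value. A small generic sketch of that shape (hypothetical helper, not the caffe2 implementation):

```
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <vector>

// Hypothetical lookup: return the index of `name`, or std::nullopt when the
// argument does not exist, instead of a magic value like -1.
std::optional<int> argument_index_with_name(
    const std::vector<std::string>& args,
    std::string_view name) {
  for (int i = 0; i < static_cast<int>(args.size()); ++i) {
    if (args[i] == name) {
      return i;
    }
  }
  return std::nullopt;
}

int main() {
  std::vector<std::string> args{"alpha", "beta"};
  assert(argument_index_with_name(args, "beta") == 1);
  assert(!argument_index_with_name(args, "gamma").has_value());
  return 0;
}
```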
std::unique_ptr event_; diff --git a/functorch/csrc/dim/arena.h b/functorch/csrc/dim/arena.h index 3251321f998b2..fa68e67268d53 100644 --- a/functorch/csrc/dim/arena.h +++ b/functorch/csrc/dim/arena.h @@ -55,7 +55,7 @@ struct Slice { T& operator[](int i) const { return begin_[i]; } - c10::optional index(const T& value) { + std::optional index(const T& value) { for (int i : enumerate()) { if (begin_[i] == value) { return i; diff --git a/functorch/csrc/dim/dim.cpp b/functorch/csrc/dim/dim.cpp index e25b8d0e5731a..066f9517acefd 100644 --- a/functorch/csrc/dim/dim.cpp +++ b/functorch/csrc/dim/dim.cpp @@ -1123,7 +1123,7 @@ int64_t _Tensor_ndim(mpy::handle h) { mpy::handle handle_from_tensor(Arena& A, TensorRef t) { // fast case: tensor is live in python - c10::optional mb_obj = + std::optional mb_obj = t->unsafeGetTensorImpl()->pyobj_slot()->check_pyobj(getPyInterpreter(), /*ignore_hermetic_tls=*/false); if (mb_obj.has_value() && !t->unsafeGetTensorImpl()->pyobj_slot()->owns_pyobj()) { return *mb_obj; diff --git a/test/cpp/api/autograd.cpp b/test/cpp/api/autograd.cpp index 3d1604752dbc4..4d6bb485be518 100644 --- a/test/cpp/api/autograd.cpp +++ b/test/cpp/api/autograd.cpp @@ -1265,7 +1265,7 @@ int64_t ret_single_non_tensor( torch::Tensor opt_op( const torch::Tensor& self, - const c10::optional& other) { + const std::optional& other) { if (other.has_value()) { return self + other.value(); } else { @@ -1461,11 +1461,11 @@ TEST(TestAutogradNotImplementedFallback, OptOp) { auto opHandle = c10::Dispatcher::singleton().findSchemaOrThrow("_test::opt_op", ""); auto op = [&](const torch::Tensor& _1, - const c10::optional& _2) { + const std::optional& _2) { return callOpUnboxed< torch::Tensor, const torch::Tensor&, - const c10::optional&>(opHandle, _1, _2); + const std::optional&>(opHandle, _1, _2); }; auto a = torch::tensor({1.}, {torch::kFloat32}).set_requires_grad(true); diff --git a/test/cpp/api/memory.cpp b/test/cpp/api/memory.cpp index d9f44ea3f7a40..a3adc81406b7b 100644 --- a/test/cpp/api/memory.cpp +++ b/test/cpp/api/memory.cpp @@ -6,8 +6,8 @@ struct TestValue { explicit TestValue(const int& x) : lvalue_(x) {} explicit TestValue(int&& x) : rvalue_(x) {} - c10::optional lvalue_; - c10::optional rvalue_; + std::optional lvalue_; + std::optional rvalue_; }; TEST(MakeUniqueTest, ForwardRvaluesCorrectly) { diff --git a/test/cpp/c10d/ProcessGroupNCCLTest.cpp b/test/cpp/c10d/ProcessGroupNCCLTest.cpp index d1c2380274278..edf4f03c2d692 100644 --- a/test/cpp/c10d/ProcessGroupNCCLTest.cpp +++ b/test/cpp/c10d/ProcessGroupNCCLTest.cpp @@ -42,7 +42,7 @@ class NCCLTestBase { void initialize( int rank, int size, - c10::optional<::std::shared_ptr<::c10d::ProcessGroupNCCL>> split_from = + std::optional<::std::shared_ptr<::c10d::ProcessGroupNCCL>> split_from = c10::nullopt) { store_ = c10::make_intrusive<::c10d::FileStore>(path_, size); diff --git a/test/cpp/jit/test_argument_spec.cpp b/test/cpp/jit/test_argument_spec.cpp index 6ffe167c58768..71785d889952a 100644 --- a/test/cpp/jit/test_argument_spec.cpp +++ b/test/cpp/jit/test_argument_spec.cpp @@ -111,7 +111,7 @@ TEST(ArgumentSpecTest, CompleteArgumentSpec_CUDA) { // } // TEST(ArgumentSpecTest, VaryingShape) { -// c10::VaryingShape vs(c10::optional{}); +// c10::VaryingShape vs(std::optional{}); // auto ptt_empty1 = TensorType::create({}, {}, vs, vs, false); // auto ptt_empty2 = TensorType::create({}, {}, vs, vs, false); // ASSERT_EQ(hashCode(ptt_empty1), hashCode(ptt_empty2)); diff --git a/test/cpp/jit/test_custom_class_registrations.cpp 
b/test/cpp/jit/test_custom_class_registrations.cpp index 2595c64c9b170..819d5495b06c3 100644 --- a/test/cpp/jit/test_custom_class_registrations.cpp +++ b/test/cpp/jit/test_custom_class_registrations.cpp @@ -27,7 +27,7 @@ struct DefaultArgs : torch::CustomClassHolder { x = scale * x + add; return x; } - int64_t divide(c10::optional factor) { + int64_t divide(std::optional factor) { if (factor) { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) x = x / *factor; @@ -334,7 +334,7 @@ struct ElementwiseInterpreter : torch::CustomClassHolder { // collection types like vector, optional, and dict. using SerializationType = std::tuple< std::vector /*input_names_*/, - c10::optional /*output_name_*/, + std::optional /*output_name_*/, c10::Dict /*constants_*/, std::vector /*instructions_*/ >; @@ -360,7 +360,7 @@ struct ElementwiseInterpreter : torch::CustomClassHolder { // Class members std::vector input_names_; - c10::optional output_name_; + std::optional output_name_; c10::Dict constants_; std::vector instructions_; }; diff --git a/test/cpp/jit/test_exception.cpp b/test/cpp/jit/test_exception.cpp index be23548e16d15..a4932e76b3e24 100644 --- a/test/cpp/jit/test_exception.cpp +++ b/test/cpp/jit/test_exception.cpp @@ -31,7 +31,7 @@ TEST(TestException, TestAssertion) { bool is_jit_exception = false; std::string message; - c10::optional exception_class; + std::optional exception_class; try { cu_ptr->run_method("foo"); } catch (JITException& e) { @@ -140,7 +140,7 @@ TEST(TestException, TestCustomException) { (torch::jit::GraphFunction*)&cu->get_function("foo"); std::cerr << "Graph is\n" << *gf->graph() << std::endl; bool is_jit_exception = false; - c10::optional exception_class; + std::optional exception_class; std::string message; try { cu->run_method("foo"); diff --git a/test/cpp/jit/test_ir.cpp b/test/cpp/jit/test_ir.cpp index e9a0edabaaf0f..19910cbf31f00 100644 --- a/test/cpp/jit/test_ir.cpp +++ b/test/cpp/jit/test_ir.cpp @@ -194,17 +194,17 @@ TEST(IRTest, OperatorMap) { ASSERT_FALSE(op_map.contains(*op6)); op_map.insert(op1, 1); ASSERT_TRUE(op_map.contains(*op1)); - c10::optional o1 = op_map.find(*op1); + std::optional o1 = op_map.find(*op1); ASSERT_TRUE(o1.has_value()); - c10::optional o2 = op_map.find(*op2); + std::optional o2 = op_map.find(*op2); ASSERT_TRUE(o2.has_value()); - c10::optional o3 = op_map.find(*op3); + std::optional o3 = op_map.find(*op3); ASSERT_FALSE(o3.has_value()); - c10::optional o4 = op_map.find(*op4); + std::optional o4 = op_map.find(*op4); ASSERT_TRUE(o4.has_value()); - c10::optional o5 = op_map.find(*op5); + std::optional o5 = op_map.find(*op5); ASSERT_TRUE(o5.has_value()); - c10::optional o6 = op_map.find(*op6); + std::optional o6 = op_map.find(*op6); ASSERT_FALSE(o6.has_value()); } diff --git a/test/cpp/jit/test_jit_type.cpp b/test/cpp/jit/test_jit_type.cpp index 606c1b0fa36e0..08f7f360731b7 100644 --- a/test/cpp/jit/test_jit_type.cpp +++ b/test/cpp/jit/test_jit_type.cpp @@ -12,7 +12,7 @@ TEST(JitTypeTest, IsComplete) { auto tt = c10::TensorType::create( at::kFloat, at::kCPU, - c10::SymbolicShape(std::vector>({1, 49})), + c10::SymbolicShape(std::vector>({1, 49})), std::vector( {c10::Stride{2, true, 1}, c10::Stride{1, true, 1}, diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp index efe377aad72ce..9c74eb45e535f 100644 --- a/test/cpp/jit/test_misc.cpp +++ b/test/cpp/jit/test_misc.cpp @@ -1302,7 +1302,7 @@ TEST(RecordFunctionTest, OperatorNameOverload) { at::addGlobalCallback(at::RecordFunctionCallback( [](const 
at::RecordFunction& fn) -> std::unique_ptr { - c10::optional op_name = + std::optional op_name = fn.operator_name(); if (op_name.has_value()) { operator_names.insert(c10::toString(*op_name)); diff --git a/test/cpp/jit/test_shape_analysis.cpp b/test/cpp/jit/test_shape_analysis.cpp index 4940d277ce043..0ff3908d639a5 100644 --- a/test/cpp/jit/test_shape_analysis.cpp +++ b/test/cpp/jit/test_shape_analysis.cpp @@ -296,7 +296,7 @@ TEST(ShapeAnalysisTest, MovingConstantOutOfFusionGroups) { namespace { -c10::optional sym_dim = c10::nullopt; +std::optional sym_dim = c10::nullopt; // NOLINTNEXTLINE(bugprone-easily-swappable-parameters) void assertShapeEqual(c10::SymbolicShape& a, c10::SymbolicShape& e) { @@ -306,8 +306,8 @@ void assertShapeEqual(c10::SymbolicShape& a, c10::SymbolicShape& e) { } void assertShapeEqual( - c10::optional>& actual, - std::vector> expected) { + std::optional>& actual, + std::vector> expected) { ASSERT_TRUE(actual.has_value()); ASSERT_EQ(actual->size(), 1); @@ -332,12 +332,12 @@ TEST(ShapeAnalysisTest, SymbolicShapeAPI) { // Check vector initializer list syntax c10::SymbolicShape ss_concrete = - std::vector>{1, 56, 56}; - c10::SymbolicShape ss1 = std::vector>{sym_dim, 56, 56}; + std::vector>{1, 56, 56}; + c10::SymbolicShape ss1 = std::vector>{sym_dim, 56, 56}; c10::SymbolicShape ss2 = - std::vector>{64, sym_dim, sym_dim}; + std::vector>{64, sym_dim, sym_dim}; c10::SymbolicShape ss3 = - std::vector>{sym_dim, sym_dim, sym_dim, sym_dim}; + std::vector>{sym_dim, sym_dim, sym_dim, sym_dim}; auto res = calculateSymbolicShapesOnOp( schema, std::vector{const_size_1, const_size_1}); @@ -484,7 +484,7 @@ TEST(ShapeAnalysisTest, TestShapeMultipleReturns) { auto res = calculateSymbolicShapesOnOp(max_dim_op, {ss1, const_int, false_ival}); c10::SymbolicShape expected_res = - c10::SymbolicShape(std::vector>{sym_dim}); + c10::SymbolicShape(std::vector>{sym_dim}); assertShapeEqual(res->at(0), expected_res); // res0 and res1 should share the same symbolic symbol EXPECT_EQ(res->at(0), res->at(1)); diff --git a/test/cpp/lazy/test_lazy_ops.cpp b/test/cpp/lazy/test_lazy_ops.cpp index aa31ffc59bb51..745f40729f02d 100644 --- a/test/cpp/lazy/test_lazy_ops.cpp +++ b/test/cpp/lazy/test_lazy_ops.cpp @@ -475,7 +475,7 @@ TEST_F(LazyOpsTest, TestDiv) { } TEST_F(LazyOpsTest, TestDivWithRoundingMode) { - c10::optional rounding_modes[] = { + std::optional rounding_modes[] = { "trunc", "floor", c10::nullopt}; for (const auto& rounding_mode : rounding_modes) { for (torch::ScalarType scalar_type1 : @@ -535,7 +535,7 @@ TEST_F(LazyOpsTest, TestDivInPlace) { } TEST_F(LazyOpsTest, TestDivInPlaceWithRoundingMode) { - c10::optional rounding_modes[] = { + std::optional rounding_modes[] = { "trunc", "floor", c10::nullopt}; for (const auto& rounding_mode : rounding_modes) { for (torch::ScalarType scalar_type1 : {torch::kFloat}) { @@ -1553,7 +1553,7 @@ TEST_F(LazyOpsTest, TestStdWithCorrection) { torch::Tensor a = torch::rand( {4, 3, 4}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())); // int rank = a.dim(); - c10::optional corrections[] = {1, 2, c10::nullopt}; + std::optional corrections[] = {1, 2, c10::nullopt}; for (const auto& correction : corrections) { for (auto keepdim : {true, false}) { for (const auto& dim : @@ -1573,7 +1573,7 @@ TEST_F(LazyOpsTest, TestStdMeanWithCorrection) { torch::Tensor a = torch::rand( {4, 3, 4}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())); // int rank = a.dim(); - c10::optional corrections[] = {1, 2, c10::nullopt}; + std::optional corrections[] = {1, 2, 
c10::nullopt}; for (const auto& correction : corrections) { for (auto keepdim : {true, false}) { for (const auto& dim : @@ -1710,7 +1710,7 @@ TEST_F(LazyOpsTest, TestVarWithDim) { TEST_F(LazyOpsTest, TestVarWithCorrection) { torch::Tensor a = torch::rand( {4, 3, 4}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())); - c10::optional corrections[] = {1, 2, c10::nullopt}; + std::optional corrections[] = {1, 2, c10::nullopt}; for (const auto& dim : std::vector>{{0, 1}, {-3, -2}}) { for (bool keepDim : {true, false}) { for (const auto& correction : corrections) { @@ -1730,7 +1730,7 @@ TEST_F(LazyOpsTest, TestVarWithCorrection) { TEST_F(LazyOpsTest, TestVarMeanWithCorrection) { torch::Tensor a = torch::rand( {4, 3, 4}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())); - c10::optional corrections[] = {1, 2, c10::nullopt}; + std::optional corrections[] = {1, 2, c10::nullopt}; for (const auto& dim : std::vector>{{0, 1}, {-3, -2}}) { for (const auto& correction : corrections) { for (auto keepdim : {true, false}) { diff --git a/test/cpp/lazy/test_misc.cpp b/test/cpp/lazy/test_misc.cpp index aa4cd1b7e798e..441e5c41eee13 100644 --- a/test/cpp/lazy/test_misc.cpp +++ b/test/cpp/lazy/test_misc.cpp @@ -63,10 +63,10 @@ TEST(HashTest, Sanity) { test_hash_repeatable_sensitive(c10::Scalar(true), c10::Scalar(false)); test_hash_repeatable_sensitive(c10::Scalar(12345), c10::Scalar(12354)); - // c10::optional + // std::optional test_hash_repeatable_sensitive( - c10::optional("I have value!"), - c10::optional(c10::nullopt)); + std::optional("I have value!"), + std::optional(c10::nullopt)); // Containers auto a = std::vector({0, 1, 1, 2, 3, 5, 8}); diff --git a/test/cpp/tensorexpr/test_external_calls.cpp b/test/cpp/tensorexpr/test_external_calls.cpp index 7a4291f0ba447..c26c800a16bf6 100644 --- a/test/cpp/tensorexpr/test_external_calls.cpp +++ b/test/cpp/tensorexpr/test_external_calls.cpp @@ -507,11 +507,11 @@ TEST(ExternalCall, Prepacked_Linear_float) { .findSchemaOrThrow("prepacked::linear_clamp_prepack", "") .typed( at::Tensor, - c10::optional, - const c10::optional&, - const c10::optional&)>(); + std::optional, + const std::optional&, + const std::optional&)>(); auto prepacked = linear_clamp_prepack_op.call( - weight, bias, c10::optional(), c10::optional()); + weight, bias, std::optional(), c10::optional()); BufHandle DummyPrepacked("DummyPrepacked", {1}, kFloat); Tensor Result = Tensor( @@ -581,13 +581,13 @@ TEST(ExternalCall, Prepacked_Conv2d_float) { .findSchemaOrThrow("prepacked::conv2d_clamp_prepack", "") .typed( at::Tensor, - c10::optional, + std::optional, std::vector, std::vector, std::vector, int64_t, - const c10::optional&, - const c10::optional&)>(); + const std::optional&, + const std::optional&)>(); auto prepacked = conv2d_clamp_prepack_op.call( weight, bias, @@ -595,8 +595,8 @@ TEST(ExternalCall, Prepacked_Conv2d_float) { {pad, pad}, {dilation, dilation}, groups, - c10::optional(), - c10::optional()); + std::optional(), + std::optional()); BufHandle DummyPrepacked("DummyPrepacked", {1}, kFloat); Tensor Result = Tensor( @@ -945,7 +945,7 @@ TEST(ExternalCall, JitCustomFusionOp) { const std::vector& inputs, const std::vector& output_shape, const std::vector& output_strides, - const c10::optional& output_type, + const std::optional& output_type, at::Device device) { auto output_dtype = Dtype(*output_type); torch::jit::tensorexpr::BufHandle result_buf( diff --git a/test/cpp/tensorexpr/test_kernel.cpp b/test/cpp/tensorexpr/test_kernel.cpp index 21b86e9b00707..22f6b64efe1a8 100644 
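Several of the lazy-ops tests above sweep a parameter over a couple of concrete values plus "unspecified" by iterating an array of optionals (`corrections[] = {1, 2, nullopt}`, `rounding_modes[] = {"trunc", "floor", nullopt}`). A tiny standalone version of that test idiom:

```
#include <iostream>
#include <optional>
#include <string>

int main() {
  // Same shape as the test arrays above: two concrete values plus
  // std::nullopt to exercise the "argument not specified" path.
  const std::optional<std::string> rounding_modes[] = {"trunc", "floor", std::nullopt};
  for (const auto& mode : rounding_modes) {
    if (mode.has_value()) {
      std::cout << "rounding_mode=" << *mode << '\n';
    } else {
      std::cout << "rounding_mode=<unspecified>\n";
    }
  }
  return 0;
}
```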
--- a/test/cpp/tensorexpr/test_kernel.cpp +++ b/test/cpp/tensorexpr/test_kernel.cpp @@ -888,7 +888,7 @@ TEST_F(Kernel, SumAllAxes) { parseIR(graph_string, &*graph); auto o = at::empty({}, TensorOptions(kCPU)); - c10::optional dtype; + std::optional dtype; if (scalar_type != ScalarType::Undefined) { dtype = static_cast(scalar_type); } @@ -947,7 +947,7 @@ TEST_F(Kernel, SumOneAxis) { env.d("dim", dim); env.d("keepdim", keepdim); env.s("dtype", dtypeConstant(scalar_type)); - c10::optional dtype; + std::optional dtype; if (scalar_type != ScalarType::Undefined) { dtype = static_cast(scalar_type); } @@ -1665,7 +1665,7 @@ Tensor lowerNanToNum( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto input_buf = std::get(inputs[0]); auto e = Compute( diff --git a/test/cpp/tensorexpr/test_quantization.cpp b/test/cpp/tensorexpr/test_quantization.cpp index a689358276f2c..af6b539ff33e9 100644 --- a/test/cpp/tensorexpr/test_quantization.cpp +++ b/test/cpp/tensorexpr/test_quantization.cpp @@ -390,8 +390,8 @@ at::Tensor quantized_cat( .typed const&, int64_t, - c10::optional, - c10::optional)>(); + std::optional, + std::optional)>(); return op.redispatch( DispatchKeySet({DispatchKey::QuantizedCPU}), xs, dim, scale, zero); } diff --git a/test/cpp_extensions/extension.cpp b/test/cpp_extensions/extension.cpp index f476a983b14c2..1de9e03971115 100644 --- a/test/cpp_extensions/extension.cpp +++ b/test/cpp_extensions/extension.cpp @@ -23,7 +23,7 @@ struct MatrixMultiplier { torch::Tensor tensor_; }; -bool function_taking_optional(c10::optional tensor) { +bool function_taking_optional(std::optional tensor) { return tensor.has_value(); } diff --git a/test/cpp_extensions/maia_extension.cpp b/test/cpp_extensions/maia_extension.cpp index 13315810f54c4..8dbc64f82076d 100644 --- a/test/cpp_extensions/maia_extension.cpp +++ b/test/cpp_extensions/maia_extension.cpp @@ -20,8 +20,8 @@ Tensor get_tensor(caffe2::TypeMeta dtype, IntArrayRef size) { return Tensor(std::move(tensor_impl)); } -Tensor empty_override(IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, - c10::optional pin_memory, c10::optional optional_memory_format) { +Tensor empty_override(IntArrayRef size, std::optional dtype, c10::optional layout, c10::optional device, + std::optional pin_memory, c10::optional optional_memory_format) { test_int = 0; return get_tensor(scalarTypeToTypeMeta(dtype_or_default(dtype)), size); } @@ -32,7 +32,7 @@ Tensor& add_out_override(const Tensor & a, const Tensor & b , const Scalar& c, T } Tensor fake_convolution( - const Tensor& input, const Tensor& weight, const c10::optional& bias, + const Tensor& input, const Tensor& weight, const std::optional& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { test_int = 2; diff --git a/test/cpp_extensions/open_registration_extension.cpp b/test/cpp_extensions/open_registration_extension.cpp index f5b61102af7b2..df46d827339b4 100644 --- a/test/cpp_extensions/open_registration_extension.cpp +++ b/test/cpp_extensions/open_registration_extension.cpp @@ -277,11 +277,11 @@ REGISTER_ALLOCATOR(c10::DeviceType::PrivateUse1, &global_custom_alloc); // basic dummy empty function, so we can directly construct tensors on the custom device // This dummy test device will just use the CPU allocator, and ignores pinned memory. 
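`function_taking_optional` in the cpp_extensions hunk above relies on both a value and `nullopt` converting implicitly to a by-value `std::optional` parameter; the patch leaves `c10::nullopt` call sites untouched, which suggests they stay compatible once the type is spelled `std::optional`. A minimal stand-alone version with a stand-in struct instead of `at::Tensor`:

```
#include <cassert>
#include <optional>

struct FakeTensor {};  // stand-in for at::Tensor

// Same shape as the extension's function_taking_optional: report whether the
// caller actually passed something.
bool function_taking_optional(std::optional<FakeTensor> tensor) {
  return tensor.has_value();
}

int main() {
  assert(function_taking_optional(FakeTensor{}));   // value converts implicitly
  assert(!function_taking_optional(std::nullopt));  // explicit "no value"
  return 0;
}
```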
at::Tensor custom_empty_memory_format(at::IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional memory_format) { constexpr c10::DispatchKeySet private_use_ks(c10::DispatchKey::PrivateUse1); return at::detail::empty_generic(size, &global_custom_alloc, @@ -290,11 +290,11 @@ at::Tensor custom_empty_memory_format(at::IntArrayRef size, memory_format); } at::Tensor custom_empty_symint(c10::IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional memory_format) { constexpr c10::DispatchKeySet private_use_ks(c10::DispatchKey::PrivateUse1); return at::detail::empty_generic(size, &global_custom_alloc, private_use_ks, c10::dtype_or_default(dtype), memory_format); @@ -368,10 +368,10 @@ at::Tensor custom__copy_from_and_resize(const at::Tensor& self, const at::Tensor at::Tensor custom_empty_strided(c10::IntArrayRef size, c10::IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { constexpr c10::DispatchKeySet private_use_ks(c10::DispatchKey::PrivateUse1); auto dtype = c10::dtype_or_default(dtype_opt); return at::detail::empty_strided_generic(size, stride, &global_custom_alloc, private_use_ks, dtype); @@ -406,7 +406,7 @@ at::Tensor& custom_set_source_Storage_storage_offset(at::Tensor& result, // basic dummy functions related to pin_memory. 
std::vector custom_pinned_data_ptr; -at::Tensor custom__pin_memory(const at::Tensor& self, c10::optional device) { +at::Tensor custom__pin_memory(const at::Tensor& self, std::optional device) { TORCH_CHECK( self.device().is_cpu(), "cannot pin '", @@ -420,7 +420,7 @@ at::Tensor custom__pin_memory(const at::Tensor& self, c10::optional return dump_pinned_tensor; } -bool custom_is_pinned(const at::Tensor& self, c10::optional device) { +bool custom_is_pinned(const at::Tensor& self, std::optional device) { // Only CPU tensors can be pinned if (!self.is_cpu()) { return false; @@ -436,7 +436,7 @@ bool custom_is_pinned(const at::Tensor& self, c10::optional device) } const at::Tensor& custom_resize_(const at::Tensor& self, at::IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { at::TensorImpl* tensor_impl = self.unsafeGetTensorImpl(); tensor_impl->set_sizes_contiguous(size); const auto itemsize = tensor_impl->dtype().itemsize(); diff --git a/test/cpp_extensions/rng_extension.cpp b/test/cpp_extensions/rng_extension.cpp index 2e657d15a3979..3fc62ee69f752 100644 --- a/test/cpp_extensions/rng_extension.cpp +++ b/test/cpp_extensions/rng_extension.cpp @@ -33,15 +33,15 @@ struct TestCPUGenerator : public c10::GeneratorImpl { uint64_t value_; }; -Tensor& random_(Tensor& self, c10::optional generator) { +Tensor& random_(Tensor& self, std::optional generator) { return at::native::templates::random_impl(self, generator); } -Tensor& random_from_to(Tensor& self, int64_t from, optional to, c10::optional generator) { +Tensor& random_from_to(Tensor& self, int64_t from, optional to, std::optional generator) { return at::native::templates::random_from_to_impl(self, from, to, generator); } -Tensor& random_to(Tensor& self, int64_t to, c10::optional generator) { +Tensor& random_to(Tensor& self, int64_t to, std::optional generator) { return random_from_to(self, 0, to, generator); } diff --git a/test/custom_operator/op.cpp b/test/custom_operator/op.cpp index c9389713428bc..ab0506a822f61 100644 --- a/test/custom_operator/op.cpp +++ b/test/custom_operator/op.cpp @@ -29,7 +29,7 @@ struct CustomOpAutogradFunction : public torch::autograd::Function var3) { + std::optional var3) { ctx->saved_data["mul"] = mul; ctx->saved_data["var3_has_value"] = var3.has_value(); ctx->save_for_backward({var1, var2}); @@ -59,7 +59,7 @@ torch::Tensor custom_op_with_autograd( torch::Tensor var1, int64_t mul, torch::Tensor var2, - c10::optional var3) { + std::optional var3) { return CustomOpAutogradFunction::apply(var1, mul, var2, var3); } diff --git a/test/custom_operator/test_custom_ops.cpp b/test/custom_operator/test_custom_ops.cpp index b1e830f7b65c7..a526bebd26144 100644 --- a/test/custom_operator/test_custom_ops.cpp +++ b/test/custom_operator/test_custom_ops.cpp @@ -57,7 +57,7 @@ void get_autograd_operator_from_registry_and_execute() { torch::Tensor z = torch::randn({5,5}, torch::requires_grad()); torch::Tensor output = - helpers::get_operator_from_registry_and_execute("custom::op_with_autograd", x, 2, y, c10::optional()); + helpers::get_operator_from_registry_and_execute("custom::op_with_autograd", x, 2, y, std::optional()); TORCH_INTERNAL_ASSERT(output.allclose(x + 2*y + x*y)); auto go = torch::ones({}, torch::requires_grad()); @@ -88,7 +88,7 @@ void get_autograd_operator_from_registry_and_execute_in_nograd_mode() { torch::Tensor y = torch::randn({5,5}, torch::requires_grad()); torch::Tensor output = - helpers::get_operator_from_registry_and_execute("custom::op_with_autograd", x, 2, y, 
c10::optional()); + helpers::get_operator_from_registry_and_execute("custom::op_with_autograd", x, 2, y, std::optional()); TORCH_INTERNAL_ASSERT(output.allclose(x + 2*y + x*y)); } diff --git a/test/inductor/extension_backends/cpp/extension_device.cpp b/test/inductor/extension_backends/cpp/extension_device.cpp index 71f3f5919a9b2..c801f9ea06837 100644 --- a/test/inductor/extension_backends/cpp/extension_device.cpp +++ b/test/inductor/extension_backends/cpp/extension_device.cpp @@ -44,7 +44,7 @@ at::Tensor custom_to_device( at::ScalarType dtype, bool non_blocking, bool copy, - c10::optional memory_format) { + std::optional memory_format) { TORCH_CHECK(self.is_cpu() || self.device().type() == c10::DeviceType::PrivateUse1, "Dummy test only allows copy from cpu -> dummy device."); TORCH_CHECK(device.is_cpu() || device.type() == c10::DeviceType::PrivateUse1, "Dummy test only allows copy from cpu -> dummy device."); // Some dummy asserts for the basic use case: inputs are the same size / dtype, all contiguous. @@ -121,11 +121,11 @@ at::Tensor custom__copy_from(const at::Tensor& self, const at::Tensor& dst, bool } at::Tensor custom_empty_memory_format(at::IntArrayRef size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional memory_format) { constexpr c10::DispatchKeySet private_use_ks(c10::DispatchKey::PrivateUse1); return at::detail::empty_generic(size, &global_custom_alloc, @@ -134,7 +134,7 @@ at::Tensor custom_empty_memory_format(at::IntArrayRef size, memory_format); } -at::Tensor custom_empty_strided(c10::IntArrayRef size, c10::IntArrayRef stride, c10::optional dtype_opt, c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { +at::Tensor custom_empty_strided(c10::IntArrayRef size, c10::IntArrayRef stride, std::optional dtype_opt, c10::optional layout_opt, c10::optional device_opt, c10::optional pin_memory_opt) { op_counter += 1; constexpr c10::DispatchKeySet private_use_ks(c10::DispatchKey::PrivateUse1); diff --git a/tools/autograd/templates/Functions.h b/tools/autograd/templates/Functions.h index 437a5e8e89889..1780df8edaab7 100644 --- a/tools/autograd/templates/Functions.h +++ b/tools/autograd/templates/Functions.h @@ -22,7 +22,7 @@ using at::ArrayRef; using at::Type; using at::TensorGeometry; using at::ScalarType; -using c10::optional; +using std::optional; using c10::fmap; inline std::vector unpack_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { @@ -34,12 +34,12 @@ inline std::vector unpack_list(at::ArrayRef xs, std::shar }); } -inline c10::List> unpack_opt_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { - torch::List> result; +inline c10::List> unpack_opt_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { + torch::List> result; result.reserve(xs.size()); for (const SavedVariable& v : xs) { auto var = v.unpack(saved_for); - result.push_back(var.defined() ? c10::optional(var) : c10::nullopt); + result.push_back(var.defined() ? 
std::optional(var) : c10::nullopt); } return result; } diff --git a/tools/autograd/templates/VariableType.h b/tools/autograd/templates/VariableType.h index 065812694cfe4..08da173f94bf8 100644 --- a/tools/autograd/templates/VariableType.h +++ b/tools/autograd/templates/VariableType.h @@ -42,7 +42,7 @@ using at::Quantizer; // we'll remove them when we are actually exposing Quantizer class // to frontend using ConstQuantizerPtr = const c10::intrusive_ptr&; -using c10::optional; +using std::optional; namespace VariableType { TORCH_API std::vector allCUDATypes(); diff --git a/tools/autograd/templates/ViewFuncs.h b/tools/autograd/templates/ViewFuncs.h index faf5ab6881f18..1f69c062d344e 100644 --- a/tools/autograd/templates/ViewFuncs.h +++ b/tools/autograd/templates/ViewFuncs.h @@ -20,7 +20,7 @@ using at::IntArrayRef; using at::ArrayRef; using at::Type; using at::ScalarType; -using c10::optional; +using std::optional; using c10::fmap; ${view_func_declarations} diff --git a/tools/autograd/templates/python_variable_methods.cpp b/tools/autograd/templates/python_variable_methods.cpp index 437ea23d079bf..242adcd205336 100644 --- a/tools/autograd/templates/python_variable_methods.cpp +++ b/tools/autograd/templates/python_variable_methods.cpp @@ -397,7 +397,7 @@ static PyObject * THPVariable_invert(PyObject* self, PyObject* args) { END_HANDLE_TH_ERRORS } -static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy, c10::optional optional_memory_format) { +static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy, std::optional optional_memory_format) { pybind11::gil_scoped_release no_gil; // NOTE: this is where we record aten::to in the graph during tracing. However, the behavior of aten::to // is different with respect to TensorOptions fields that are not present: aten::to inherits fields that @@ -407,18 +407,18 @@ static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, return self.to(self.options().device(device).memory_format(optional_memory_format), non_blocking, copy); } -static Tensor dispatch_to(const Tensor & self, bool non_blocking, bool copy, c10::optional optional_memory_format) { +static Tensor dispatch_to(const Tensor & self, bool non_blocking, bool copy, std::optional optional_memory_format) { pybind11::gil_scoped_release no_gil; return self.to(self.options().memory_format(optional_memory_format), non_blocking, copy); } -static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional optional_memory_format) { +static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, std::optional optional_memory_format) { pybind11::gil_scoped_release no_gil; // TODO: Make this call the TensorOptions version, maybe? return self.to(dtype, non_blocking, copy, optional_memory_format); } -static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional optional_memory_format) { +static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, std::optional optional_memory_format) { pybind11::gil_scoped_release no_gil; // TODO: Make this call the TensorOptions version, maybe? 
return self.to(device, dtype, non_blocking, copy, optional_memory_format); @@ -546,7 +546,7 @@ static PyObject * THPVariable_ipu(PyObject* self, PyObject* args, PyObject* kwar END_HANDLE_TH_ERRORS } -static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType, c10::optional optional_memory_format) { +static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType, std::optional optional_memory_format) { HANDLE_TH_ERRORS auto& self_ = THPVariable_Unpack(self); return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false, optional_memory_format)); diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp index 22a257909bf12..4efb9c838563f 100644 --- a/torch/csrc/Module.cpp +++ b/torch/csrc/Module.cpp @@ -396,10 +396,10 @@ PyObject* THPModule_swap_tensor_impl(PyObject* _unused, PyObject* args) { // The TensorImpls contain PyObjectSlots that have a reference to the PyObject // associated with the TensorImpl. Swap this field as well. - c10::optional mb_obj_a = + std::optional mb_obj_a = a->cdata->unsafeGetTensorImpl()->pyobj_slot()->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/false); - c10::optional mb_obj_b = + std::optional mb_obj_b = b->cdata->unsafeGetTensorImpl()->pyobj_slot()->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/false); TORCH_INTERNAL_ASSERT( @@ -1803,7 +1803,7 @@ Call this whenever a new thread is created in order to propagate values from "_select_conv_backend", [](const at::Tensor& input, const at::Tensor& weight, - const c10::optional& bias_opt, + const std::optional& bias_opt, at::SymIntArrayRef stride_, at::SymIntArrayRef padding_, at::SymIntArrayRef dilation_, @@ -1837,14 +1837,14 @@ Call this whenever a new thread is created in order to propagate values from "_select_conv_backend", [](const at::Tensor& input, const at::Tensor& weight, - const c10::optional& bias, + const std::optional& bias, at::SymIntArrayRef stride_, at::SymIntArrayRef padding_, at::SymIntArrayRef dilation_, bool transposed_, at::SymIntArrayRef output_padding_, c10::SymInt groups_, - c10::optional> bias_sizes_opt) { + std::optional> bias_sizes_opt) { c10::OptionalArrayRef ref = c10::nullopt; if (bias_sizes_opt) { ref = (*bias_sizes_opt); @@ -1883,7 +1883,7 @@ Call this whenever a new thread is created in order to propagate values from .def(py::init([](at::Tensor const& query, at::Tensor const& key, at::Tensor const& value, - c10::optional attn_mask, + std::optional attn_mask, double dropout, bool is_causal) { return sdp::sdp_params{ @@ -2034,7 +2034,7 @@ Call this whenever a new thread is created in order to propagate values from py_module.def( "_get_accelerator", - [](c10::optional check = c10::nullopt) { + [](std::optional check = c10::nullopt) { return c10::Device( at::getAccelerator(check.value_or(false)) .value_or(c10::DeviceType::CPU), @@ -2175,7 +2175,7 @@ Call this whenever a new thread is created in order to propagate values from _DeviceDtypeHasher>; py_module.def( "_group_tensors_by_device_and_dtype", - [](const std::vector>>& + [](const std::vector>>& nested_tensorlist, const bool with_indices) { _FlatMap map; diff --git a/torch/csrc/PyInterpreter.cpp b/torch/csrc/PyInterpreter.cpp index 4582cb2a8340c..a7e5c5e9fb873 100644 --- a/torch/csrc/PyInterpreter.cpp +++ b/torch/csrc/PyInterpreter.cpp @@ -592,7 +592,7 @@ static void set_tensor_attr_with_capsule( const c10::TensorImpl* tensor, py::capsule& capsule, const char* attr_name) { - c10::optional mb_obj = tensor->pyobj_slot()->check_pyobj( + std::optional mb_obj = 
tensor->pyobj_slot()->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/false); TORCH_CHECK( mb_obj.has_value(), "Tensor subclass's PyInterpreter has no value"); @@ -620,7 +620,7 @@ static c10::ArrayRef get_set_cached_attr( const c10::TensorImpl* tensor, const char* base_attr_name, const py::object& obj) { - c10::optional mb_obj = + std::optional mb_obj = tensor->pyobj_slot()->check_pyobj(getPyInterpreter()); TORCH_CHECK( mb_obj.has_value(), "Tensor subclass's PyInterpreter has no value"); diff --git a/torch/csrc/Storage.cpp b/torch/csrc/Storage.cpp index 878a66ad2461c..aa5584abd39e4 100644 --- a/torch/csrc/Storage.cpp +++ b/torch/csrc/Storage.cpp @@ -108,7 +108,7 @@ PyObject* THPStorage_Wrap(c10::Storage storage) { c10::newStorageImplFromRefcountedDataPtr(storage), c10::impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED); } - c10::optional maybe_pyobj = pyobj_slot->check_pyobj( + std::optional maybe_pyobj = pyobj_slot->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/false); c10::impl::PyInterpreterStatus status = c10::impl::PyInterpreterStatus::TAGGED_BY_US; @@ -316,8 +316,8 @@ static PyObject* THPStorage_pynew( device_arg_idx = 2; } - c10::optional allocator_opt = r.toInt64Optional(allocator_arg_idx); - c10::optional device_opt = r.deviceOptional(device_arg_idx); + std::optional allocator_opt = r.toInt64Optional(allocator_arg_idx); + std::optional device_opt = r.deviceOptional(device_arg_idx); TORCH_CHECK( !allocator_opt.has_value() || !device_opt.has_value(), @@ -498,7 +498,7 @@ static PyObject* THPStorage_get(THPStorage* self, PyObject* index) { at::StorageImpl* old_storage_impl = storage.unsafeGetStorageImpl(); c10::raw::intrusive_ptr::incref(old_storage_impl); - c10::optional device_opt = old_storage_impl->device(); + std::optional device_opt = old_storage_impl->device(); auto new_storage_impl = make_storage_impl( c10::StorageImpl::use_byte_size_t(), #ifdef THQUANTIZED diff --git a/torch/csrc/Stream.cpp b/torch/csrc/Stream.cpp index 06dac515c1a5e..179f4f1390aff 100644 --- a/torch/csrc/Stream.cpp +++ b/torch/csrc/Stream.cpp @@ -82,7 +82,7 @@ static PyObject* THPStream_pynew( // It requires other device backends override getNewStream method. How the new // stream is created is backend specific. Backend should be able to correctly // manage the lifetime of streams. - c10::optional stream_opt; + std::optional stream_opt; if (r.idx == 0) { c10::impl::VirtualGuardImpl impl{static_cast(device_type)}; stream_opt = impl.getNewStream( diff --git a/torch/csrc/api/include/torch/expanding_array.h b/torch/csrc/api/include/torch/expanding_array.h index aa4fecf4ff37c..f77c05119ebf7 100644 --- a/torch/csrc/api/include/torch/expanding_array.h +++ b/torch/csrc/api/include/torch/expanding_array.h @@ -104,15 +104,15 @@ std::ostream& operator<<( } /// A utility class that accepts either a container of `D`-many -/// `c10::optional` values, or a single `c10::optional` value, which is +/// `std::optional` values, or a single `c10::optional` value, which is /// internally repeated `D` times. It has the additional ability to accept /// containers of the underlying type `T` and convert them to a container of -/// `c10::optional`. +/// `std::optional`. template class ExpandingArrayWithOptionalElem - : public ExpandingArray> { + : public ExpandingArray> { public: - using ExpandingArray>::ExpandingArray; + using ExpandingArray>::ExpandingArray; /// Constructs an `ExpandingArrayWithOptionalElem` from an `initializer_list` /// of the underlying type `T`. 
The extent of the length is checked against @@ -130,7 +130,7 @@ class ExpandingArrayWithOptionalElem /// the underlying type `T`. The extent of the length is checked against the /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. /*implicit*/ ExpandingArrayWithOptionalElem(at::ArrayRef values) - : ExpandingArray>(0) { + : ExpandingArray>(0) { // clang-format off TORCH_CHECK( values.size() == D, @@ -145,7 +145,7 @@ class ExpandingArrayWithOptionalElem /// underlying type `T`, which is repeated `D` times (where `D` is the extent /// parameter of the `ExpandingArrayWithOptionalElem`). /*implicit*/ ExpandingArrayWithOptionalElem(T single_size) - : ExpandingArray>(0) { + : ExpandingArray>(0) { for (const auto i : c10::irange(this->values_.size())) { this->values_[i] = single_size; } @@ -154,7 +154,7 @@ class ExpandingArrayWithOptionalElem /// Constructs an `ExpandingArrayWithOptionalElem` from a correctly sized /// `std::array` of the underlying type `T`. /*implicit*/ ExpandingArrayWithOptionalElem(const std::array& values) - : ExpandingArray>(0) { + : ExpandingArray>(0) { for (const auto i : c10::irange(this->values_.size())) { this->values_[i] = values[i]; } diff --git a/torch/csrc/api/include/torch/fft.h b/torch/csrc/api/include/torch/fft.h index da1f7e518ae54..d9a3430a7a249 100644 --- a/torch/csrc/api/include/torch/fft.h +++ b/torch/csrc/api/include/torch/fft.h @@ -15,9 +15,9 @@ namespace fft { /// ``` inline Tensor fft( const Tensor& self, - c10::optional n = c10::nullopt, + std::optional n = c10::nullopt, int64_t dim = -1, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_fft_symint(self, n, dim, norm); } @@ -31,9 +31,9 @@ inline Tensor fft( /// ``` inline Tensor ifft( const Tensor& self, - c10::optional n = c10::nullopt, + std::optional n = c10::nullopt, int64_t dim = -1, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_ifft_symint(self, n, dim, norm); } @@ -49,7 +49,7 @@ inline Tensor fft2( const Tensor& self, OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_fft2(self, s, dim, norm); } @@ -65,7 +65,7 @@ inline Tensor ifft2( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_ifft2(self, s, dim, norm); } @@ -81,7 +81,7 @@ inline Tensor fftn( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, at::OptionalIntArrayRef dim = c10::nullopt, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_fftn(self, s, dim, norm); } @@ -97,7 +97,7 @@ inline Tensor ifftn( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, at::OptionalIntArrayRef dim = c10::nullopt, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_ifftn(self, s, dim, norm); } @@ -112,9 +112,9 @@ inline Tensor ifftn( /// ``` inline Tensor rfft( const Tensor& self, - c10::optional n = c10::nullopt, + std::optional n = c10::nullopt, int64_t dim = -1, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_rfft_symint(self, n, dim, norm); } @@ -131,9 +131,9 @@ inline Tensor rfft( /// ``` inline Tensor irfft( const Tensor& self, - c10::optional n = c10::nullopt, + std::optional n = c10::nullopt, int64_t dim = -1, - c10::optional norm = 
c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_irfft_symint(self, n, dim, norm); } @@ -149,7 +149,7 @@ inline Tensor rfft2( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_rfft2(self, s, dim, norm); } @@ -165,7 +165,7 @@ inline Tensor irfft2( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_irfft2(self, s, dim, norm); } @@ -181,7 +181,7 @@ inline Tensor rfftn( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, at::OptionalIntArrayRef dim = c10::nullopt, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_rfftn(self, s, dim, norm); } @@ -197,7 +197,7 @@ inline Tensor irfftn( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, at::OptionalIntArrayRef dim = c10::nullopt, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_irfftn(self, s, dim, norm); } @@ -215,9 +215,9 @@ inline Tensor irfftn( /// ``` inline Tensor hfft( const Tensor& self, - c10::optional n = c10::nullopt, + std::optional n = c10::nullopt, int64_t dim = -1, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_hfft_symint(self, n, dim, norm); } @@ -234,9 +234,9 @@ inline Tensor hfft( /// ``` inline Tensor ihfft( const Tensor& self, - c10::optional n = c10::nullopt, + std::optional n = c10::nullopt, int64_t dim = -1, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_ihfft_symint(self, n, dim, norm); } @@ -255,7 +255,7 @@ inline Tensor hfft2( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_hfft2(self, s, dim, norm); } @@ -275,7 +275,7 @@ inline Tensor ihfft2( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_ihfft2(self, s, dim, norm); } @@ -294,7 +294,7 @@ inline Tensor hfftn( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_hfftn(self, s, dim, norm); } @@ -314,7 +314,7 @@ inline Tensor ihfftn( const Tensor& self, at::OptionalIntArrayRef s = c10::nullopt, IntArrayRef dim = {-2, -1}, - c10::optional norm = c10::nullopt) { + std::optional norm = c10::nullopt) { return torch::fft_ihfftn(self, s, dim, norm); } diff --git a/torch/csrc/api/include/torch/linalg.h b/torch/csrc/api/include/torch/linalg.h index 38010fbfcd4d2..3b398fa935b91 100644 --- a/torch/csrc/api/include/torch/linalg.h +++ b/torch/csrc/api/include/torch/linalg.h @@ -118,8 +118,8 @@ inline std::tuple lu_out( inline std::tuple lstsq( const Tensor& self, const Tensor& b, - c10::optional cond, - c10::optional driver) { + std::optional cond, + std::optional driver) { return torch::linalg_lstsq(self, b, cond, driver); } @@ -245,16 +245,16 @@ inline Tensor matrix_rank( inline Tensor matrix_rank( const Tensor& input, - c10::optional atol, - c10::optional rtol, + std::optional atol, + std::optional rtol, bool hermitian) { return torch::linalg_matrix_rank(input, atol, rtol, 
hermitian); } inline Tensor matrix_rank( const Tensor& input, - const c10::optional& atol, - const c10::optional& rtol, + const std::optional& atol, + const std::optional& rtol, bool hermitian) { return torch::linalg_matrix_rank(input, atol, rtol, hermitian); } @@ -278,8 +278,8 @@ inline Tensor& matrix_rank_out( inline Tensor& matrix_rank_out( Tensor& result, const Tensor& input, - c10::optional atol, - c10::optional rtol, + std::optional atol, + std::optional rtol, bool hermitian) { return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian); } @@ -287,8 +287,8 @@ inline Tensor& matrix_rank_out( inline Tensor& matrix_rank_out( Tensor& result, const Tensor& input, - const c10::optional& atol, - const c10::optional& rtol, + const std::optional& atol, + const std::optional& rtol, bool hermitian) { return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian); } @@ -382,7 +382,7 @@ inline Tensor& solve_triangular_out( inline std::tuple svd( const Tensor& input, bool full_matrices, - c10::optional driver) { + std::optional driver) { return torch::linalg_svd(input, full_matrices, driver); } @@ -392,20 +392,20 @@ inline std::tuple svd_out( Tensor& Vh, const Tensor& input, bool full_matrices, - c10::optional driver) { + std::optional driver) { return torch::linalg_svd_out(U, S, Vh, input, full_matrices, driver); } inline Tensor svdvals( const Tensor& input, - c10::optional driver) { + std::optional driver) { return torch::linalg_svdvals(input, driver); } inline Tensor& svdvals_out( Tensor& result, const Tensor& input, - c10::optional driver) { + std::optional driver) { return torch::linalg_svdvals_out(result, input, driver); } @@ -561,8 +561,8 @@ inline Tensor& householder_product_out( inline std::tuple lstsq( const Tensor& self, const Tensor& b, - c10::optional cond, - c10::optional driver) { + std::optional cond, + std::optional driver) { return detail::lstsq(self, b, cond, driver); } @@ -773,16 +773,16 @@ inline Tensor matrix_rank( inline Tensor matrix_rank( const Tensor& input, - c10::optional atol, - c10::optional rtol, + std::optional atol, + std::optional rtol, bool hermitian) { return detail::matrix_rank(input, atol, rtol, hermitian); } inline Tensor matrix_rank( const Tensor& input, - const c10::optional& atol, - const c10::optional& rtol, + const std::optional& atol, + const std::optional& rtol, bool hermitian) { return detail::matrix_rank(input, atol, rtol, hermitian); } @@ -806,8 +806,8 @@ inline Tensor& matrix_rank_out( inline Tensor& matrix_rank_out( Tensor& result, const Tensor& input, - c10::optional atol, - c10::optional rtol, + std::optional atol, + std::optional rtol, bool hermitian) { return detail::matrix_rank_out(result, input, atol, rtol, hermitian); } @@ -815,8 +815,8 @@ inline Tensor& matrix_rank_out( inline Tensor& matrix_rank_out( Tensor& result, const Tensor& input, - const c10::optional& atol, - const c10::optional& rtol, + const std::optional& atol, + const std::optional& rtol, bool hermitian) { return detail::matrix_rank_out(result, input, atol, rtol, hermitian); } @@ -976,7 +976,7 @@ inline Tensor& solve_triangular_out( inline std::tuple svd( const Tensor& input, bool full_matrices, - c10::optional driver) { + std::optional driver) { return detail::svd(input, full_matrices, driver); } @@ -986,7 +986,7 @@ inline std::tuple svd_out( Tensor& Vh, const Tensor& input, bool full_matrices, - c10::optional driver) { + std::optional driver) { return detail::svd_out(U, S, Vh, input, full_matrices, driver); } @@ -995,14 +995,14 @@ inline std::tuple 
svd_out( /// See https://pytorch.org/docs/main/linalg.html#torch.linalg.svdvals inline Tensor svdvals( const Tensor& input, - c10::optional driver) { + std::optional driver) { return detail::svdvals(input, driver); } inline Tensor& svdvals_out( Tensor& result, const Tensor& input, - c10::optional driver) { + std::optional driver) { return detail::svdvals_out(result, input, driver); } diff --git a/torch/csrc/api/include/torch/nested.h b/torch/csrc/api/include/torch/nested.h index 524b4d433186c..780aab4230472 100644 --- a/torch/csrc/api/include/torch/nested.h +++ b/torch/csrc/api/include/torch/nested.h @@ -72,8 +72,8 @@ inline at::Tensor nested_tensor( /// ``` inline at::Tensor as_nested_tensor( at::TensorList list, - c10::optional dtype = c10::nullopt, - c10::optional device = c10::nullopt) { + std::optional dtype = c10::nullopt, + std::optional device = c10::nullopt) { return at::_nested_tensor_from_tensor_list( list, dtype, c10::nullopt, device, c10::nullopt); } diff --git a/torch/csrc/api/include/torch/nn/functional/activation.h b/torch/csrc/api/include/torch/nn/functional/activation.h index 9c100287f9559..89e596f71d143 100644 --- a/torch/csrc/api/include/torch/nn/functional/activation.h +++ b/torch/csrc/api/include/torch/nn/functional/activation.h @@ -233,7 +233,7 @@ namespace detail { inline Tensor softmax( const Tensor& input, int64_t dim, - c10::optional dtype) { + std::optional dtype) { Tensor ret; if (dtype == c10::nullopt) { @@ -270,7 +270,7 @@ namespace detail { inline Tensor softmin( const Tensor& input, int64_t dim, - c10::optional dtype) { + std::optional dtype) { Tensor ret; if (dtype == c10::nullopt) { @@ -307,7 +307,7 @@ namespace detail { inline Tensor log_softmax( const Tensor& input, int64_t dim, - c10::optional dtype) { + std::optional dtype) { Tensor ret; if (dtype == c10::nullopt) { diff --git a/torch/csrc/api/include/torch/nn/functional/batchnorm.h b/torch/csrc/api/include/torch/nn/functional/batchnorm.h index 487bd78ad44fe..bc6f141281b39 100644 --- a/torch/csrc/api/include/torch/nn/functional/batchnorm.h +++ b/torch/csrc/api/include/torch/nn/functional/batchnorm.h @@ -17,7 +17,7 @@ inline Tensor batch_norm( Tensor weight, Tensor bias, bool training, - c10::optional momentum, + std::optional momentum, double eps) { TORCH_CHECK( input.dim() >= 2, diff --git a/torch/csrc/api/include/torch/nn/functional/embedding.h b/torch/csrc/api/include/torch/nn/functional/embedding.h index 99432c09d36be..b06b0a3dc1e85 100644 --- a/torch/csrc/api/include/torch/nn/functional/embedding.h +++ b/torch/csrc/api/include/torch/nn/functional/embedding.h @@ -24,8 +24,8 @@ inline void _no_grad_embedding_renorm_( inline Tensor embedding( const Tensor& input, const Tensor& weight, - c10::optional padding_idx, - c10::optional max_norm, + std::optional padding_idx, + std::optional max_norm, double norm_type, bool scale_grad_by_freq, bool sparse) { @@ -90,14 +90,14 @@ inline Tensor embedding_bag( const Tensor& input, const Tensor& weight, const Tensor& offsets, - c10::optional max_norm, + std::optional max_norm, double norm_type, bool scale_grad_by_freq, EmbeddingBagMode mode, bool sparse, const Tensor& per_sample_weights, bool include_last_offset, - c10::optional padding_idx) { + std::optional padding_idx) { auto input_ = input; auto offsets_ = offsets; auto per_sample_weights_ = per_sample_weights; diff --git a/torch/csrc/api/include/torch/nn/functional/loss.h b/torch/csrc/api/include/torch/nn/functional/loss.h index 17fa2be1afc7a..c4124c2b23859 100644 --- 
a/torch/csrc/api/include/torch/nn/functional/loss.h +++ b/torch/csrc/api/include/torch/nn/functional/loss.h @@ -346,7 +346,7 @@ inline Tensor smooth_l1_loss( const Tensor& input, const Tensor& target, SmoothL1LossFuncOptions::reduction_t reduction, - c10::optional beta_opt = c10::nullopt) { + std::optional beta_opt = c10::nullopt) { if (target.sizes() != input.sizes()) { TORCH_WARN( "Using a target size (", @@ -656,7 +656,7 @@ inline Tensor triplet_margin_with_distance_loss( const Tensor& anchor, const Tensor& positive, const Tensor& negative, - c10::optional + std::optional distance_function, double margin, bool swap, diff --git a/torch/csrc/api/include/torch/nn/functional/normalization.h b/torch/csrc/api/include/torch/nn/functional/normalization.h index a45fec6ca34f9..53bd61839f745 100644 --- a/torch/csrc/api/include/torch/nn/functional/normalization.h +++ b/torch/csrc/api/include/torch/nn/functional/normalization.h @@ -16,7 +16,7 @@ inline Tensor normalize( double p, int64_t dim, double eps, - c10::optional out) { + std::optional out) { if (out == c10::nullopt) { auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); return input / denom; diff --git a/torch/csrc/api/include/torch/nn/functional/padding.h b/torch/csrc/api/include/torch/nn/functional/padding.h index d4b81fb53f26a..1bb6f95382904 100644 --- a/torch/csrc/api/include/torch/nn/functional/padding.h +++ b/torch/csrc/api/include/torch/nn/functional/padding.h @@ -27,7 +27,7 @@ inline Tensor pad( TORCH_CHECK(false, "Unrecognised padding mode"); }(); - c10::optional fill_value; + std::optional fill_value; if (value != 0.0) { fill_value = value; } diff --git a/torch/csrc/api/include/torch/nn/functional/pooling.h b/torch/csrc/api/include/torch/nn/functional/pooling.h index 9f9708ce657ec..be3009f62201a 100644 --- a/torch/csrc/api/include/torch/nn/functional/pooling.h +++ b/torch/csrc/api/include/torch/nn/functional/pooling.h @@ -57,7 +57,7 @@ inline Tensor avg_pool2d( ExpandingArray<2> padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { return torch::avg_pool2d( input, kernel_size, @@ -104,7 +104,7 @@ inline Tensor avg_pool3d( ExpandingArray<3> padding, bool ceil_mode, bool count_include_pad, - c10::optional divisor_override) { + std::optional divisor_override) { return torch::avg_pool3d( input, kernel_size, @@ -632,7 +632,7 @@ inline std::vector _unpool_output_size( const IntArrayRef& kernel_size, const IntArrayRef& stride, const IntArrayRef& padding, - const c10::optional>& output_size) { + const std::optional>& output_size) { auto input_size = input.sizes(); std::vector default_size; for (const auto d : c10::irange(kernel_size.size())) { @@ -688,7 +688,7 @@ inline Tensor max_unpool1d( ExpandingArray<1> kernel_size, ExpandingArray<1> stride, ExpandingArray<1> padding, - const c10::optional>& output_size) { + const std::optional>& output_size) { auto output_size_ = _unpool_output_size(input, kernel_size, stride, padding, output_size); output_size_.push_back(1); @@ -733,7 +733,7 @@ inline Tensor max_unpool2d( ExpandingArray<2> kernel_size, ExpandingArray<2> stride, ExpandingArray<2> padding, - const c10::optional>& output_size) { + const std::optional>& output_size) { auto output_size_ = _unpool_output_size(input, kernel_size, stride, padding, output_size); @@ -776,7 +776,7 @@ inline Tensor max_unpool3d( ExpandingArray<3> kernel_size, ExpandingArray<3> stride, ExpandingArray<3> padding, - const c10::optional>& output_size) { + const std::optional>& 
output_size) { auto output_size_ = _unpool_output_size(input, kernel_size, stride, padding, output_size); @@ -817,8 +817,8 @@ namespace detail { inline std::tuple fractional_max_pool2d_with_indices( const Tensor& input, const ExpandingArray<2>& kernel_size, - const c10::optional>& output_size, - const c10::optional>& output_ratio, + const std::optional>& output_size, + const std::optional>& output_ratio, const Tensor& _random_samples) { if (output_size == c10::nullopt && output_ratio == c10::nullopt) { TORCH_CHECK( @@ -826,7 +826,7 @@ inline std::tuple fractional_max_pool2d_with_indices( "fractional_max_pool2d requires specifying either ", "an output_size or an output_ratio"); } - c10::optional> output_size_ = output_size; + std::optional> output_size_ = output_size; if (output_size_ == c10::nullopt) { TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt); output_size_ = { @@ -875,8 +875,8 @@ namespace detail { inline Tensor fractional_max_pool2d( const Tensor& input, ExpandingArray<2> kernel_size, - c10::optional> output_size, - c10::optional> output_ratio, + std::optional> output_size, + std::optional> output_ratio, const Tensor& _random_samples) { return std::get<0>(fractional_max_pool2d_with_indices( input, kernel_size, output_size, output_ratio, _random_samples)); @@ -910,8 +910,8 @@ namespace detail { inline std::tuple fractional_max_pool3d_with_indices( const Tensor& input, const ExpandingArray<3>& kernel_size, - const c10::optional>& output_size, - const c10::optional>& output_ratio, + const std::optional>& output_size, + const std::optional>& output_ratio, const Tensor& _random_samples) { if (output_size == c10::nullopt && output_ratio == c10::nullopt) { TORCH_CHECK( @@ -920,7 +920,7 @@ inline std::tuple fractional_max_pool3d_with_indices( "an output_size or an output_ratio"); } - c10::optional> output_size_ = output_size; + std::optional> output_size_ = output_size; if (output_size_ == c10::nullopt) { TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt); output_size_ = { @@ -971,8 +971,8 @@ namespace detail { inline Tensor fractional_max_pool3d( const Tensor& input, ExpandingArray<3> kernel_size, - c10::optional> output_size, - c10::optional> output_ratio, + std::optional> output_size, + std::optional> output_ratio, const Tensor& _random_samples) { return std::get<0>(fractional_max_pool3d_with_indices( input, kernel_size, output_size, output_ratio, _random_samples)); diff --git a/torch/csrc/api/include/torch/nn/functional/upsampling.h b/torch/csrc/api/include/torch/nn/functional/upsampling.h index 8fe1b3f00f85d..38c5c51f9a475 100644 --- a/torch/csrc/api/include/torch/nn/functional/upsampling.h +++ b/torch/csrc/api/include/torch/nn/functional/upsampling.h @@ -15,9 +15,9 @@ inline std::vector _interp_output_size( int64_t dim, std::tuple< Tensor, - c10::optional>, - c10::optional>, - c10::optional> closed_over_args) { + std::optional>, + std::optional>, + std::optional> closed_over_args) { auto [input, size, scale_factor, recompute_scale_factor] = closed_over_args; if (size == c10::nullopt && scale_factor == c10::nullopt) { TORCH_CHECK(false, "either size or scale_factor should be defined"); @@ -75,11 +75,11 @@ inline std::vector _interp_output_size( namespace detail { inline Tensor interpolate( const Tensor& input, - const c10::optional>& size, - const c10::optional>& scale_factor, + const std::optional>& size, + const std::optional>& scale_factor, InterpolateFuncOptions::mode_t mode, - c10::optional align_corners, - c10::optional recompute_scale_factor, + std::optional align_corners, 
+ std::optional recompute_scale_factor, bool antialias) { if (std::holds_alternative(mode) || std::get_if(&mode)) { @@ -113,7 +113,7 @@ inline Tensor interpolate( ")"); auto scale_factor_len = input.dim() - 2; - std::vector> scale_factor_list( + std::vector> scale_factor_list( scale_factor_len, c10::nullopt); if (scale_factor != c10::nullopt && !recompute_scale_factor.value_or(false)) { auto _scale_factor_repeated = *scale_factor; diff --git a/torch/csrc/api/include/torch/nn/functional/vision.h b/torch/csrc/api/include/torch/nn/functional/vision.h index e9cb1eb11ac0f..a6c53e0c0a9ad 100644 --- a/torch/csrc/api/include/torch/nn/functional/vision.h +++ b/torch/csrc/api/include/torch/nn/functional/vision.h @@ -59,7 +59,7 @@ inline Tensor grid_sample( const Tensor& grid, GridSampleFuncOptions::mode_t mode, GridSampleFuncOptions::padding_mode_t padding_mode, - c10::optional align_corners) { + std::optional align_corners) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) int64_t mode_enum, padding_mode_enum; diff --git a/torch/csrc/api/include/torch/nn/modules/conv.h b/torch/csrc/api/include/torch/nn/modules/conv.h index 65a2d6905c0a9..9c55254ddb910 100644 --- a/torch/csrc/api/include/torch/nn/modules/conv.h +++ b/torch/csrc/api/include/torch/nn/modules/conv.h @@ -315,7 +315,7 @@ class ConvTransposeNdImpl : public ConvNdImpl { std::vector _output_padding( const Tensor& input, - const c10::optional& output_size, + const std::optional& output_size, const ExpandingArray& stride, const ExpandingArray& padding, const ExpandingArray& kernel_size); @@ -350,10 +350,10 @@ class TORCH_API ConvTranspose1dImpl explicit ConvTranspose1dImpl(ConvTranspose1dOptions options_); Tensor forward( const Tensor& input, - const c10::optional& output_size = c10::nullopt); + const std::optional& output_size = c10::nullopt); protected: - FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional())}) + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional())}) }; /// A `ModuleHolder` subclass for `ConvTranspose1dImpl`. @@ -392,10 +392,10 @@ class TORCH_API ConvTranspose2dImpl explicit ConvTranspose2dImpl(ConvTranspose2dOptions options_); Tensor forward( const Tensor& input, - const c10::optional& output_size = c10::nullopt); + const std::optional& output_size = c10::nullopt); protected: - FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional())}) + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional())}) }; /// A `ModuleHolder` subclass for `ConvTranspose2dImpl`. @@ -434,10 +434,10 @@ class TORCH_API ConvTranspose3dImpl explicit ConvTranspose3dImpl(ConvTranspose3dOptions options_); Tensor forward( const Tensor& input, - const c10::optional& output_size = c10::nullopt); + const std::optional& output_size = c10::nullopt); protected: - FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(c10::optional())}) + FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional())}) }; /// A `ModuleHolder` subclass for `ConvTranspose3dImpl`. 
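The ConvTranspose hunks above migrate `forward(input, output_size)` so the trailing argument is a `std::optional` that defaults to an empty value. As a rough standalone sketch of that calling pattern (not taken from the patch: `forward_like` and the shape math are made-up stand-ins; only the `std::optional` default-argument semantics are the point):

```
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Hypothetical stand-in for a ConvTranspose-style forward(input, output_size):
// the trailing argument is optional and defaults to "not provided".
std::vector<int64_t> forward_like(
    const std::vector<int64_t>& input_size,
    const std::optional<std::vector<int64_t>>& output_size = std::nullopt) {
  if (output_size.has_value()) {
    // Caller pinned the output shape explicitly.
    return *output_size;
  }
  // Empty optional: fall back to a computed default, much as _output_padding
  // above falls back to options.output_padding() when output_size is nullopt.
  std::vector<int64_t> computed = input_size;
  for (auto& d : computed) {
    d *= 2;
  }
  return computed;
}

int main() {
  auto a = forward_like({4, 4});                              // empty optional
  auto b = forward_like({4, 4}, std::vector<int64_t>{7, 9});  // engaged optional
  std::cout << a[0] << " " << b[0] << "\n";                   // prints "8 7"
  return 0;
}
```

Because the default stays an empty optional, existing call sites that omit `output_size` keep their behavior across the type change.
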
diff --git a/torch/csrc/api/include/torch/nn/modules/pooling.h b/torch/csrc/api/include/torch/nn/modules/pooling.h index a9db131b0dd08..6bcdca463b1ba 100644 --- a/torch/csrc/api/include/torch/nn/modules/pooling.h +++ b/torch/csrc/api/include/torch/nn/modules/pooling.h @@ -507,10 +507,10 @@ class TORCH_API MaxUnpool1dImpl : public MaxUnpoolImpl<1, MaxUnpool1dImpl> { Tensor forward( const Tensor& input, const Tensor& indices, - const c10::optional>& output_size = c10::nullopt); + const std::optional>& output_size = c10::nullopt); protected: - FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(c10::optional>())}) + FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional>())}) }; /// A `ModuleHolder` subclass for `MaxUnpool1dImpl`. @@ -539,10 +539,10 @@ class TORCH_API MaxUnpool2dImpl : public MaxUnpoolImpl<2, MaxUnpool2dImpl> { Tensor forward( const Tensor& input, const Tensor& indices, - const c10::optional>& output_size = c10::nullopt); + const std::optional>& output_size = c10::nullopt); protected: - FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(c10::optional>())}) + FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional>())}) }; /// A `ModuleHolder` subclass for `MaxUnpool2dImpl`. @@ -571,10 +571,10 @@ class TORCH_API MaxUnpool3dImpl : public MaxUnpoolImpl<3, MaxUnpool3dImpl> { Tensor forward( const Tensor& input, const Tensor& indices, - const c10::optional>& output_size = c10::nullopt); + const std::optional>& output_size = c10::nullopt); protected: - FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(c10::optional>())}) + FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional>())}) }; /// A `ModuleHolder` subclass for `MaxUnpool3dImpl`. diff --git a/torch/csrc/api/include/torch/nn/modules/utils.h b/torch/csrc/api/include/torch/nn/modules/utils.h index 6d3d383465f33..869027a241492 100644 --- a/torch/csrc/api/include/torch/nn/modules/utils.h +++ b/torch/csrc/api/include/torch/nn/modules/utils.h @@ -32,7 +32,7 @@ inline std::vector _reverse_repeat_vector( } inline std::vector _list_with_default( - torch::ArrayRef> out_size, + torch::ArrayRef> out_size, torch::IntArrayRef defaults) { TORCH_CHECK( defaults.size() > out_size.size(), diff --git a/torch/csrc/api/include/torch/nn/options/activation.h b/torch/csrc/api/include/torch/nn/options/activation.h index e51805d364852..165212e0e860c 100644 --- a/torch/csrc/api/include/torch/nn/options/activation.h +++ b/torch/csrc/api/include/torch/nn/options/activation.h @@ -252,7 +252,7 @@ struct TORCH_API SoftmaxFuncOptions { /// If specified, the input tensor is casted to `dtype` before the operation /// is performed. This is useful for preventing data type overflows. Default: /// None. - TORCH_ARG(c10::optional, dtype) = c10::nullopt; + TORCH_ARG(std::optional, dtype) = c10::nullopt; }; } // namespace functional @@ -293,7 +293,7 @@ struct TORCH_API SoftminFuncOptions { /// If specified, the input tensor is casted to `dtype` before the operation /// is performed. This is useful for preventing data type overflows. Default: /// None. - TORCH_ARG(c10::optional, dtype) = c10::nullopt; + TORCH_ARG(std::optional, dtype) = c10::nullopt; }; } // namespace functional @@ -334,7 +334,7 @@ struct TORCH_API LogSoftmaxFuncOptions { /// If specified, the input tensor is casted to `dtype` before the operation /// is performed. This is useful for preventing data type overflows. Default: /// None. 
- TORCH_ARG(c10::optional, dtype) = c10::nullopt; + TORCH_ARG(std::optional, dtype) = c10::nullopt; }; } // namespace functional diff --git a/torch/csrc/api/include/torch/nn/options/batchnorm.h b/torch/csrc/api/include/torch/nn/options/batchnorm.h index cd2d7f164203e..943673e2aae74 100644 --- a/torch/csrc/api/include/torch/nn/options/batchnorm.h +++ b/torch/csrc/api/include/torch/nn/options/batchnorm.h @@ -21,7 +21,7 @@ struct TORCH_API BatchNormOptions { /// A momentum multiplier for the mean and variance. /// Changing this parameter after construction __is effective__. - TORCH_ARG(c10::optional, momentum) = 0.1; + TORCH_ARG(std::optional, momentum) = 0.1; /// Whether to learn a scale and bias that are applied in an affine /// transformation on the input. @@ -82,7 +82,7 @@ struct TORCH_API BatchNormFuncOptions { /// A momentum multiplier for the mean and variance. /// Changing this parameter after construction __is effective__. - TORCH_ARG(c10::optional, momentum) = 0.1; + TORCH_ARG(std::optional, momentum) = 0.1; /// The epsilon value added for numerical stability. /// Changing this parameter after construction __is effective__. diff --git a/torch/csrc/api/include/torch/nn/options/embedding.h b/torch/csrc/api/include/torch/nn/options/embedding.h index d8d06716308e1..20eacf9073355 100644 --- a/torch/csrc/api/include/torch/nn/options/embedding.h +++ b/torch/csrc/api/include/torch/nn/options/embedding.h @@ -28,10 +28,10 @@ struct TORCH_API EmbeddingOptions { /// Embedding, the embedding vector at `padding_idx` will default to all /// zeros, but can be updated to another value to be used as the padding /// vector. - TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = c10::nullopt; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -55,10 +55,10 @@ struct TORCH_API EmbeddingFromPretrainedOptions { /// If specified, the entries at `padding_idx` do not contribute to the /// gradient; therefore, the embedding vector at `padding_idx` is not updated /// during training, i.e. it remains as a fixed "pad". - TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = c10::nullopt; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -84,10 +84,10 @@ struct TORCH_API EmbeddingFuncOptions { /// If specified, the entries at `padding_idx` do not contribute to the /// gradient; therefore, the embedding vector at `padding_idx` is not updated /// during training, i.e. it remains as a fixed "pad". - TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = c10::nullopt; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. 
- TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -120,7 +120,7 @@ struct TORCH_API EmbeddingBagOptions { TORCH_ARG(int64_t, embedding_dim); /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -148,7 +148,7 @@ struct TORCH_API EmbeddingBagOptions { /// zeros, but can be updated to another value to be used as the padding /// vector. Note that the embedding vector at `padding_idx` is excluded from /// the reduction. - TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = c10::nullopt; }; // ============================================================================ @@ -161,7 +161,7 @@ struct TORCH_API EmbeddingBagFromPretrainedOptions { TORCH_ARG(bool, freeze) = true; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -184,7 +184,7 @@ struct TORCH_API EmbeddingBagFromPretrainedOptions { /// gradient; therefore, the embedding vector at padding_idx is not updated /// during training, i.e. it remains as a fixed "pad". Note that the embedding /// vector at `padding_idx` is excluded from the reduction. - TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = c10::nullopt; }; // ============================================================================ @@ -205,7 +205,7 @@ struct TORCH_API EmbeddingBagFuncOptions { TORCH_ARG(torch::Tensor, offsets) = Tensor(); /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -233,7 +233,7 @@ struct TORCH_API EmbeddingBagFuncOptions { /// gradient; therefore, the embedding vector at padding_idx is not updated /// during training, i.e. it remains as a fixed "pad". Note that the embedding /// vector at `padding_idx` is excluded from the reduction. 
- TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = c10::nullopt; }; } // namespace functional diff --git a/torch/csrc/api/include/torch/nn/options/loss.h b/torch/csrc/api/include/torch/nn/options/loss.h index c9eb2b66f3e0b..f1fc7a4d41115 100644 --- a/torch/csrc/api/include/torch/nn/options/loss.h +++ b/torch/csrc/api/include/torch/nn/options/loss.h @@ -450,7 +450,7 @@ struct TORCH_API TripletMarginWithDistanceLossOptions { /// Specifies a nonnegative, real-valued function that quantifies the /// closeness of two tensors. If not specified, `F::pairwise_distance` will /// be used. Default: nullopt - TORCH_ARG(c10::optional, distance_function) = + TORCH_ARG(std::optional, distance_function) = c10::nullopt; /// Specifies a nonnegative margin representing the minimum difference /// between the positive and negative distances required for the loss to be 0. @@ -548,7 +548,7 @@ struct TORCH_API SmoothL1LossOptions { /// Specifies the threshold at which to change between L1 and L2 loss. /// If beta is not specified, a value of 1.0 will be used. /// Default: nullopt - TORCH_ARG(c10::optional, beta) = c10::nullopt; + TORCH_ARG(std::optional, beta) = c10::nullopt; }; namespace functional { diff --git a/torch/csrc/api/include/torch/nn/options/normalization.h b/torch/csrc/api/include/torch/nn/options/normalization.h index ae8c206736d50..a1e5b1a0aeab1 100644 --- a/torch/csrc/api/include/torch/nn/options/normalization.h +++ b/torch/csrc/api/include/torch/nn/options/normalization.h @@ -133,7 +133,7 @@ struct TORCH_API NormalizeFuncOptions { TORCH_ARG(double, eps) = 1e-12; /// the output tensor. If `out` is used, this /// operation won't be differentiable. - TORCH_ARG(c10::optional, out) = c10::nullopt; + TORCH_ARG(std::optional, out) = c10::nullopt; }; } // namespace functional diff --git a/torch/csrc/api/include/torch/nn/options/pooling.h b/torch/csrc/api/include/torch/nn/options/pooling.h index 41de605e90fb0..8f6cee99bff6a 100644 --- a/torch/csrc/api/include/torch/nn/options/pooling.h +++ b/torch/csrc/api/include/torch/nn/options/pooling.h @@ -32,7 +32,7 @@ struct AvgPoolOptions { /// if specified, it will be used as divisor, otherwise size of the pooling /// region will be used. - TORCH_ARG(c10::optional, divisor_override) = c10::nullopt; + TORCH_ARG(std::optional, divisor_override) = c10::nullopt; }; /// `AvgPoolOptions` specialized for the `AvgPool1d` module. @@ -401,7 +401,7 @@ struct MaxUnpoolFuncOptions { TORCH_ARG(ExpandingArray, padding) = 0; /// the targeted output size - TORCH_ARG(c10::optional>, output_size) = c10::nullopt; + TORCH_ARG(std::optional>, output_size) = c10::nullopt; }; /// `MaxUnpoolFuncOptions` specialized for @@ -450,12 +450,12 @@ struct FractionalMaxPoolOptions { TORCH_ARG(ExpandingArray, kernel_size); /// the target output size of the image - TORCH_ARG(c10::optional>, output_size) = c10::nullopt; + TORCH_ARG(std::optional>, output_size) = c10::nullopt; /// If one wants to have an output size as a ratio of the input size, this /// option can be given. 
This has to be a number or tuple in the range (0, 1) using ExpandingArrayDouble = torch::ExpandingArray; - TORCH_ARG(c10::optional, output_ratio) = c10::nullopt; + TORCH_ARG(std::optional, output_ratio) = c10::nullopt; TORCH_ARG(torch::Tensor, _random_samples) = Tensor(); }; diff --git a/torch/csrc/api/include/torch/nn/options/upsampling.h b/torch/csrc/api/include/torch/nn/options/upsampling.h index ca793beb97725..21df2b89998de 100644 --- a/torch/csrc/api/include/torch/nn/options/upsampling.h +++ b/torch/csrc/api/include/torch/nn/options/upsampling.h @@ -20,10 +20,10 @@ namespace nn { /// ``` struct TORCH_API UpsampleOptions { /// output spatial sizes. - TORCH_ARG(c10::optional>, size) = c10::nullopt; + TORCH_ARG(std::optional>, size) = c10::nullopt; /// multiplier for spatial size. - TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt; + TORCH_ARG(std::optional>, scale_factor) = c10::nullopt; /// the upsampling algorithm: one of "nearest", "linear", "bilinear", /// "bicubic" and "trilinear". Default: "nearest" @@ -40,7 +40,7 @@ struct TORCH_API UpsampleOptions { /// aligned, and thus preserving the values at those pixels. This only has /// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or /// "trilinear". Default: "False" - TORCH_ARG(c10::optional, align_corners) = c10::nullopt; + TORCH_ARG(std::optional, align_corners) = c10::nullopt; }; namespace functional { @@ -65,10 +65,10 @@ struct TORCH_API InterpolateFuncOptions { mode_t; /// output spatial sizes. - TORCH_ARG(c10::optional>, size) = c10::nullopt; + TORCH_ARG(std::optional>, size) = c10::nullopt; /// multiplier for spatial size. - TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt; + TORCH_ARG(std::optional>, scale_factor) = c10::nullopt; /// the upsampling algorithm: one of "nearest", "linear", "bilinear", /// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest" @@ -83,7 +83,7 @@ struct TORCH_API InterpolateFuncOptions { /// this operation *independent* of input size when `scale_factor` is /// kept the same. It is *required* when interpolating mode is "linear", /// "bilinear", "bicubic" or "trilinear". Default: "False" - TORCH_ARG(c10::optional, align_corners) = c10::nullopt; + TORCH_ARG(std::optional, align_corners) = c10::nullopt; /// recompute the scale_factor for use in the /// interpolation calculation. When `scale_factor` is passed as a parameter, @@ -95,7 +95,7 @@ struct TORCH_API InterpolateFuncOptions { /// used in the interpolation computation. Note that when `scale_factor` is /// floating-point, the recomputed scale_factor may differ from the one passed /// in due to rounding and precision issues. - TORCH_ARG(c10::optional, recompute_scale_factor) = c10::nullopt; + TORCH_ARG(std::optional, recompute_scale_factor) = c10::nullopt; /// flag to apply anti-aliasing. Using anti-alias /// option together with :attr:`align_corners` equals "False", interpolation diff --git a/torch/csrc/api/include/torch/nn/options/vision.h b/torch/csrc/api/include/torch/nn/options/vision.h index 814f4b6684d96..c012b40d21f69 100644 --- a/torch/csrc/api/include/torch/nn/options/vision.h +++ b/torch/csrc/api/include/torch/nn/options/vision.h @@ -28,7 +28,7 @@ struct TORCH_API GridSampleFuncOptions { /// padding mode for outside grid values. Default: Zeros TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; /// Specifies perspective to pixel as point. 
Default: false - TORCH_ARG(c10::optional, align_corners) = c10::nullopt; + TORCH_ARG(std::optional, align_corners) = c10::nullopt; }; } // namespace functional diff --git a/torch/csrc/api/include/torch/nn/utils/clip_grad.h b/torch/csrc/api/include/torch/nn/utils/clip_grad.h index e1023bd1eb5c7..fbb533662c7be 100644 --- a/torch/csrc/api/include/torch/nn/utils/clip_grad.h +++ b/torch/csrc/api/include/torch/nn/utils/clip_grad.h @@ -64,7 +64,7 @@ inline double clip_grad_norm_( // synchronizing the CPU and the gradients' device until the very end to // preserve async execution on the device. When checking for finite-ness, this // optional ensures we only sync once. - c10::optional total_norm = c10::nullopt; + std::optional total_norm = c10::nullopt; if (error_if_nonfinite) { total_norm = total_norm_tensor.item().toDouble(); TORCH_CHECK( diff --git a/torch/csrc/api/include/torch/nn/utils/convert_parameters.h b/torch/csrc/api/include/torch/nn/utils/convert_parameters.h index 2ac1d317c9922..6f62d483c4d8b 100644 --- a/torch/csrc/api/include/torch/nn/utils/convert_parameters.h +++ b/torch/csrc/api/include/torch/nn/utils/convert_parameters.h @@ -11,9 +11,9 @@ namespace utils { // in the same device. Currently, the conversion between model parameters // and single vector form is not supported for multiple allocations, // e.g. parameters in different GPUs, or mixture of CPU/GPU. -inline c10::optional _check_param_device( +inline std::optional _check_param_device( const torch::Tensor& param, - c10::optional old_param_device) { + std::optional old_param_device) { // Meet the first parameter if (old_param_device == c10::nullopt) { old_param_device = param.is_cuda() ? param.get_device() : -1; @@ -38,7 +38,7 @@ inline c10::optional _check_param_device( // Convert parameters to one vector inline torch::Tensor parameters_to_vector( const std::vector& parameters) { - c10::optional param_device; + std::optional param_device; std::vector vec; vec.reserve(parameters.size()); @@ -58,7 +58,7 @@ inline void vector_to_parameters( const torch::Tensor& vec, const std::vector& parameters) { // Flag for the device where the parameter is located - c10::optional param_device; + std::optional param_device; // Pointer for slicing the vector for each parameter int64_t pointer = 0; diff --git a/torch/csrc/api/include/torch/nn/utils/rnn.h b/torch/csrc/api/include/torch/nn/utils/rnn.h index eea517a2b60f3..ba8b0db427150 100644 --- a/torch/csrc/api/include/torch/nn/utils/rnn.h +++ b/torch/csrc/api/include/torch/nn/utils/rnn.h @@ -247,7 +247,7 @@ inline std::tuple pad_packed_sequence( PackedSequence sequence, bool batch_first = false, double padding_value = 0.0, - c10::optional total_length = torch::nullopt) { + std::optional total_length = torch::nullopt) { int64_t max_seq_length = sequence.batch_sizes().size(0); if (total_length.has_value()) { int64_t total_length_val = total_length.value(); diff --git a/torch/csrc/api/include/torch/optim/lbfgs.h b/torch/csrc/api/include/torch/optim/lbfgs.h index 99aa35d36e4b5..001b0cd33f259 100644 --- a/torch/csrc/api/include/torch/optim/lbfgs.h +++ b/torch/csrc/api/include/torch/optim/lbfgs.h @@ -17,11 +17,11 @@ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions { LBFGSOptions(double lr = 1); TORCH_ARG(double, lr) = 1; TORCH_ARG(int64_t, max_iter) = 20; - TORCH_ARG(c10::optional, max_eval) = c10::nullopt; + TORCH_ARG(std::optional, max_eval) = c10::nullopt; TORCH_ARG(double, tolerance_grad) = 1e-7; TORCH_ARG(double, tolerance_change) = 1e-9; TORCH_ARG(int64_t, history_size) = 100; 
- TORCH_ARG(c10::optional, line_search_fn) = c10::nullopt; + TORCH_ARG(std::optional, line_search_fn) = c10::nullopt; public: void serialize(torch::serialize::InputArchive& archive) override; @@ -45,7 +45,7 @@ struct TORCH_API LBFGSParamState TORCH_ARG(std::deque, old_dirs); TORCH_ARG(std::deque, old_stps); TORCH_ARG(std::deque, ro); - TORCH_ARG(c10::optional>, al) = c10::nullopt; + TORCH_ARG(std::optional>, al) = c10::nullopt; public: void serialize(torch::serialize::InputArchive& archive) override; @@ -82,7 +82,7 @@ class TORCH_API LBFGS : public Optimizer { void load(serialize::InputArchive& archive) override; private: - c10::optional _numel_cache; + std::optional _numel_cache; int64_t _numel(); Tensor _gather_flat_grad(); void _add_grad(const double step_size, const Tensor& update); diff --git a/torch/csrc/api/include/torch/serialize/input-archive.h b/torch/csrc/api/include/torch/serialize/input-archive.h index 83d1a543ddacb..f77b34aad0bd4 100644 --- a/torch/csrc/api/include/torch/serialize/input-archive.h +++ b/torch/csrc/api/include/torch/serialize/input-archive.h @@ -76,27 +76,27 @@ class TORCH_API InputArchive final { /// is not specified, the module is loaded to the original device. void load_from( const std::string& filename, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); /// Loads the `InputArchive` from a serialized representation stored in the /// given `stream`. Storage are remapped using device option. If device /// is not specified, the module is loaded to the original device. void load_from( std::istream& stream, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); // Loads given the specified flat array. void load_from( const char* data, size_t size, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); // Loads given the specified read and size functions. void load_from( const std::function& read_func, const std::function& size_func, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); // Returns the vector of keys in the input archive. 
std::vector keys(); diff --git a/torch/csrc/api/include/torch/special.h b/torch/csrc/api/include/torch/special.h index 7ad7e7689ebd6..d8346e1aa1d8c 100644 --- a/torch/csrc/api/include/torch/special.h +++ b/torch/csrc/api/include/torch/special.h @@ -596,7 +596,7 @@ inline Tensor& log1p_out(Tensor& result, const Tensor& self) { inline Tensor log_softmax( const Tensor& self, int64_t dim, - c10::optional dtype) { + std::optional dtype) { return torch::special_log_softmax(self, dim, dtype); } @@ -611,7 +611,7 @@ inline Tensor log_softmax( inline Tensor softmax( const Tensor& self, int64_t dim, - c10::optional dtype) { + std::optional dtype) { return torch::special_softmax(self, dim, dtype); } diff --git a/torch/csrc/api/include/torch/types.h b/torch/csrc/api/include/torch/types.h index 92be710cf4bf4..8a23cd122b8d1 100644 --- a/torch/csrc/api/include/torch/types.h +++ b/torch/csrc/api/include/torch/types.h @@ -39,7 +39,7 @@ namespace torch { using namespace at; // NOLINT using c10::nullopt; -using c10::optional; +using std::optional; using Dtype = at::ScalarType; diff --git a/torch/csrc/api/src/nn/modules/conv.cpp b/torch/csrc/api/src/nn/modules/conv.cpp index 20be11f221838..197c3cf0725cd 100644 --- a/torch/csrc/api/src/nn/modules/conv.cpp +++ b/torch/csrc/api/src/nn/modules/conv.cpp @@ -169,12 +169,12 @@ template class ConvNdImpl<3, Conv3dImpl>; template std::vector ConvTransposeNdImpl::_output_padding( const Tensor& input, - const c10::optional& output_size, + const std::optional& output_size, const ExpandingArray& stride, const ExpandingArray& padding, const ExpandingArray& kernel_size) { std::vector ret; - c10::optional output_size_ = output_size; + std::optional output_size_ = output_size; if (output_size_ == c10::nullopt) { ret = at::IntArrayRef(this->options.output_padding()).vec(); @@ -248,7 +248,7 @@ ConvTranspose1dImpl::ConvTranspose1dImpl(ConvTranspose1dOptions options_) Tensor ConvTranspose1dImpl::forward( const Tensor& input, - const c10::optional& output_size) { + const std::optional& output_size) { if (!std::get_if(&options.padding_mode())) { TORCH_CHECK( false, "Only `zeros` padding mode is supported for ConvTranspose1d"); @@ -285,7 +285,7 @@ ConvTranspose2dImpl::ConvTranspose2dImpl(ConvTranspose2dOptions options_) Tensor ConvTranspose2dImpl::forward( const Tensor& input, - const c10::optional& output_size) { + const std::optional& output_size) { if (!std::get_if(&options.padding_mode())) { TORCH_CHECK( false, "Only `zeros` padding mode is supported for ConvTranspose2d"); @@ -322,7 +322,7 @@ ConvTranspose3dImpl::ConvTranspose3dImpl(ConvTranspose3dOptions options_) Tensor ConvTranspose3dImpl::forward( const Tensor& input, - const c10::optional& output_size) { + const std::optional& output_size) { if (!std::get_if(&options.padding_mode())) { TORCH_CHECK( false, "Only `zeros` padding mode is supported for ConvTranspose3d"); diff --git a/torch/csrc/api/src/nn/modules/pooling.cpp b/torch/csrc/api/src/nn/modules/pooling.cpp index 1a3f29e235507..0b11b914dcc1c 100644 --- a/torch/csrc/api/src/nn/modules/pooling.cpp +++ b/torch/csrc/api/src/nn/modules/pooling.cpp @@ -229,7 +229,7 @@ void MaxUnpoolImpl::pretty_print(std::ostream& stream) const { Tensor MaxUnpool1dImpl::forward( const Tensor& input, const Tensor& indices, - const c10::optional>& output_size) { + const std::optional>& output_size) { return F::detail::max_unpool1d( input, indices, @@ -242,7 +242,7 @@ Tensor MaxUnpool1dImpl::forward( Tensor MaxUnpool2dImpl::forward( const Tensor& input, const Tensor& indices, - const 
c10::optional>& output_size) { + const std::optional>& output_size) { return F::detail::max_unpool2d( input, indices, @@ -255,7 +255,7 @@ Tensor MaxUnpool2dImpl::forward( Tensor MaxUnpool3dImpl::forward( const Tensor& input, const Tensor& indices, - const c10::optional>& output_size) { + const std::optional>& output_size) { return F::detail::max_unpool3d( input, indices, diff --git a/torch/csrc/api/src/optim/lbfgs.cpp b/torch/csrc/api/src/optim/lbfgs.cpp index bf54e9a878618..10739be623869 100644 --- a/torch/csrc/api/src/optim/lbfgs.cpp +++ b/torch/csrc/api/src/optim/lbfgs.cpp @@ -67,7 +67,7 @@ bool if_container_equal(T lhs, T rhs) { } bool operator==(const LBFGSParamState& lhs, const LBFGSParamState& rhs) { - auto isNull = [](const c10::optional>& val) { + auto isNull = [](const std::optional>& val) { return val == c10::nullopt; }; return (lhs.func_evals() == rhs.func_evals()) && @@ -194,7 +194,7 @@ static double _cubic_interpolate( double x2, double f2, double g2, - c10::optional> bounds = c10::nullopt) { + std::optional> bounds = c10::nullopt) { // ported from https://github.com/torch/optim/blob/master/polyinterp.lua // Compute bounds of interpolation area // NOLINTNEXTLINE(cppcoreguidelines-init-variables) diff --git a/torch/csrc/api/src/serialize/input-archive.cpp b/torch/csrc/api/src/serialize/input-archive.cpp index c18a041293aea..852f4eab1b52b 100644 --- a/torch/csrc/api/src/serialize/input-archive.cpp +++ b/torch/csrc/api/src/serialize/input-archive.cpp @@ -93,20 +93,20 @@ void InputArchive::read(const std::string& key, InputArchive& archive) { void InputArchive::load_from( const std::string& filename, - c10::optional device /*= c10::nullopt*/) { + std::optional device /*= c10::nullopt*/) { module_ = torch::jit::load(filename, std::move(device)); } void InputArchive::load_from( std::istream& stream, - c10::optional device /*= c10::nullopt*/) { + std::optional device /*= c10::nullopt*/) { module_ = torch::jit::load(stream, std::move(device)); } void InputArchive::load_from( const char* data, size_t size, - c10::optional device /*= c10::nullopt*/) { + std::optional device /*= c10::nullopt*/) { using caffe2::serialize::ReadAdapterInterface; class OurAdapter : public ReadAdapterInterface { public: @@ -136,7 +136,7 @@ void InputArchive::load_from( void InputArchive::load_from( const std::function& read_func, const std::function& size_func, - c10::optional device /*= c10::nullopt*/) { + std::optional device /*= c10::nullopt*/) { using caffe2::serialize::ReadAdapterInterface; class OurAdapter : public ReadAdapterInterface { public: diff --git a/torch/csrc/autograd/FunctionsManual.cpp b/torch/csrc/autograd/FunctionsManual.cpp index 4c0c324ad56ec..65c7fbb853610 100644 --- a/torch/csrc/autograd/FunctionsManual.cpp +++ b/torch/csrc/autograd/FunctionsManual.cpp @@ -60,19 +60,19 @@ Tensor apply_loss_reduction(const Tensor& unreduced, int64_t reduction) { return unreduced; } -static bool isDefined(const c10::optional& t) { +static bool isDefined(const std::optional& t) { return t.has_value() && t->defined(); } -Tensor toNonOptTensor(const c10::optional& t) { +Tensor toNonOptTensor(const std::optional& t) { return t.has_value() ? *t : Tensor(); } -Tensor toNonOptFwGrad(const c10::optional& t) { +Tensor toNonOptFwGrad(const std::optional& t) { return (t.has_value() && t->defined()) ? 
t->_fw_grad(/*level */ 0) : Tensor(); } -Tensor toNonOptPrimal(const c10::optional& t) { +Tensor toNonOptPrimal(const std::optional& t) { if (t.has_value() && t->defined()) { if (t->unsafeGetTensorImpl()->is_wrapped_number()) { return *t; @@ -605,7 +605,7 @@ Tensor div_tensor_self_backward( const Tensor& grad, T other, ScalarType self_st, - const c10::optional& rounding_mode) { + const std::optional& rounding_mode) { if (rounding_mode.has_value()) { return at::zeros_like(grad, grad.options().dtype(self_st)); } @@ -617,12 +617,12 @@ template Tensor div_tensor_self_backward( const Tensor&, Tensor, ScalarType, - const c10::optional&); + const std::optional&); template Tensor div_tensor_self_backward( const Tensor&, Scalar, ScalarType, - const c10::optional&); + const std::optional&); template Tensor div_tensor_self_backward( @@ -639,7 +639,7 @@ Tensor div_tensor_other_backward( const Tensor& grad, const Tensor& self, const Tensor& other, - const c10::optional& rounding_mode) { + const std::optional& rounding_mode) { if (rounding_mode.has_value()) { return at::zeros_like(grad, grad.options().dtype(other.scalar_type())); } @@ -1289,7 +1289,7 @@ Tensor convolution_jvp( at::SymIntArrayRef output_padding, const c10::SymInt& groups) { auto bias_t_opt = - bias_t.defined() ? c10::optional(bias_t) : c10::nullopt; + bias_t.defined() ? std::optional(bias_t) : c10::nullopt; return ( at::convolution_symint( input_t, @@ -1331,7 +1331,7 @@ Tensor _convolution_jvp( bool cudnn_enabled, bool allow_tf32) { auto bias_t_opt = - bias_t.defined() ? c10::optional(bias_t) : c10::nullopt; + bias_t.defined() ? std::optional(bias_t) : c10::nullopt; return ( at::_convolution_symint( input_t, @@ -1520,8 +1520,8 @@ static Tensor sparse_mask_like_grad( std::tuple sparse_sampled_addmm_backward( const Tensor& grad, const Tensor& self, - const c10::optional& mat1, - const c10::optional& mat2, + const std::optional& mat1, + const std::optional& mat2, const Scalar& alpha, const Scalar& beta, const std::array& grad_input_mask) { @@ -1819,7 +1819,7 @@ Tensor var_backward( Tensor grad, const Tensor& self, at::OptionalIntArrayRef dim_opt, - const c10::optional& correction_opt, + const std::optional& correction_opt, bool keepdim) { const auto correction = correction_opt.value_or(1).toSymFloat(); if (self.dim() == 0 || !dim_opt.has_value()) { @@ -1852,7 +1852,7 @@ Tensor std_backward( const Tensor& grad, const Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction_opt, + const std::optional& correction_opt, bool keepdim) { auto grad_var = (grad / (result * 2)).masked_fill_(result == 0, 0); return var_backward(std::move(grad_var), self, dim, correction_opt, keepdim); @@ -1863,7 +1863,7 @@ Tensor var_mean_backward( const Tensor& gmean, const Tensor& self, at::OptionalIntArrayRef dim_opt, - const c10::optional& correction_opt, + const std::optional& correction_opt, bool keepdim) { Tensor gself; if (gvar.defined()) { @@ -1887,7 +1887,7 @@ Tensor std_mean_backward( const Tensor& self, const Tensor& std, at::OptionalIntArrayRef dim_opt, - const c10::optional& correction_opt, + const std::optional& correction_opt, bool keepdim) { Tensor gself; if (gstd.defined()) { @@ -2241,7 +2241,7 @@ Tensor infinitely_differentiable_mish_backward( Tensor infinitely_differentiable_logit_backward( const Tensor& grad, const Tensor& self, - c10::optional eps) { + std::optional eps) { if (eps) { const double lo = eps.value(); const double hi = 1.0 - lo; @@ -2262,7 +2262,7 @@ Tensor binary_cross_entropy_target_backward( const Tensor& grad, 
const Tensor& self, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction) { auto grad_target = at::logit(self).neg_(); @@ -2295,7 +2295,7 @@ Tensor binary_cross_entropy_double_backward_target( const Tensor& grad_output, const Tensor& self, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction) { auto res = -grad * grad_output; @@ -2332,8 +2332,8 @@ Tensor binary_cross_entropy_with_logits_backward( const Tensor& grad, const Tensor& input, const Tensor& target, - const c10::optional& weight, - const c10::optional& pos_weight, + const std::optional& weight, + const std::optional& pos_weight, int64_t reduction) { // Trivial case if (grad._is_zerotensor()) { @@ -2387,8 +2387,8 @@ Tensor binary_cross_entropy_with_logits_target_backward( const Tensor& grad_output, const Tensor& self, const Tensor& target, - const c10::optional& weight, - const c10::optional& pos_weight, + const std::optional& weight, + const std::optional& pos_weight, int64_t reduction) { if (grad_output._is_zerotensor()) { return at::_efficientzerotensor(target.sizes(), target.options()); @@ -2479,7 +2479,7 @@ Tensor binary_cross_entropy_double_backward( const Tensor& grad, const Tensor& input, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction) { auto eps = 1e-12; auto inp_pl_eps = input + eps; @@ -2514,7 +2514,7 @@ Tensor binary_cross_entropy_double_backward_grad_output( const Tensor& grad, const Tensor& input, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction) { auto eps = 1e-12; // gradient wrt grad_output @@ -3186,7 +3186,7 @@ Tensor as_strided_backward( auto storage = grad.new_zeros_symint(c10::SymIntArrayRef(base_size)); // prepare indices tensor if we will do index_add_ later - c10::optional flatten_full_indices; + std::optional flatten_full_indices; if (inp_maybe_overlap || out_maybe_overlap) { flatten_full_indices = // TODO: should we symint-ify arange? Need SymScalar. @@ -3334,8 +3334,8 @@ Tensor slice_backward_wrapper( const at::Tensor& grad, const c10::SymIntArrayRef& input_sizes, int64_t dim, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, c10::SymInt step) { auto start_val = start.has_value() ? start.value() : 0; auto end_val = end.has_value() ? end.value() : INT64_MAX; @@ -4617,17 +4617,17 @@ static Tensor expand_as_dim1(const Tensor& src, const Tensor& target) { std::tuple batchnorm_double_backward( const Tensor& input, - const c10::optional& gamma, + const std::optional& gamma, const Tensor& ggI, const Tensor& ggG, const Tensor& ggB, const Tensor& gO, - const c10::optional& running_mean, - const c10::optional& running_var, + const std::optional& running_mean, + const std::optional& running_var, bool training, double eps, - const c10::optional& save_mean, - const c10::optional& save_invstd, + const std::optional& save_mean, + const std::optional& save_invstd, std::array output_mask) { bool affine = isDefined(gamma); // TODO: Do we have a ScalarOrTensor type? Would such a thing exist? 
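The `isDefined` / `toNonOptTensor` / `toNonOptFwGrad` helpers touched above layer two notions of "missing": the optional itself may be empty, or it may hold a tensor that is undefined. A minimal sketch of that two-level check, using a hypothetical `FakeTensor` stand-in rather than `at::Tensor`:

```
#include <iostream>
#include <optional>

// Hypothetical stand-in for at::Tensor: only the defined()/undefined notion.
struct FakeTensor {
  bool has_storage = false;
  bool defined() const { return has_storage; }
};

// Engaged AND the held tensor is defined -- the two-level check used above.
bool isDefined(const std::optional<FakeTensor>& t) {
  return t.has_value() && t->defined();
}

// Collapse an optional tensor to a plain (possibly undefined) tensor.
FakeTensor toNonOptTensor(const std::optional<FakeTensor>& t) {
  return t.has_value() ? *t : FakeTensor{};
}

int main() {
  std::optional<FakeTensor> empty;                          // no value at all
  std::optional<FakeTensor> held_undefined = FakeTensor{};  // value, not defined
  std::optional<FakeTensor> held_defined = FakeTensor{true};

  std::cout << isDefined(empty) << isDefined(held_undefined)
            << isDefined(held_defined) << "\n";             // prints "001"
  std::cout << toNonOptTensor(empty).defined() << "\n";     // prints "0"
  return 0;
}
```
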
@@ -4756,7 +4756,7 @@ std::tuple batchnorm_double_backward( std::tuple layer_norm_double_backward( const Tensor& input_t, - const c10::optional& gamma, + const std::optional& gamma, const Tensor& ggI, const Tensor& ggG, const Tensor& ggB, @@ -4905,7 +4905,7 @@ infinitely_differentiable_native_group_norm_backward( const Tensor& X, const Tensor& mean, const Tensor& rstd, - const c10::optional& gamma, + const std::optional& gamma, c10::SymInt N, const c10::SymInt& C, c10::SymInt HxW, @@ -4987,9 +4987,9 @@ infinitely_differentiable_native_group_norm_backward( std::tuple _trilinear_backward( const Tensor& grad_out, - const c10::optional& i1, - const c10::optional& i2, - const c10::optional& i3, + const std::optional& i1, + const std::optional& i2, + const std::optional& i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, @@ -5083,7 +5083,7 @@ Tensor embedding_dense_double_backward_symint( Tensor index_backward( Tensor zeros_like_self, - const torch::List>& indices, + const torch::List>& indices, const Tensor& grad) { return (areAnyTensorSubclassLike({zeros_like_self, grad}) || areAnyOptionalTensorSubclassLike(indices)) @@ -6120,7 +6120,7 @@ static Tensor _norm_jvp( // Computes the jvp for `input * weight + bias` where weight and bias may be // undefined Possibly modifies the input inplace static Tensor _affine_jvp( - const c10::optional& input_p, + const std::optional& input_p, Tensor& input_t, const Tensor& weight_p, const Tensor& weight_t, @@ -6161,8 +6161,8 @@ Tensor batch_norm_jvp( const Tensor& weight_t, const Tensor& bias_p, const Tensor& bias_t, - const c10::optional& running_mean, - const c10::optional& running_var, + const std::optional& running_mean, + const std::optional& running_var, const Tensor& saved_mean, const Tensor& saved_invstd, bool train, @@ -6198,8 +6198,8 @@ Tensor batch_norm_jvp( result_t = input_t * invstd_p; } - c10::optional result_p = weight_p.defined() - ? c10::optional((input_p - mean_p) * invstd_p) + std::optional result_p = weight_p.defined() + ? std::optional((input_p - mean_p) * invstd_p) : c10::nullopt; return _affine_jvp( result_p, @@ -6237,8 +6237,8 @@ Tensor layer_norm_jvp( auto invstd_p = saved_invstd.view(view_size); auto result_t = _norm_jvp(input_p, input_t, mean_p, invstd_p, dims, numel); - c10::optional result_p = weight_p.defined() - ? c10::optional((input_p - mean_p) * invstd_p) + std::optional result_p = weight_p.defined() + ? 
std::optional((input_p - mean_p) * invstd_p) : c10::nullopt; return _affine_jvp( result_p, @@ -6280,7 +6280,7 @@ Tensor group_norm_jvp( /*eps=*/0) .view(input_shape); - c10::optional result_p = c10::nullopt; + std::optional result_p = c10::nullopt; if (weight_p.defined()) { std::vector view_size(input_t_reshaped.dim(), 1); view_size[1] = input_t_reshaped.size(1); @@ -6983,9 +6983,9 @@ mkldnn_rnn_layer_differentiable_backward( const Tensor& output, const Tensor& hy_, const Tensor& cy_, - const c10::optional& grad_output_r_opt, - const c10::optional& grad_hy_r_opt, - const c10::optional& grad_cy_r_opt, + const std::optional& grad_output_r_opt, + const std::optional& grad_hy_r_opt, + const std::optional& grad_cy_r_opt, bool reverse, int64_t mode, int64_t hidden_size, diff --git a/torch/csrc/autograd/FunctionsManual.h b/torch/csrc/autograd/FunctionsManual.h index c78f2b80c806a..dedff70be1ba3 100644 --- a/torch/csrc/autograd/FunctionsManual.h +++ b/torch/csrc/autograd/FunctionsManual.h @@ -31,14 +31,14 @@ struct TORCH_API IndexRangeGenerator { size_t i = 0; }; -TORCH_API Tensor toNonOptFwGrad(const c10::optional& t); -TORCH_API Tensor toNonOptPrimal(const c10::optional& t); -TORCH_API Tensor toNonOptTensor(const c10::optional& t); +TORCH_API Tensor toNonOptFwGrad(const std::optional& t); +TORCH_API Tensor toNonOptPrimal(const std::optional& t); +TORCH_API Tensor toNonOptTensor(const std::optional& t); -TORCH_API inline c10::optional wrap_opt_if( +TORCH_API inline std::optional wrap_opt_if( const Tensor& t, const bool cond) { - using OptTensor = c10::optional; + using OptTensor = std::optional; return cond ? OptTensor(t) : static_cast(c10::nullopt); } @@ -154,12 +154,12 @@ at::Tensor div_tensor_self_backward( const Tensor& grad, T other, ScalarType self_st, - const c10::optional& rounding_mode); + const std::optional& rounding_mode); at::Tensor div_tensor_other_backward( const Tensor& grad, const Tensor& self, const Tensor& other, - const c10::optional& rounding_mode); + const std::optional& rounding_mode); at::Tensor mvlgamma_backward( const at::Tensor& grad, const at::Tensor& self, @@ -314,8 +314,8 @@ at::Tensor mm_mat1_sparse_backward( std::tuple sparse_sampled_addmm_backward( const Tensor& grad, const Tensor& self, - const c10::optional& mat1, - const c10::optional& mat2, + const std::optional& mat1, + const std::optional& mat2, const Scalar& alpha, const Scalar& beta, const std::array& grad_input_mask); @@ -367,21 +367,21 @@ at::Tensor var_backward( at::Tensor grad, const at::Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, + const std::optional& correction, bool keepdim); at::Tensor var_jvp( const at::Tensor& self_t, const at::Tensor& self_p, const at::Tensor& result, at::OptionalIntArrayRef dim_opt, - const c10::optional& correction, + const std::optional& correction, bool keepdim); at::Tensor std_backward( const at::Tensor& result, const at::Tensor& grad, const at::Tensor& self, at::OptionalIntArrayRef dim, - const c10::optional& correction, + const std::optional& correction, bool keepdim); Tensor mean_backward( const Tensor& grad, @@ -394,7 +394,7 @@ Tensor var_mean_backward( const Tensor& gmean, const Tensor& self, at::OptionalIntArrayRef dim_opt, - const c10::optional& correction, + const std::optional& correction, bool keepdim); Tensor std_mean_backward( const Tensor& gstd, @@ -402,7 +402,7 @@ Tensor std_mean_backward( const Tensor& self, const Tensor& std, at::OptionalIntArrayRef dim_opt, - const c10::optional& correction, + const std::optional& 
correction, bool keepdim); at::Tensor cholesky_backward( const at::Tensor& grad, @@ -465,33 +465,33 @@ at::Tensor infinitely_differentiable_mish_backward( Tensor infinitely_differentiable_logit_backward( const Tensor& grad, const Tensor& self, - c10::optional eps); + std::optional eps); Tensor binary_cross_entropy_target_backward( const Tensor& grad, const Tensor& self, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction); Tensor binary_cross_entropy_double_backward_target( const Tensor& grad, const Tensor& grad_output, const Tensor& self, const Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction); Tensor binary_cross_entropy_with_logits_backward( const Tensor& grad, const Tensor& input, const Tensor& target, - const c10::optional& weight_opt, - const c10::optional& pos_weight_opt, + const std::optional& weight_opt, + const std::optional& pos_weight_opt, int64_t reduction); at::Tensor binary_cross_entropy_with_logits_target_backward( const at::Tensor& grad_output, const at::Tensor& self, const at::Tensor& target, - const c10::optional& weight, - const c10::optional& pos_weight, + const std::optional& weight, + const std::optional& pos_weight, int64_t reduction); at::Tensor log_sigmoid_double_backward( const at::Tensor& grad, @@ -506,13 +506,13 @@ at::Tensor binary_cross_entropy_double_backward( const at::Tensor& grad, const at::Tensor& input, const at::Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction); at::Tensor binary_cross_entropy_double_backward_grad_output( const at::Tensor& grad, const at::Tensor& input, const at::Tensor& target, - const c10::optional& weight, + const std::optional& weight, int64_t reduction); at::Tensor smooth_l1_loss_double_backward( const at::Tensor& grad, @@ -577,7 +577,7 @@ at::Tensor embedding_dense_double_backward_symint( const c10::SymInt& padding_idx); at::Tensor index_backward( at::Tensor zeros_like_self, - const torch::List>& indices, + const torch::List>& indices, const at::Tensor& grad); at::Tensor _cudnn_ctc_loss_backward( const at::Tensor& grad_out, @@ -611,8 +611,8 @@ Tensor slice_backward_wrapper( const at::Tensor& grad, const c10::SymIntArrayRef& input_sizes, int64_t dim, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, c10::SymInt step); std::tuple linalg_eig_jvp( const Tensor& dA, @@ -667,9 +667,9 @@ std::tuple linalg_solve_triangular_backward( std::array output_mask); std::tuple _trilinear_backward( const Tensor& grad_out, - const c10::optional& i1, - const c10::optional& i2, - const c10::optional& i3, + const std::optional& i1, + const std::optional& i2, + const std::optional& i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, @@ -692,17 +692,17 @@ Tensor linalg_matrix_exp_differential( bool adjoint); std::tuple batchnorm_double_backward( const Tensor& input, - const c10::optional& gamma, + const std::optional& gamma, const Tensor& ggI, const Tensor& ggG, const Tensor& ggB, const Tensor& gO, - const c10::optional& running_mean, - const c10::optional& running_var, + const std::optional& running_mean, + const std::optional& running_var, bool training, double eps, - const c10::optional& save_mean, - const c10::optional& save_invstd, + const std::optional& save_mean, + const std::optional& save_invstd, std::array output_mask); std::tuple _euclidean_dist_backward( const Tensor& grad, @@ -752,7 +752,7 @@ infinitely_differentiable_native_group_norm_backward( 
const Tensor& X, const Tensor& mean, const Tensor& rstd, - const c10::optional& gamma, + const std::optional& gamma, c10::SymInt N, const c10::SymInt& C, c10::SymInt HxW, @@ -790,7 +790,7 @@ Tensor amaxamin_jvp( bool keepdim); std::tuple layer_norm_double_backward( const Tensor& input, - const c10::optional& gamma, + const std::optional& gamma, const Tensor& ggI, const Tensor& ggG, const Tensor& ggB, @@ -919,8 +919,8 @@ Tensor batch_norm_jvp( const Tensor& weight_t, const Tensor& bias_p, const Tensor& bias_t, - const c10::optional& running_mean, - const c10::optional& running_var, + const std::optional& running_mean, + const std::optional& running_var, const Tensor& saved_mean, const Tensor& saved_invstd, bool train, @@ -1082,9 +1082,9 @@ mkldnn_rnn_layer_differentiable_backward( const Tensor& output, const Tensor& hy_, const Tensor& cy_, - const c10::optional& grad_output_r_opt, - const c10::optional& grad_hy_r_opt, - const c10::optional& grad_cy_r_opt, + const std::optional& grad_output_r_opt, + const std::optional& grad_hy_r_opt, + const std::optional& grad_cy_r_opt, bool reverse, int64_t mode, int64_t hidden_size, diff --git a/torch/csrc/autograd/TraceTypeManual.cpp b/torch/csrc/autograd/TraceTypeManual.cpp index 4134ef6d992ba..46e4014d8dd13 100644 --- a/torch/csrc/autograd/TraceTypeManual.cpp +++ b/torch/csrc/autograd/TraceTypeManual.cpp @@ -51,7 +51,7 @@ Tensor& copy_(Tensor& self, const Tensor& src, bool non_blocking) { const Tensor& resize_( const Tensor& self, IntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { if (torch::jit::tracer::isTracing()) { if (jit::tracer::ArgumentStash::hasIntArrayRef("size")) { jit::tracer::ArgumentStash::popIntArrayRef("size"); @@ -70,7 +70,7 @@ const Tensor& resize_( const Tensor& resize_as_( const Tensor& self, const Tensor& the_template, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { if (torch::jit::tracer::isTracing()) { jit::tracer::warn("resize_as_", jit::tracer::WARN_RESIZE); jit::tracer::delValueTrace(self); diff --git a/torch/csrc/autograd/VariableTypeManual.cpp b/torch/csrc/autograd/VariableTypeManual.cpp index 38a63640c11e6..20f66694677e8 100644 --- a/torch/csrc/autograd/VariableTypeManual.cpp +++ b/torch/csrc/autograd/VariableTypeManual.cpp @@ -240,7 +240,7 @@ const Tensor& resize_( c10::DispatchKeySet ks, const Tensor& self, SymIntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { auto& self_ = unpack(self, "self", 0); if (self.requires_grad()) { AT_ERROR("cannot resize variables that require grad"); @@ -262,7 +262,7 @@ const Tensor& resize_as_( c10::DispatchKeySet ks, const Tensor& self, const Tensor& the_template, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { auto& self_ = unpack(self, "self", 0); auto& the_template_ = unpack(the_template, "the_template", 1); if (self.requires_grad()) { @@ -400,7 +400,7 @@ static const Tensor& resize_( c10::DispatchKeySet ks, const Tensor& self, SymIntArrayRef size, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { // Hold sizes to verify if we actually resize `self`. // Explicitly copy data, since resizing can move original data // and make references invalid. 
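
A recurring idiom in these hunks is an optional that is either produced conditionally (as in the `wrap_opt_if` helper above) or accepted as a trailing parameter defaulted to nullopt (as in the `resize_` / `resize_as_` overloads above). The following is a minimal, self-contained C++17 sketch of both idioms; the names and types are hypothetical stand-ins for illustration, not code from this patch.

```
#include <iostream>
#include <optional>
#include <string>

// Hypothetical illustration only; these names are not from the patch.
// Conditionally wrap a value, mirroring the wrap_opt_if-style helper above:
// return an engaged optional only when `cond` holds.
std::optional<std::string> wrap_if(const std::string& s, bool cond) {
  return cond ? std::optional<std::string>(s) : std::nullopt;
}

// Optional trailing parameter defaulted to std::nullopt, mirroring signatures
// such as the optional memory-format argument of resize_ above.
void report(int value, std::optional<std::string> label = std::nullopt) {
  std::cout << label.value_or("unlabeled") << ": " << value << '\n';
}

int main() {
  report(1);                           // caller omits the optional argument
  report(2, "explicit");               // caller supplies a value
  report(3, wrap_if("maybe", false));  // a disengaged optional propagates through
  return 0;
}
```
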
@@ -424,7 +424,7 @@ static const Tensor& resize_as_( c10::DispatchKeySet ks, const Tensor& self, const Tensor& the_template, - c10::optional optional_memory_format) { + std::optional optional_memory_format) { // Hold sizes to verify if we actually resize `self`. // Explicitly copy data, since resizing can move original data // and make references invalid. diff --git a/torch/csrc/autograd/VariableTypeUtils.h b/torch/csrc/autograd/VariableTypeUtils.h index b8fa4b6c101a7..d5fe8a70dae17 100644 --- a/torch/csrc/autograd/VariableTypeUtils.h +++ b/torch/csrc/autograd/VariableTypeUtils.h @@ -166,7 +166,7 @@ struct Flatten : IterArgs { void operator()(const at::Tensor& x) { out.emplace_back(x); } - void operator()(const c10::optional& x) { + void operator()(const std::optional& x) { if (x.has_value()) out.emplace_back(x.value()); } @@ -233,8 +233,8 @@ inline at::Tensor as_view( } // If they cannot be shared, create the required view infos - c10::optional new_bw_info; - c10::optional new_fw_info; + std::optional new_bw_info; + std::optional new_fw_info; if (is_bw_differentiable) { auto bw_view_func = view_func ? view_func->clone_and_set() : nullptr; @@ -298,7 +298,7 @@ inline void check_no_requires_grad( } inline void check_no_requires_grad( - const c10::optional& tensor, + const std::optional& tensor, const char* name, const char* fn_name = "") { if (tensor.has_value()) { @@ -320,14 +320,14 @@ inline void check_no_requires_grad( } inline void check_no_requires_grad( - const c10::List>& tensors, + const c10::List>& tensors, const char* name, const char* fn_name = "") { // GradMode check is expensive, so check it only once for TensorLists if (!GradMode::is_enabled()) { return; } - for (c10::optional tensor : tensors) { + for (std::optional tensor : tensors) { if (tensor.has_value()) { check_no_requires_grad(*tensor, name, fn_name, /*check_grad_mode*/ false); } @@ -345,11 +345,11 @@ inline std::vector make_saved_variable_list( // Assumed that saved tensor lists are never inplace outputs inline std::vector make_saved_variable_list( - const c10::List>& tensors, + const c10::List>& tensors, const bool is_output = false) { return fmap( tensors, - [&is_output](const c10::optional& tensor) -> SavedVariable { + [&is_output](const std::optional& tensor) -> SavedVariable { if (tensor.has_value()) { return SavedVariable{*tensor, is_output /* is output */}; } else { diff --git a/torch/csrc/autograd/autograd.cpp b/torch/csrc/autograd/autograd.cpp index fd4265619fccd..4a550e7006389 100644 --- a/torch/csrc/autograd/autograd.cpp +++ b/torch/csrc/autograd/autograd.cpp @@ -165,7 +165,7 @@ static variable_list run_backward( void backward( const variable_list& tensors, const variable_list& grad_tensors, - c10::optional retain_graph, + std::optional retain_graph, bool create_graph, const variable_list& inputs) { variable_list gradients = _make_grads(tensors, grad_tensors); @@ -186,7 +186,7 @@ variable_list grad( const variable_list& outputs, const variable_list& inputs, const variable_list& grad_outputs, - c10::optional retain_graph, + std::optional retain_graph, bool create_graph, bool allow_unused) { variable_list gradients = _make_grads(outputs, grad_outputs); diff --git a/torch/csrc/autograd/autograd.h b/torch/csrc/autograd/autograd.h index 3537df9bc4a7d..94ee179225a4c 100644 --- a/torch/csrc/autograd/autograd.h +++ b/torch/csrc/autograd/autograd.h @@ -47,7 +47,7 @@ namespace torch::autograd { TORCH_API void backward( const variable_list& tensors, const variable_list& grad_tensors = {}, - c10::optional retain_graph = 
c10::nullopt, + std::optional retain_graph = c10::nullopt, bool create_graph = false, const variable_list& inputs = {}); @@ -81,7 +81,7 @@ TORCH_API variable_list grad( const variable_list& outputs, const variable_list& inputs, const variable_list& grad_outputs = {}, - c10::optional retain_graph = c10::nullopt, + std::optional retain_graph = c10::nullopt, bool create_graph = false, bool allow_unused = false); diff --git a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp index 2cfca6817e855..acc8986efa6a2 100644 --- a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp +++ b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp @@ -339,12 +339,12 @@ static void autogradNotImplementedFallbackImpl( std::vector(stack->begin() + stack_start, stack->end()); std::vector> impl_saved; impl_saved.reserve(num_tensor_inputs); - std::vector> storage_saved; + std::vector> storage_saved; storage_saved.reserve(num_tensor_inputs); _foreach_tensor( [&](size_t idx, size_t _, const at::Tensor& t) { storage_saved.push_back( - t.has_storage() ? c10::optional(t.storage()) + t.has_storage() ? std::optional(t.storage()) : c10::nullopt); impl_saved.push_back(t.getIntrusivePtr()); }, diff --git a/torch/csrc/autograd/custom_function.cpp b/torch/csrc/autograd/custom_function.cpp index 41e2f1991a52b..1cf94bbe048fe 100644 --- a/torch/csrc/autograd/custom_function.cpp +++ b/torch/csrc/autograd/custom_function.cpp @@ -28,7 +28,7 @@ namespace torch::autograd { static void _process_forward_mode_AD( const variable_list& inputs, std::unordered_map inputs_mapping, - const at::ArrayRef> raw_outputs, + const at::ArrayRef> raw_outputs, const optional_variable_list& outputs, const std::unordered_set& non_differentiable, const std::unordered_set& dirty_inputs, @@ -258,7 +258,7 @@ static optional_variable_list _process_backward_mode_ad( const std::unordered_map& inputs_mapping, const std::unordered_set& non_differentiable, const std::unordered_set& dirty_inputs, - const at::ArrayRef> raw_outputs, + const at::ArrayRef> raw_outputs, const std::shared_ptr& cdata, const std::unordered_set& to_save_if_setup_context, const _view_as_self_fn_t& view_as_self_fn) { @@ -438,7 +438,7 @@ optional_variable_list _wrap_outputs( const variable_list& input_vars, const std::unordered_set& non_differentiable, const std::unordered_set& dirty_inputs, - const at::ArrayRef> raw_outputs, + const at::ArrayRef> raw_outputs, const std::shared_ptr& cdata, const _jvp_fn_t& jvp_user_function, const std::unordered_set& to_save_if_setup_context, diff --git a/torch/csrc/autograd/custom_function.h b/torch/csrc/autograd/custom_function.h index ebabc45334a5d..8c20bd8078207 100644 --- a/torch/csrc/autograd/custom_function.h +++ b/torch/csrc/autograd/custom_function.h @@ -12,15 +12,15 @@ namespace torch::autograd { -using optional_variable_list = std::vector>; +using optional_variable_list = std::vector>; using _jvp_fn_t = std::function; using _view_as_self_fn_t = std::function; -TORCH_API std::vector> _wrap_outputs( +TORCH_API std::vector> _wrap_outputs( const variable_list& input_vars, const std::unordered_set& non_differentiable, const std::unordered_set& dirty_inputs, - const at::ArrayRef> raw_outputs, + const at::ArrayRef> raw_outputs, const std::shared_ptr& cdata, const _jvp_fn_t& jvp_user_function, const std::unordered_set& to_save_if_setup_context, @@ -41,7 +41,7 @@ using forward_t = decltype(X::forward(nullptr, std::declval()...)); /// `forward` can take as many arguments as you want 
and should return either a /// variable list or a Variable. Use of any direct Variable arguments will be /// registered in the graph but no vectors/sets or any other data structures -/// will be traversed. You can use c10::optional as one of the arguments +/// will be traversed. You can use std::optional as one of the arguments /// and it will be registered as a variable in the graph if the argument has a /// value. It should take a pointer to `torch::autograd::AutogradContext` as the /// first argument. Variables can be saved in the `ctx` using @@ -247,7 +247,7 @@ struct ExtractVariables : IterArgs { variable_list& list_; ExtractVariables(std::vector& is_var, variable_list& list) : is_var_(is_var), list_(list) {} - void operator()(const c10::optional& x) { + void operator()(const std::optional& x) { // NOLINTNEXTLINE(bugprone-branch-clone) if (x.has_value() && x.value().defined()) { is_var_.push_back(true); @@ -282,30 +282,30 @@ inline void extract_vars( template std::enable_if_t, T> to_output_type( - std::vector>& output_list) { + std::vector>& output_list) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) variable_list result; std::transform( output_list.begin(), output_list.end(), std::back_inserter(result), - [](const c10::optional& var) { return *var; }); + [](const std::optional& var) { return *var; }); return result; } template std::enable_if_t, T> to_output_type( - std::vector>& output_list) { + std::vector>& output_list) { return *output_list[0]; } -inline std::vector> to_optional(Variable& output) { - return std::vector>{output}; +inline std::vector> to_optional(Variable& output) { + return std::vector>{output}; } -inline std::vector> to_optional(variable_list& output) { +inline std::vector> to_optional(variable_list& output) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) - std::vector> result; + std::vector> result; std::transform( output.begin(), output.end(), diff --git a/torch/csrc/autograd/function.h b/torch/csrc/autograd/function.h index becc73396e66d..c8c3538a061f1 100644 --- a/torch/csrc/autograd/function.h +++ b/torch/csrc/autograd/function.h @@ -239,7 +239,7 @@ struct TORCH_API Node : std::enable_shared_from_this { * elements are on different devices (across multiple GPUs, for example) * they may have different streams. 
*/ - c10::optional stream() { + std::optional stream() { auto opt_device_type = at::getAccelerator(); if (!opt_device_type.has_value()) { return c10::nullopt; @@ -703,7 +703,7 @@ struct MakeNextFunctionList : IterArgs { void operator()(const Variable* variable) { operator()(*variable); } - void operator()(const c10::optional& variable) { + void operator()(const std::optional& variable) { if (variable.has_value()) { operator()(*variable); } else { diff --git a/torch/csrc/autograd/functions/comm.cpp b/torch/csrc/autograd/functions/comm.cpp index 9bcd511285734..e2f23f363d7a0 100644 --- a/torch/csrc/autograd/functions/comm.cpp +++ b/torch/csrc/autograd/functions/comm.cpp @@ -17,9 +17,9 @@ namespace torch { namespace autograd { Scatter::Scatter( std::vector devices, - c10::optional> chunk_sizes, + std::optional> chunk_sizes, int64_t dim, - c10::optional>> streams, + std::optional>> streams, bool unsqueeze_scalars) : devices_(std::move(devices)), chunk_sizes_(std::move(chunk_sizes)), diff --git a/torch/csrc/autograd/functions/comm.h b/torch/csrc/autograd/functions/comm.h index 9b1f0daf50bce..b0e6900729955 100644 --- a/torch/csrc/autograd/functions/comm.h +++ b/torch/csrc/autograd/functions/comm.h @@ -17,9 +17,9 @@ namespace autograd { struct TORCH_CUDA_CU_API Scatter : public Node { explicit Scatter( std::vector devices, - c10::optional> chunk_sizes = c10::nullopt, + std::optional> chunk_sizes = c10::nullopt, int64_t dim = 0, - c10::optional>> streams = + std::optional>> streams = c10::nullopt, bool unsqueeze_scalars = false); ~Scatter() override; @@ -27,9 +27,9 @@ struct TORCH_CUDA_CU_API Scatter : public Node { variable_list apply(variable_list&& inputs) override; std::vector devices_; - c10::optional> chunk_sizes_; + std::optional> chunk_sizes_; int64_t dim_; - c10::optional>> streams_; + std::optional>> streams_; bool unsqueeze_scalars_; }; diff --git a/torch/csrc/autograd/functions/utils.h b/torch/csrc/autograd/functions/utils.h index 3cc2575da8f5d..db916dc0bbbfa 100644 --- a/torch/csrc/autograd/functions/utils.h +++ b/torch/csrc/autograd/functions/utils.h @@ -46,7 +46,7 @@ struct ComputeRequiresGrad : IterArgs { out = true; } } - void operator()(const c10::optional& tensor) { + void operator()(const std::optional& tensor) { if (tensor.has_value()) { (*this)(*tensor); } @@ -88,7 +88,7 @@ inline void set_history( } } -inline bool isFwGradDefined(const c10::optional& t) { +inline bool isFwGradDefined(const std::optional& t) { return t.has_value() && t->defined() && t->_fw_grad(/*level */ 0).defined(); } @@ -101,7 +101,7 @@ inline bool isFwGradDefinedTensorList(const at::ITensorListRef& variables) { } inline bool isFwGradDefinedTensorList( - const c10::List>& li) { + const c10::List>& li) { bool ret = false; for (auto i : c10::irange(li.size())) { auto t = li.get(i); diff --git a/torch/csrc/autograd/graph_task.h b/torch/csrc/autograd/graph_task.h index 03a9647cad833..e4a7ae4dad18e 100644 --- a/torch/csrc/autograd/graph_task.h +++ b/torch/csrc/autograd/graph_task.h @@ -125,7 +125,7 @@ struct GraphTask : std::enable_shared_from_this { // Per-device current streams of the execute() that called this GraphTask. // These will be synced with leaf_streams in exec_post_processing. - std::vector> caller_current_streams_; + std::vector> caller_current_streams_; // Collects caller_current_streams_ for the accelerator device. 
void stash_current_streams(); diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp index e04d853198fbb..9eb1031ff02c0 100644 --- a/torch/csrc/autograd/init.cpp +++ b/torch/csrc/autograd/init.cpp @@ -1081,7 +1081,7 @@ static PyObject* push_on_torch_dispatch_stack( using c10::impl::TorchDispatchModeKey; // When we push a mode onto the mode stack, we need to // check if it's an "infra" mode, by checking its _mode_key attribute. - c10::optional mode_key = c10::nullopt; + std::optional mode_key = c10::nullopt; py::object maybe_mode_key_obj = PyObject_FastGetAttrString(arg, "_mode_key"); if (maybe_mode_key_obj) { @@ -1105,7 +1105,7 @@ static PyObject* pop_torch_dispatch_stack( PyObject* _unused, PyObject* maybe_mode_key) { HANDLE_TH_ERRORS - c10::optional mode_key = c10::nullopt; + std::optional mode_key = c10::nullopt; PyObject* r = nullptr; if (maybe_mode_key != Py_None) { mode_key = py::cast(maybe_mode_key); diff --git a/torch/csrc/autograd/input_buffer.cpp b/torch/csrc/autograd/input_buffer.cpp index 2adfc1fc7efae..6c12bbadc5d2d 100644 --- a/torch/csrc/autograd/input_buffer.cpp +++ b/torch/csrc/autograd/input_buffer.cpp @@ -129,8 +129,8 @@ static void accumulate( void InputBuffer::add( size_t pos, Variable&& var, - const c10::optional& opt_producer_stream, - const c10::optional& opt_consumer_stream) { + const std::optional& opt_producer_stream, + const std::optional& opt_consumer_stream) { TORCH_INTERNAL_ASSERT(pos < buffer.size()); if (!var.defined()) { return; @@ -159,7 +159,7 @@ void InputBuffer::add( // Accumulation happens on the var device's default stream. TORCH_INTERNAL_ASSERT(device_of(var)); - c10::optional opt_accumulate_stream = c10::nullopt; + std::optional opt_accumulate_stream = c10::nullopt; const auto device_type = device_of(var).value().type(); // NOLINTNEXTLINE(bugprone-unchecked-optional-access) if (device_of(var)->is_cuda() || device_of(var)->is_privateuseone()) { @@ -179,7 +179,7 @@ void InputBuffer::add( record_stream_any_impl(var, *opt_accumulate_stream); } } else { - c10::optional opt_sync_stream = c10::nullopt; + std::optional opt_sync_stream = c10::nullopt; const auto guard = c10::impl::VirtualGuardImpl{device_type}; if (on_consumer && !on_producer) { // (3a) diff --git a/torch/csrc/autograd/input_buffer.h b/torch/csrc/autograd/input_buffer.h index d8ef3396cb6d8..7e471ef528bb0 100644 --- a/torch/csrc/autograd/input_buffer.h +++ b/torch/csrc/autograd/input_buffer.h @@ -27,8 +27,8 @@ struct InputBuffer { TORCH_API void add( size_t pos, Variable&& var, - const c10::optional& opt_producer_stream, - const c10::optional& opt_consumer_stream); + const std::optional& opt_producer_stream, + const std::optional& opt_consumer_stream); at::Device device() const; diff --git a/torch/csrc/autograd/profiler_kineto.cpp b/torch/csrc/autograd/profiler_kineto.cpp index 3b095cef2a68a..5b57e5891a1c7 100644 --- a/torch/csrc/autograd/profiler_kineto.cpp +++ b/torch/csrc/autograd/profiler_kineto.cpp @@ -206,7 +206,7 @@ struct AddTensorboardFields : public MetadataBase { result->visit_if_base([&, this](const auto& i) -> void { this->addMetadata("Python id", std::to_string(i.id_)); - c10::optional parent_id; + std::optional parent_id; std::shared_ptr parent = result->parent_.lock(); while (parent && !parent_id.has_value()) { parent->visit_if_base( diff --git a/torch/csrc/autograd/profiler_legacy.cpp b/torch/csrc/autograd/profiler_legacy.cpp index 04c676fc2b497..b9387479667e8 100644 --- a/torch/csrc/autograd/profiler_legacy.cpp +++ b/torch/csrc/autograd/profiler_legacy.cpp 
@@ -169,7 +169,7 @@ struct ProfilerLegacyThreadLocalState : public ProfilerStateBase { std::unordered_map> event_lists_map_; - c10::optional>> remoteProfiledEvents_; + std::optional>> remoteProfiledEvents_; }; thread_event_lists ProfilerLegacyThreadLocalState::consolidate() { @@ -429,7 +429,7 @@ void enableProfilerLegacy( } thread_event_lists disableProfilerLegacy( - c10::optional profilerDisableOptions) { + std::optional profilerDisableOptions) { auto cleanupTLSState = profilerDisableOptions ? profilerDisableOptions->cleanupTLSState : true; auto consolidate = diff --git a/torch/csrc/autograd/profiler_legacy.h b/torch/csrc/autograd/profiler_legacy.h index e74ddd8a2296e..9bd88b0b3dc51 100644 --- a/torch/csrc/autograd/profiler_legacy.h +++ b/torch/csrc/autograd/profiler_legacy.h @@ -335,7 +335,7 @@ TORCH_API void enableProfilerLegacy( const torch::profiler::impl::ProfilerConfig&); using thread_event_lists = std::vector>; TORCH_API thread_event_lists disableProfilerLegacy( - c10::optional profilerDisableOptions = + std::optional profilerDisableOptions = c10::nullopt); // adds profiledEvents to the current thread local recorded events. Each event @@ -376,9 +376,9 @@ struct TORCH_API RecordProfile { struct TORCH_API TLSLegacyProfilerGuard { explicit TLSLegacyProfilerGuard( const torch::profiler::impl::ProfilerConfig& cfg, - c10::optional> + std::optional> resultCallback = c10::nullopt, - c10::optional profilerDisableOptions = + std::optional profilerDisableOptions = c10::nullopt) : cb_(std::move(resultCallback)), profilerDisableOptions_(profilerDisableOptions) { @@ -397,9 +397,9 @@ struct TORCH_API TLSLegacyProfilerGuard { } private: - c10::optional> cb_; + std::optional> cb_; // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) - const c10::optional profilerDisableOptions_; + const std::optional profilerDisableOptions_; }; } // namespace profiler diff --git a/torch/csrc/autograd/profiler_python.cpp b/torch/csrc/autograd/profiler_python.cpp index da1cedfdb5a97..799188be9a686 100644 --- a/torch/csrc/autograd/profiler_python.cpp +++ b/torch/csrc/autograd/profiler_python.cpp @@ -220,7 +220,7 @@ struct ExtendedPyCallConfig { struct Cache { // `nn.Module.forward` or `optim.Optimizer._optimizer_step_code` - c10::optional location_; + std::optional location_; ska::flat_hash_map cls_and_parameters_; ska::flat_hash_map cls_names_; }; @@ -300,7 +300,7 @@ class ValueCache { load(callsite.value_)}; } - c10::optional recordIfTensor(py::handle p); + std::optional recordIfTensor(py::handle p); std::vector> unpackTensorMap( const py::dict& tensor_map); void trimPrefixes(); @@ -348,9 +348,9 @@ TensorMetadata toTensorMetadata(PyObject* self) { m.layout_ == at::kStrided ? t.strides().vec() : std::vector()}; } -c10::optional ValueCache::recordIfTensor(py::handle p) { +std::optional ValueCache::recordIfTensor(py::handle p) { return THPVariable_CheckExact(p.ptr()) - ? c10::optional{toTensorMetadata(p.ptr())} + ? 
std::optional{toTensorMetadata(p.ptr())} : c10::nullopt; } diff --git a/torch/csrc/autograd/python_function.cpp b/torch/csrc/autograd/python_function.cpp index 341d2886699a1..33300b001819b 100644 --- a/torch/csrc/autograd/python_function.cpp +++ b/torch/csrc/autograd/python_function.cpp @@ -619,7 +619,7 @@ static void _wrap_outputs( auto non_differentiable = _parse_non_differentiable(self); auto dirty_inputs = _mark_dirty(self); - std::vector> raw_output_vars; + std::vector> raw_output_vars; raw_output_vars.reserve(num_outputs); for (const auto i : c10::irange(num_outputs)) { PyObject* obj = PyTuple_GET_ITEM(raw_output, i); @@ -746,7 +746,7 @@ static void _wrap_outputs( static void _get_tensors_to_save( THPFunction* self, std::unordered_set& to_save_if_setup_context, - std::vector>& tensors_to_save, + std::vector>& tensors_to_save, bool overridden_setup_context, bool is_executable) { if (self->saved_for_forward && overridden_setup_context) { @@ -804,7 +804,7 @@ static void _get_tensors_to_save( } // Save any variables that requested by to_save static void _save_variables( - const std::vector>& tensors_to_save, + const std::vector>& tensors_to_save, const std::shared_ptr& cdata_ptr, THPFunction* self) { if (!self->to_save) @@ -1106,7 +1106,7 @@ PyObject* process_outputs( } std::unordered_set to_save_if_setup_context{}; - std::vector> tensors_to_save{}; + std::vector> tensors_to_save{}; _get_tensors_to_save( grad_fn, to_save_if_setup_context, diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp index 9b0beed493189..078b0f92124cb 100644 --- a/torch/csrc/autograd/python_variable.cpp +++ b/torch/csrc/autograd/python_variable.cpp @@ -267,7 +267,7 @@ PyObject* THPVariable_Wrap(at::TensorBase var) { c10::impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED); } - c10::optional mb_obj = + std::optional mb_obj = var.unsafeGetTensorImpl()->pyobj_slot()->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/false); c10::impl::PyInterpreterStatus status{}; @@ -587,14 +587,14 @@ static PyObject* view_func_impl( auto& view_func = view_info.view_fn(); // Determine new SymInt / tensor state as needed. 
- c10::optional> new_symints = c10::nullopt; + std::optional> new_symints = c10::nullopt; if (symint_visitor_fn != Py_None) { new_symints = map_py_func( py::cast(symint_visitor_fn), view_func.get_symints()); } - c10::optional> new_tensors = c10::nullopt; + std::optional> new_tensors = c10::nullopt; if (tensor_visitor_fn != Py_None) { new_tensors = map_py_func( py::cast(tensor_visitor_fn), @@ -815,7 +815,7 @@ static PyObject* THPVariable_make_wrapper_subclass( auto sym_sizes = r.symintlist(1); auto sym_strides_own = r.symintlistOptional(2); auto sym_strides = - static_cast>(sym_strides_own); + static_cast>(sym_strides_own); auto sym_storage_offset = r.toSymIntOptional(3); c10::SymInt size_bytes; diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp index e3cdd04f0965a..fdcafd6cd7091 100644 --- a/torch/csrc/autograd/python_variable_indexing.cpp +++ b/torch/csrc/autograd/python_variable_indexing.cpp @@ -178,7 +178,7 @@ static inline Variable applySlicing( variable_list& outIndices, bool is_tracing, const at::Device& self_device, - const c10::optional& self_ndim, + const std::optional& self_ndim, int64_t specified_dims) { int64_t size = PyTuple_GET_SIZE(index); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) @@ -200,9 +200,9 @@ static inline Variable applySlicing( // nested tensor does not have a size (yet) so for now we represent its size // as null may need to be changed after we reach a better solution for // nested tensor size - c10::optional result_sizes = result.is_nested() - ? c10::optional(c10::nullopt) - : c10::optional(result.sym_sizes()); + std::optional result_sizes = result.is_nested() + ? std::optional(c10::nullopt) + : std::optional(result.sym_sizes()); result = at::indexing::handleDimInMultiDimIndexing( /*prev_dim_result=*/result, /*original_tensor=*/self, diff --git a/torch/csrc/autograd/record_function_ops.cpp b/torch/csrc/autograd/record_function_ops.cpp index e5153ae4028aa..e3a3299dc9c59 100644 --- a/torch/csrc/autograd/record_function_ops.cpp +++ b/torch/csrc/autograd/record_function_ops.cpp @@ -20,7 +20,7 @@ namespace profiler { // callbacks. static void record_function_enter( const std::string& name, - const c10::optional& args, + const std::optional& args, at::RecordFunction& rec) { if (rec.isActive()) { if (rec.needsInputs() && args.has_value()) { @@ -35,7 +35,7 @@ static void record_function_enter( // Legacy signature using cpp_custom_type_hack static at::Tensor record_function_enter_legacy( const std::string& name, - const c10::optional& args) { + const std::optional& args) { auto rec = std::make_unique(at::RecordScope::USER_SCOPE); record_function_enter(name, args, *rec); return at::cpp_custom_type_hack::create(std::move(rec), at::TensorOptions()); @@ -44,7 +44,7 @@ static at::Tensor record_function_enter_legacy( // New signature using custom_class c10::intrusive_ptr record_function_enter_new( const std::string& name, - const c10::optional& args) { + const std::optional& args) { auto rec = c10::make_intrusive(at::RecordScope::USER_SCOPE); record_function_enter(name, args, rec->record); diff --git a/torch/csrc/autograd/record_function_ops.h b/torch/csrc/autograd/record_function_ops.h index d37aba7dfff85..a145523c1bf8a 100644 --- a/torch/csrc/autograd/record_function_ops.h +++ b/torch/csrc/autograd/record_function_ops.h @@ -17,7 +17,7 @@ struct PythonRecordFunction : public torch::CustomClassHolder { // callbacks. 
TORCH_API c10::intrusive_ptr record_function_enter_new( const std::string& name, - const c10::optional& args = c10::nullopt); + const std::optional& args = c10::nullopt); // Schedules RecordFunction's end callbacks to be run on completion of a future. TORCH_API c10::intrusive_ptr _call_end_callbacks_on_fut_new( diff --git a/torch/csrc/autograd/saved_variable.cpp b/torch/csrc/autograd/saved_variable.cpp index 4bd44339c3b45..c4d4566434325 100644 --- a/torch/csrc/autograd/saved_variable.cpp +++ b/torch/csrc/autograd/saved_variable.cpp @@ -117,7 +117,7 @@ void SavedVariable::reset_data() { } SavedVariable::SavedVariable( - const c10::optional& variable, + const std::optional& variable, bool is_output, bool is_inplace_on_view) : SavedVariable( diff --git a/torch/csrc/autograd/saved_variable.h b/torch/csrc/autograd/saved_variable.h index c9a358ede89e6..e249209f9f63b 100644 --- a/torch/csrc/autograd/saved_variable.h +++ b/torch/csrc/autograd/saved_variable.h @@ -26,7 +26,7 @@ class TORCH_API SavedVariable { bool is_output, bool is_inplace_on_view = false); SavedVariable( - const c10::optional& variable, + const std::optional& variable, bool is_output, bool is_inplace_on_view = false); SavedVariable(SavedVariable&&) = default; diff --git a/torch/csrc/autograd/utils/python_arg_parsing.h b/torch/csrc/autograd/utils/python_arg_parsing.h index 7701e97fe9189..326221e44d147 100644 --- a/torch/csrc/autograd/utils/python_arg_parsing.h +++ b/torch/csrc/autograd/utils/python_arg_parsing.h @@ -12,11 +12,11 @@ namespace utils { // The parameter allow_copy is to accept copy for Tensor.to (and by proxy // PackedSequences.to) but not nn.Module.to. inline std::tuple< - c10::optional, - c10::optional, + std::optional, + std::optional, bool, bool, - c10::optional> + std::optional> parse_to_conversion(PythonArgs& r, bool allow_copy) { if (r.idx == 0) { if (!allow_copy && !r.isNone(3)) diff --git a/torch/csrc/autograd/variable.cpp b/torch/csrc/autograd/variable.cpp index 07e37463cbd38..da987001e2ecc 100644 --- a/torch/csrc/autograd/variable.cpp +++ b/torch/csrc/autograd/variable.cpp @@ -42,8 +42,8 @@ static std::unique_ptr create_view_func_matching(const Variable& t) { DifferentiableViewMeta::DifferentiableViewMeta( at::TensorImpl* self_impl, - c10::optional backward_info, - c10::optional forward_info, + std::optional backward_info, + std::optional forward_info, bool shared_view_info, CreationMeta creation_meta) : AutogradMeta(self_impl), @@ -581,10 +581,10 @@ bool VariableHooks::retains_grad(const at::TensorBase& self) const { void VariableHooks::_backward( const Tensor& self, at::TensorList inputs, - const c10::optional& gradient, - c10::optional keep_graph, + const std::optional& gradient, + std::optional keep_graph, bool create_graph) const { - // TODO torch::autograd::backward should take the c10::optional + // TODO torch::autograd::backward should take the std::optional // gradient directly instead of us having to unwrap it to Tensor _gradient // here. Tensor _gradient = gradient.has_value() ? 
*gradient : Tensor(); diff --git a/torch/csrc/autograd/variable.h b/torch/csrc/autograd/variable.h index aa9ee76f3dc95..d60f37085f380 100644 --- a/torch/csrc/autograd/variable.h +++ b/torch/csrc/autograd/variable.h @@ -682,8 +682,8 @@ TORCH_API void handle_view_on_rebase( struct TORCH_API DifferentiableViewMeta : public AutogradMeta { private: /// Information about the views - c10::optional backward_info_; - c10::optional forward_info_; + std::optional backward_info_; + std::optional forward_info_; // Optimization to reduce the number of ViewInfo we create. // In the (very common) case where backward_info_ == forward_info_, we only @@ -766,8 +766,8 @@ struct TORCH_API DifferentiableViewMeta : public AutogradMeta { DifferentiableViewMeta( at::TensorImpl* self_impl, - c10::optional backward_info, - c10::optional forward_info, + std::optional backward_info, + std::optional forward_info, bool shared_view_info, CreationMeta creation_meta = CreationMeta::DEFAULT); }; @@ -796,8 +796,8 @@ struct TORCH_API DifferentiableViewMeta : public AutogradMeta { // Differentiable view. Track history with DifferentiableViewMeta. inline Variable make_variable_differentiable_view( const at::Tensor& data, - c10::optional backward_info, - c10::optional forward_info, + std::optional backward_info, + std::optional forward_info, bool shared_view_info, CreationMeta creation_meta, bool allow_tensor_metadata_change = true) { @@ -927,8 +927,8 @@ struct VariableHooks final : at::impl::VariableHooksInterface { void _backward( const at::Tensor& self, at::TensorList inputs, - const c10::optional& gradient, - c10::optional keep_graph, + const std::optional& gradient, + std::optional keep_graph, bool create_graph) const override; void requires_grad_(const at::TensorBase& self, bool _requires_grad) const override; diff --git a/torch/csrc/cuda/Graph.cpp b/torch/csrc/cuda/Graph.cpp index 83c60d059f8dd..472151fec6097 100644 --- a/torch/csrc/cuda/Graph.cpp +++ b/torch/csrc/cuda/Graph.cpp @@ -30,7 +30,7 @@ void THCPGraph_init(PyObject* module) { .def( "capture_begin", [](::at::cuda::CUDAGraph& self, - c10::optional pool_opt, + std::optional pool_opt, std::string capture_error_mode) { cudaStreamCaptureMode capture_mode; c10::cuda::MempoolId_t pool = pool_opt.has_value() diff --git a/torch/csrc/cuda/Module.cpp b/torch/csrc/cuda/Module.cpp index e622c254a5003..030c5a2b5ccf6 100644 --- a/torch/csrc/cuda/Module.cpp +++ b/torch/csrc/cuda/Module.cpp @@ -956,8 +956,8 @@ static void registerCudaDeviceProperties(PyObject* module) { m.def( "_cuda_record_memory_history", static_cast, - c10::optional, + std::optional, + std::optional, const std::string&, size_t)>(torch::cuda::_record_memory_history)); diff --git a/torch/csrc/cuda/comm.cpp b/torch/csrc/cuda/comm.cpp index c8bbec87caefb..c7c3cb396304c 100644 --- a/torch/csrc/cuda/comm.cpp +++ b/torch/csrc/cuda/comm.cpp @@ -37,7 +37,7 @@ struct unique_type_checker { unique = type_id_.value() == type_id; } - c10::optional type_id_; + std::optional type_id_; bool unique = true; }; @@ -232,7 +232,7 @@ std::vector& scatter_out( const at::Tensor& tensor, std::vector& out_tensors, int64_t dim, - const c10::optional>>& + const std::optional>>& streams) { TORCH_CHECK( !out_tensors.empty(), @@ -313,9 +313,9 @@ std::vector& scatter_out( std::vector scatter( const at::Tensor& tensor, at::IntArrayRef devices, - const c10::optional>& chunk_sizes, + const std::optional>& chunk_sizes, int64_t dim, - const c10::optional>>& + const std::optional>>& streams) { TORCH_CHECK(!devices.empty(), "Expected at least one 
device to scatter to"); if (chunk_sizes.has_value()) { @@ -446,7 +446,7 @@ at::Tensor& gather_out( at::Tensor gather( at::TensorList tensors, int64_t dim, - c10::optional destination_index) { + std::optional destination_index) { TORCH_CHECK(!tensors.empty(), "Expected at least one tensor to gather from"); int64_t total_size = 0; auto& first = tensors.front(); diff --git a/torch/csrc/cuda/comm.h b/torch/csrc/cuda/comm.h index cf89b365d0ce4..4bc0f60195a26 100644 --- a/torch/csrc/cuda/comm.h +++ b/torch/csrc/cuda/comm.h @@ -28,15 +28,15 @@ TORCH_CUDA_CU_API std::vector& scatter_out( const at::Tensor& tensor, std::vector& out_tensors, int64_t dim = 0, - const c10::optional>>& + const std::optional>>& streams = c10::nullopt); TORCH_CUDA_CU_API std::vector scatter( const at::Tensor& tensor, at::IntArrayRef devices, - const c10::optional>& chunk_sizes = c10::nullopt, + const std::optional>& chunk_sizes = c10::nullopt, int64_t dim = 0, - const c10::optional>>& + const std::optional>>& streams = c10::nullopt); TORCH_CUDA_CU_API at::Tensor& gather_out( @@ -47,6 +47,6 @@ TORCH_CUDA_CU_API at::Tensor& gather_out( TORCH_CUDA_CU_API at::Tensor gather( at::TensorList tensors, int64_t dim, - c10::optional destination_index); + std::optional destination_index); } // namespace torch::cuda diff --git a/torch/csrc/cuda/memory_snapshot.cpp b/torch/csrc/cuda/memory_snapshot.cpp index 49fefd97e2da1..82696abaee227 100644 --- a/torch/csrc/cuda/memory_snapshot.cpp +++ b/torch/csrc/cuda/memory_snapshot.cpp @@ -130,8 +130,8 @@ static void checkOptionIn( } void _record_memory_history( - c10::optional enabled, - c10::optional context, + std::optional enabled, + std::optional context, const std::string& stacks, size_t max_entries) { if (enabled) { diff --git a/torch/csrc/cuda/memory_snapshot.h b/torch/csrc/cuda/memory_snapshot.h index f5f9bdbed1620..eb22767a78f90 100644 --- a/torch/csrc/cuda/memory_snapshot.h +++ b/torch/csrc/cuda/memory_snapshot.h @@ -17,8 +17,8 @@ TORCH_CUDA_CU_API void _record_memory_history( bool record_cpp_context = false); TORCH_CUDA_CU_API void _record_memory_history( - c10::optional enabled = "all", - c10::optional context = "all", + std::optional enabled = "all", + std::optional context = "all", const std::string& stacks = "all", size_t max_entries = SIZE_MAX); diff --git a/torch/csrc/cuda/nccl.h b/torch/csrc/cuda/nccl.h index ebf51b7633abb..b118bd4600a56 100644 --- a/torch/csrc/cuda/nccl.h +++ b/torch/csrc/cuda/nccl.h @@ -111,7 +111,7 @@ TORCH_CUDA_CPP_API void check_inputs( } // namespace detail using comm_list = std::vector; -using stream_list = std::vector>; +using stream_list = std::vector>; TORCH_CUDA_CPP_API std::uint64_t version(); TORCH_CUDA_CPP_API const char* version_suffix(); diff --git a/torch/csrc/cuda/python_comm.cpp b/torch/csrc/cuda/python_comm.cpp index e65bb15103aab..ec9da9ac2d679 100644 --- a/torch/csrc/cuda/python_comm.cpp +++ b/torch/csrc/cuda/python_comm.cpp @@ -46,10 +46,10 @@ void initCommMethods(PyObject* module) { "_scatter", [](at::Tensor& tensor, std::vector& devices, - c10::optional> chunk_sizes, + std::optional> chunk_sizes, int64_t dim, - c10::optional py_streams) { - c10::optional>> + std::optional py_streams) { + std::optional>> streams; if (py_streams) { py::handle handle = *py_streams; @@ -69,8 +69,8 @@ void initCommMethods(PyObject* module) { [](at::Tensor& tensor, std::vector& out_tensors, int64_t dim, - c10::optional py_streams) { - c10::optional>> + std::optional py_streams) { + std::optional>> streams; if (py_streams) { py::handle handle = *py_streams; 
@@ -88,7 +88,7 @@ void initCommMethods(PyObject* module) { "_gather", [](std::vector& tensors, int64_t dim, - c10::optional destination_index) { + std::optional destination_index) { return gather(tensors, dim, destination_index); }, py::arg("tensors"), diff --git a/torch/csrc/cuda/python_nccl.cpp b/torch/csrc/cuda/python_nccl.cpp index db6f6c680701d..5060f9289a9e1 100644 --- a/torch/csrc/cuda/python_nccl.cpp +++ b/torch/csrc/cuda/python_nccl.cpp @@ -56,11 +56,11 @@ static void destroy_nccl_comm(PyObject* capsule) { END_HANDLE_TH_ERRORS_RET() } -static std::vector> unpack_streams( +static std::vector> unpack_streams( PyObject* obj, size_t size) { if (obj == Py_None) { - return std::vector>(size, c10::nullopt); + return std::vector>(size, c10::nullopt); } auto streams = THPUtils_PySequence_to_CUDAStreamList(obj); if (streams.size() != size) { @@ -147,7 +147,7 @@ PyObject* THCPModule_nccl_reduce(PyObject* self, PyObject* args) { std::vector inputs = extract_tensors(_inputs); auto output = extract_tensor(_output); - std::vector> streams = + std::vector> streams = unpack_streams(_streams, inputs.size()); auto user_comms = unpack_comms(_comms, inputs.size()); diff --git a/torch/csrc/cuda/utils.cpp b/torch/csrc/cuda/utils.cpp index e62e176473f2a..e2ad6622e6ffb 100644 --- a/torch/csrc/cuda/utils.cpp +++ b/torch/csrc/cuda/utils.cpp @@ -6,7 +6,7 @@ #ifdef USE_CUDA // NB: It's a list of *optional* CUDAStream; when nullopt, that means to use // whatever the current stream of the device the input is associated with was. -std::vector> +std::vector> THPUtils_PySequence_to_CUDAStreamList(PyObject* obj) { if (!PySequence_Check(obj)) { throw std::runtime_error( @@ -18,7 +18,7 @@ THPUtils_PySequence_to_CUDAStreamList(PyObject* obj) { "expected PySequence, but got " + std::string(THPUtils_typename(obj))); } - std::vector> streams; + std::vector> streams; Py_ssize_t length = PySequence_Fast_GET_SIZE(seq.get()); for (Py_ssize_t i = 0; i < length; i++) { PyObject* stream = PySequence_Fast_GET_ITEM(seq.get(), i); diff --git a/torch/csrc/distributed/c10d/Backend.hpp b/torch/csrc/distributed/c10d/Backend.hpp index 05a39ddc905aa..501cf59d86bad 100644 --- a/torch/csrc/distributed/c10d/Backend.hpp +++ b/torch/csrc/distributed/c10d/Backend.hpp @@ -375,7 +375,7 @@ class TORCH_API Backend : public torch::CustomClassHolder { } // See similar functions in ProcessGroup.hpp for context. - c10::optional getBoundDeviceId() const { + std::optional getBoundDeviceId() const { return bound_device_id_; } @@ -386,7 +386,7 @@ class TORCH_API Backend : public torch::CustomClassHolder { // backends may perform } - void setBoundDeviceId(c10::optional device) { + void setBoundDeviceId(std::optional device) { if (device) { TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index"); } @@ -410,7 +410,7 @@ class TORCH_API Backend : public torch::CustomClassHolder { std::function)> onCompletionHook_; - c10::optional bound_device_id_; + std::optional bound_device_id_; }; } // namespace c10d diff --git a/torch/csrc/distributed/c10d/NCCLUtils.cpp b/torch/csrc/distributed/c10d/NCCLUtils.cpp index 9a0c77a8623c3..e26ab22f1a9f3 100644 --- a/torch/csrc/distributed/c10d/NCCLUtils.cpp +++ b/torch/csrc/distributed/c10d/NCCLUtils.cpp @@ -159,7 +159,7 @@ std::string ncclGetErrorWithVersion(ncclResult_t error) { // thrown in the NCCL codebase. 
std::string getNcclErrorDetailStr( ncclResult_t error, - c10::optional processGroupFailureReason /* = c10::nullopt */ + std::optional processGroupFailureReason /* = c10::nullopt */ ) { // Prioritize failure reason provided by PG NCCL first, as it can abort // communicators when it encounters collective timeouts, etc. diff --git a/torch/csrc/distributed/c10d/NCCLUtils.hpp b/torch/csrc/distributed/c10d/NCCLUtils.hpp index a4b96a2a40762..5690c0591a7af 100644 --- a/torch/csrc/distributed/c10d/NCCLUtils.hpp +++ b/torch/csrc/distributed/c10d/NCCLUtils.hpp @@ -182,7 +182,7 @@ int nccl_nonblocking_timeout(); // thrown in the NCCL codebase. std::string getNcclErrorDetailStr( ncclResult_t error, - c10::optional processGroupFailureReason = c10::nullopt); + std::optional processGroupFailureReason = c10::nullopt); // Write NCCL debug info to local disk or any storage users define. // There are some constrains we set for the debug info writer: @@ -339,13 +339,13 @@ class NCCLComm { ncclComm_t getNcclComm(); - c10::optional getNcclCommFailureReason() const { + std::optional getNcclCommFailureReason() const { std::unique_lock lock(mutex_); return commFailureReason_; } void ncclCommAbort( - c10::optional commFailureReason = c10::nullopt) { + std::optional commFailureReason = c10::nullopt) { std::unique_lock lock(mutex_); #ifdef ENABLE_NCCL_ERROR_CHECKING if (aborted_) { @@ -491,7 +491,7 @@ class NCCLComm { int rank_; // Optional reason for communicator failure, provided by ProcessGroupNCCL for // better error messaging. - c10::optional commFailureReason_; + std::optional commFailureReason_; bool initialized_{false}; #ifdef NCCL_HAS_COMM_REGISTER // Stores handlers for tensors registered by NCCL diff --git a/torch/csrc/distributed/c10d/Ops.cpp b/torch/csrc/distributed/c10d/Ops.cpp index cf8b7cd966ef5..ae822ad397504 100644 --- a/torch/csrc/distributed/c10d/Ops.cpp +++ b/torch/csrc/distributed/c10d/Ops.cpp @@ -168,7 +168,7 @@ IMPL_BROADCAST(PrivateUse1) at::TensorList tensors, \ const c10::intrusive_ptr& process_group, \ const c10::intrusive_ptr& reduce_op, \ - const c10::optional& sparse_indices, \ + const std::optional& sparse_indices, \ int64_t timeout) { \ auto tensor_vec = tensors.vec(); \ auto work = process_group->getBackend(c10::DeviceType::DEV) -> allreduce( \ @@ -460,7 +460,7 @@ allreduce_sparse_cuda_( at::TensorList tensors, const c10::intrusive_ptr& process_group, const c10::intrusive_ptr& reduce_op, - const c10::optional& sparse_indices, + const std::optional& sparse_indices, int64_t timeout) { auto tensor_vec = tensors.vec(); auto work = process_group->getBackend(c10::DeviceType::CUDA) diff --git a/torch/csrc/distributed/c10d/ProcessGroup.hpp b/torch/csrc/distributed/c10d/ProcessGroup.hpp index 8c805020e8cf6..acf8c9c354a76 100644 --- a/torch/csrc/distributed/c10d/ProcessGroup.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroup.hpp @@ -162,7 +162,7 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder { at::TensorList, const c10::intrusive_ptr<::c10d::ProcessGroup>&, const c10::intrusive_ptr<::c10d::ReduceOp>&, - const c10::optional& sparse_indices, + const std::optional& sparse_indices, int64_t)>(); return std::get<1>(op.call( @@ -620,7 +620,7 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder { void setBackend( c10::DeviceType deviceType, BackendType backendType, - const c10::optional>& backend) { + const std::optional>& backend) { // TODO: should we add these entries after the backend setting succeeds? 
deviceTypeToBackendType_[deviceType] = backendType; deviceTypes_.insert(deviceType); @@ -703,11 +703,11 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder { // optimizations such as automatic use of ncclCommSplit. The device // is specified in `init_process_group` and eventually makes it // here and then down into the actual backend instances. - c10::optional getBoundDeviceId() const { + std::optional getBoundDeviceId() const { return bound_device_id_; } - void setBoundDeviceId(c10::optional device) { + void setBoundDeviceId(std::optional device) { if (device) { TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index"); } @@ -742,7 +742,7 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder { std::unordered_map> backendTypeToBackend_; - c10::optional bound_device_id_; + std::optional bound_device_id_; }; } // namespace c10d diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp index ada56cbee1990..cba0249829e68 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp @@ -479,7 +479,7 @@ void returnFutureWithOutput( inline void ProcessGroupGloo::AsyncWork::recordAsyncWorkProfilingInfo( const char* profilingTitle, - const c10::optional>& inputTensors) { + const std::optional>& inputTensors) { auto recordingFunction = std::make_shared(at::RecordScope::USER_SCOPE); if (recordingFunction->isActive()) { @@ -511,7 +511,7 @@ ProcessGroupGloo::AsyncWork::AsyncWork( OpType opType, uint64_t seq, const char* profilingTitle, - const c10::optional>& inputTensors) + const std::optional>& inputTensors) // Profiler: Pass nullptr as profilingTitle to parent constructor to // replace default profiler implementation with async version that reports // correct timestamps for work that is asynchronously executed. @@ -547,7 +547,7 @@ ProcessGroupGloo::SendWork::SendWork( -1, OpType::SEND, "gloo:send", - c10::optional>({tensor})), + std::optional>({tensor})), tensor_(tensor), buffer_(std::move(buffer)), seq_(seq) {} @@ -588,7 +588,7 @@ ProcessGroupGloo::RecvWork::RecvWork( -1, opType, profilingTitle, - c10::optional>({tensor})), + std::optional>({tensor})), tensor_(tensor), buffer_(std::move(buffer)), srcRank_(-1), @@ -2424,7 +2424,7 @@ class AsyncScatterWork : public ProcessGroupGloo::AsyncWork { OpType::SCATTER, seq, "gloo:scatter", - !inputs.empty() ? c10::optional>(inputs[0]) + !inputs.empty() ? 
std::optional>(inputs[0]) : c10::nullopt), context(context), outputs(outputs), @@ -2620,7 +2620,7 @@ class AsyncAlltoallWork : public ProcessGroupGloo::AsyncWork { OpType::ALLTOALL, seq, "gloo:all_to_all", - c10::optional>({inputTensor})), + std::optional>({inputTensor})), context(context), outputTensor(outputTensor), inputTensor(inputTensor), diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp index d40b205c25601..87c87b8f1ae9b 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp @@ -73,7 +73,7 @@ class TORCH_API ProcessGroupGloo : public Backend { OpType opType, uint64_t seq, const char* profilingTitle = nullptr, - const c10::optional>& inputTensors = + const std::optional>& inputTensors = c10::nullopt); ~AsyncWork() override = default; @@ -95,7 +95,7 @@ class TORCH_API ProcessGroupGloo : public Backend { void finishWorkGlooError(const std::exception_ptr& eptr); inline void recordAsyncWorkProfilingInfo( const char* profilingTitle, - const c10::optional>& inputTensors); + const std::optional>& inputTensors); const std::vector> outputTensors_; c10::intrusive_ptr future_; diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp index 29d05a9693b14..6d02f89f6005b 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp @@ -121,7 +121,7 @@ ProcessGroupMPI::AsyncWork::AsyncWork( MPI_Request request, std::vector outputTensors, const char* profilingTitle, - const c10::optional>& inputTensors) + const std::optional>& inputTensors) : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors), outputTensors_(std::move(outputTensors)), request_(request) { @@ -379,7 +379,7 @@ void ProcessGroupMPI::runLoop() { c10::intrusive_ptr ProcessGroupMPI::enqueue( std::unique_ptr entry, const char* profilingTitle, - const c10::optional>& inputTensors) { + const std::optional>& inputTensors) { auto work = c10::make_intrusive(entry->dst, profilingTitle, inputTensors); std::unique_lock lock(pgMutex_); @@ -410,7 +410,7 @@ c10::intrusive_ptr ProcessGroupMPI::broadcast( return enqueue( std::move(entry), "mpi:broadcast", - c10::optional>(tensors)); + std::optional>(tensors)); } c10::intrusive_ptr ProcessGroupMPI::allreduce( @@ -436,7 +436,7 @@ c10::intrusive_ptr ProcessGroupMPI::allreduce( return enqueue( std::move(entry), "mpi:all_reduce", - c10::optional>(tensors)); + std::optional>(tensors)); } c10::intrusive_ptr ProcessGroupMPI::allreduce_coalesced( @@ -473,7 +473,7 @@ c10::intrusive_ptr ProcessGroupMPI::reduce( return enqueue( std::move(entry), "mpi:reduce", - c10::optional>(tensors)); + std::optional>(tensors)); } c10::intrusive_ptr ProcessGroupMPI::allgather( @@ -522,7 +522,7 @@ c10::intrusive_ptr ProcessGroupMPI::allgather( return enqueue( std::move(entry), "mpi:all_gather", - c10::optional>(inputTensors)); + std::optional>(inputTensors)); } c10::intrusive_ptr ProcessGroupMPI::allgather_coalesced( @@ -598,14 +598,14 @@ c10::intrusive_ptr ProcessGroupMPI::gather( return enqueue( std::move(entry), "mpi:gather", - c10::optional>(inputTensors)); + std::optional>(inputTensors)); } else { auto entry = std::make_unique(&inputTensors, nullptr, std::move(runFunc)); return enqueue( std::move(entry), "mpi:gather", - c10::optional>(inputTensors)); + std::optional>(inputTensors)); } } @@ -672,7 +672,7 @@ c10::intrusive_ptr ProcessGroupMPI::scatter( std::move(entry), "mpi:scatter", 
!inputTensors.empty() - ? c10::optional>(inputTensors[0]) + ? std::optional>(inputTensors[0]) : c10::nullopt); } else { auto entry = std::make_unique( @@ -681,7 +681,7 @@ c10::intrusive_ptr ProcessGroupMPI::scatter( std::move(entry), "mpi:scatter", !inputTensors.empty() - ? c10::optional>(inputTensors[0]) + ? std::optional>(inputTensors[0]) : c10::nullopt); } } @@ -734,7 +734,7 @@ c10::intrusive_ptr ProcessGroupMPI::alltoall_base( return enqueue( std::move(entry), "mpi:all_to_all", - c10::optional>(inputTensors)); + std::optional>(inputTensors)); } else { // Need alltoallv c10d::checkSplitSizes(inputSplitSizes, inputTensor, size_); @@ -772,7 +772,7 @@ c10::intrusive_ptr ProcessGroupMPI::alltoall_base( return enqueue( std::move(entry), "mpi:all_to_all", - c10::optional>(inputTensors)); + std::optional>(inputTensors)); } } @@ -835,7 +835,7 @@ c10::intrusive_ptr ProcessGroupMPI::alltoall( return enqueue( std::move(entry), "mpi:all_to_all", - c10::optional>(inputTensors)); + std::optional>(inputTensors)); } c10::intrusive_ptr ProcessGroupMPI::send( @@ -864,7 +864,7 @@ c10::intrusive_ptr ProcessGroupMPI::send( request, std::vector(), "mpi:send", - c10::optional>(tensors)); + std::optional>(tensors)); } c10::intrusive_ptr ProcessGroupMPI::recv( @@ -893,7 +893,7 @@ c10::intrusive_ptr ProcessGroupMPI::recv( request, tensors, "mpi:recv", - c10::optional>(tensors)); + std::optional>(tensors)); } c10::intrusive_ptr ProcessGroupMPI::recvAnysource( @@ -921,7 +921,7 @@ c10::intrusive_ptr ProcessGroupMPI::recvAnysource( request, tensors, "mpi:recvAnySource", - c10::optional>(tensors)); + std::optional>(tensors)); } c10::intrusive_ptr ProcessGroupMPI::barrier(const BarrierOptions& opts) { diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp index dd586dda7024b..6e52e680e5c20 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp @@ -86,7 +86,7 @@ class TORCH_API ProcessGroupMPI : public Backend { explicit WorkMPI( std::vector outputTensors, const char* profilingTitle = nullptr, - const c10::optional>& inputTensors = + const std::optional>& inputTensors = c10::nullopt) : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors), outputTensors_(std::move(outputTensors)), @@ -114,7 +114,7 @@ class TORCH_API ProcessGroupMPI : public Backend { MPI_Request request, std::vector outputTensors, const char* profilingTitle = nullptr, - const c10::optional>& inputTensors = + const std::optional>& inputTensors = c10::nullopt); ~AsyncWork() override; @@ -243,7 +243,7 @@ class TORCH_API ProcessGroupMPI : public Backend { c10::intrusive_ptr enqueue( std::unique_ptr entry, const char* profilingTitle = nullptr, - const c10::optional>& inputTensors = + const std::optional>& inputTensors = c10::nullopt); bool stop_; diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp index 6cca50daff6c4..7437a4ef1846a 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp @@ -352,9 +352,9 @@ std::string dump_nccl_trace() { } #endif -c10::optional)>>& +std::optional)>>& get_cpp_trace_dumper() { - static c10::optional< + static std::optional< std::function)>> dumper(c10::nullopt); return dumper; @@ -431,7 +431,7 @@ ProcessGroupNCCL::WorkNCCL::WorkNCCL( OpType opType, uint64_t seq, const char* profilingTitle, - const c10::optional>& inputs, + const std::optional>& inputs, bool desyncDebug, bool enableTiming, 
DebugLevel distDebugLevel) @@ -546,7 +546,7 @@ bool ProcessGroupNCCL::WorkNCCL::finishedGPUExecutionInternal() const { } bool ProcessGroupNCCL::WorkNCCL::checkTimeout( - c10::optional timeout) { + std::optional timeout) { auto currentTimepoint = std::chrono::steady_clock::now(); auto timeElapsed = std::chrono::duration_cast( currentTimepoint - workStartTime_); @@ -1036,7 +1036,7 @@ void ProcessGroupNCCL::waitForFutureOrTimeout( void ProcessGroupNCCL::abortCommsFromMap( std::unordered_map>& ncclCommsMap, - c10::optional abortReason) { + std::optional abortReason) { // The process may control multiple devices, loop through the communicators on // each device for (auto& it : ncclCommsMap) { @@ -1069,7 +1069,7 @@ void ProcessGroupNCCL::abortCommsFromMap( } // Abort all communicators on this rank -bool ProcessGroupNCCL::abort(c10::optional abortReason) { +bool ProcessGroupNCCL::abort(std::optional abortReason) { // Remove record from global ncclCommDevIdxMapMutex before aboarting, // so that a new cache segment would not register to already aborded // communicators. Note that ncclCommDevIdxMap is a global container which may @@ -1088,7 +1088,7 @@ bool ProcessGroupNCCL::abort(c10::optional abortReason) { return true; } -void ProcessGroupNCCL::shutdown(c10::optional reason) { +void ProcessGroupNCCL::shutdown(std::optional reason) { // Don't join threads here since the purpose of this method is to abort all // communicators and signal the threads to exit. Joining on the threads could // potentially block and hence avoid it in this method. @@ -1188,7 +1188,7 @@ void ProcessGroupNCCL::heartbeatMonitor() { : heartbeatTimeoutInSec_ * 1000; auto lastTimePollStore = std::chrono::steady_clock::now(); auto lastTimeHeartBeatCheck = std::chrono::steady_clock::now(); - c10::optional dumpPipe = c10::nullopt; + std::optional dumpPipe = c10::nullopt; if (uid_ == 0) { // DumpPipe is one per-trainer process, and its convenient to name them // after 'global' ranks in the system, So we assume processgroup (uid)==0 is @@ -2241,7 +2241,7 @@ c10::intrusive_ptr ProcessGroupNCCL::initWork( opType, seq_, profilingTitle, - profilingTitle != nullptr ? c10::optional>(inputs) + profilingTitle != nullptr ? std::optional>(inputs) : c10::nullopt, desyncDebug_, enableTiming_.load(), diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp index fac9b6f38204e..4217d2fa4cea5 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -248,7 +248,7 @@ class TORCH_API ProcessGroupNCCL : public Backend { OpType opType, uint64_t seq, const char* profilingTitle = nullptr, - const c10::optional>& inputs = c10::nullopt, + const std::optional>& inputs = c10::nullopt, bool desyncDebug = false, bool enableTiming = false, DebugLevel distDebugLevel = DebugLevel::Off); @@ -305,7 +305,7 @@ class TORCH_API ProcessGroupNCCL : public Backend { // and False otherwise. // In case of timeout, set exception on the WorkNCCL object. 
bool checkTimeout( - c10::optional timeout = c10::nullopt); + std::optional timeout = c10::nullopt); std::vector result() override; @@ -399,7 +399,7 @@ class TORCH_API ProcessGroupNCCL : public Backend { bool timingEnabled_; // unique id used to tell the trace buffer that this // work has completed - c10::optional trace_id_; + std::optional trace_id_; DebugLevel distDebugLevel_; friend class ProcessGroupNCCL; }; @@ -621,16 +621,16 @@ class TORCH_API ProcessGroupNCCL : public Backend { // Helper function for iteratively aborting communicators in the provided map void abortCommsFromMap( std::unordered_map>& ncclCommsMap, - c10::optional abortReason); + std::optional abortReason); c10::intrusive_ptr initIntraNodeComm(); // Provides an API to abort the ProcessGroup (similar to ncclCommAbort) // instead of relying on ProcessGroupNCCL destructor. // return true if abort is successful, otherwise false - bool abort(c10::optional abortReason = c10::nullopt); + bool abort(std::optional abortReason = c10::nullopt); - void shutdown(c10::optional reason = c10::nullopt); + void shutdown(std::optional reason = c10::nullopt); void eagerConnectSingleDevice(at::Device device) override; @@ -1092,7 +1092,7 @@ TORCH_API std::string dump_nccl_trace(); // Gets a mutable reference to a global optional function. Heartbeat Monitor // will use this function to dump traces, if available. Inside fbcode, we store // a function here that uses an internal tool for process tracing -TORCH_API c10::optional< +TORCH_API std::optional< std::function)>>& get_cpp_trace_dumper(); diff --git a/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp b/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp index 22fc58134566c..ab1e1e4c4899e 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupUCC.hpp @@ -119,7 +119,7 @@ class TORCH_API ProcessGroupUCC : public Backend { OpType opType, uint64_t seq, const char* prof_title, - const c10::optional>& inputs, + const std::optional>& inputs, const c10::intrusive_ptr& logger) : Work(-1, opType, prof_title, inputs), logger_(logger), seq_(seq) {} ~WorkUCC(); diff --git a/torch/csrc/distributed/c10d/Store.hpp b/torch/csrc/distributed/c10d/Store.hpp index 525440e767b47..af715ba98a794 100644 --- a/torch/csrc/distributed/c10d/Store.hpp +++ b/torch/csrc/distributed/c10d/Store.hpp @@ -13,7 +13,7 @@ namespace c10d { // callback function will be given arguments (optional oldValue, // optional newValue) using WatchKeyCallback = - std::function, c10::optional)>; + std::function, c10::optional)>; class TORCH_API Store : public torch::CustomClassHolder { public: diff --git a/torch/csrc/distributed/c10d/TCPStore.cpp b/torch/csrc/distributed/c10d/TCPStore.cpp index a95f0ebdb1e26..aee1d7677dc4d 100644 --- a/torch/csrc/distributed/c10d/TCPStore.cpp +++ b/torch/csrc/distributed/c10d/TCPStore.cpp @@ -268,7 +268,7 @@ using detail::Socket; TCPStore::TCPStore( const std::string& masterAddr, std::uint16_t masterPort, - c10::optional numWorkers, + std::optional numWorkers, bool isServer, const std::chrono::milliseconds& timeout, bool waitWorkers) @@ -277,7 +277,7 @@ TCPStore::TCPStore( TCPStoreOptions{ masterPort, isServer, - numWorkers ? c10::optional(*numWorkers) + numWorkers ? 
std::optional(*numWorkers)
                     : c10::nullopt,
           waitWorkers,
           timeout}} {}
diff --git a/torch/csrc/distributed/c10d/TCPStore.hpp b/torch/csrc/distributed/c10d/TCPStore.hpp
index 03a7f124ca710..7080d50136e96 100644
--- a/torch/csrc/distributed/c10d/TCPStore.hpp
+++ b/torch/csrc/distributed/c10d/TCPStore.hpp
@@ -49,7 +49,7 @@ struct TCPStoreOptions {
   std::uint16_t port = kDefaultPort;
   bool isServer = false;
-  c10::optional numWorkers = c10::nullopt;
+  std::optional numWorkers = c10::nullopt;
   bool waitWorkers = true;
   std::chrono::milliseconds timeout = Store::kDefaultTimeout;
@@ -60,7 +60,7 @@ struct TCPStoreOptions {
   // If specified, and if isServer is true, the underlying TCPServer will take
   // over the bound socket associated to this fd. This option is useful to avoid
   // port assignment races in certain scenarios.
-  c10::optional masterListenFd = c10::nullopt;
+  std::optional masterListenFd = c10::nullopt;
   // A boolean value indicating whether to use the experimental libUV backend.
   bool useLibUV = false;
@@ -73,7 +73,7 @@ class TORCH_API TCPStore : public Store {
   [[deprecated("Use TCPStore(host, opts) instead.")]] explicit TCPStore(
       const std::string& masterAddr,
       std::uint16_t masterPort,
-      c10::optional numWorkers = c10::nullopt,
+      std::optional numWorkers = c10::nullopt,
       bool isServer = false,
       const std::chrono::milliseconds& timeout = kDefaultTimeout,
       bool waitWorkers = true);
@@ -152,7 +152,7 @@ class TORCH_API TCPStore : public Store {
   detail::SocketAddress addr_;
   std::shared_ptr server_;
   std::unique_ptr client_;
-  c10::optional numWorkers_;
+  std::optional numWorkers_;
   const std::string initKey_ = "init/";
   const std::string keyPrefix_ = "/";
diff --git a/torch/csrc/distributed/c10d/TraceUtils.h b/torch/csrc/distributed/c10d/TraceUtils.h
index 32f0e1f41df01..ff1bf5b6ed9a2 100644
--- a/torch/csrc/distributed/c10d/TraceUtils.h
+++ b/torch/csrc/distributed/c10d/TraceUtils.h
@@ -417,18 +417,18 @@ struct NCCLTraceBuffer {
     // timestamp when the entry was created, likely close to the time the work
     // was 'enqueued'- not necessarily started
     c10::time_t time_created_;
-    c10::optional duration_;
+    std::optional duration_;
     // timestamp when our CPU threads discovered that the kernel started.
     // will always be _after_ it actually started, and can be very late
     // if the watchdog thread got stuck on CUDA APIs.
-    c10::optional time_discovered_started_;
+    std::optional time_discovered_started_;
     // timestamp when our CPU threads discovered that the kernel completed.
     // will always be _after_ it actually complated, and can be the same time
     // as the discovery of the start if the watchdog thread is stuck on CUDA
     // APIs
-    c10::optional time_discovered_completed_;
+    std::optional time_discovered_completed_;
     // size information for input/output tensors
     c10::SmallVector input_dims_;
@@ -448,7 +448,7 @@ struct NCCLTraceBuffer {
   std::map, std::vector>
       pg_name_to_ranks_ = {};
-  c10::optional record(
+  std::optional record(
       size_t pg_id,
       const std::tuple& pg_name,
       size_t seq_id,
@@ -551,7 +551,7 @@ struct NCCLTraceBuffer {
     never hang. (timing must also be enabled for compute_duration - see
     TORCH_NCCL_ENABLE_TIMING).
   */
-  void retire_id(c10::optional id, bool compute_duration = true) {
+  void retire_id(std::optional id, bool compute_duration = true) {
    if (!enabled_ || !id) {
      return;
    }
@@ -559,7 +559,7 @@ struct NCCLTraceBuffer {
    bool can_compute_duration = false;
    Event* startEvent = nullptr;
    Event* endEvent = nullptr;
-    c10::optional duration = c10::nullopt;
+    std::optional duration = c10::nullopt;
    std::unique_lock guard(mutex_);
@@ -601,7 +601,7 @@ struct NCCLTraceBuffer {
   }
   std::string dump(
-      const c10::optional>>& ncclDumpMap) {
     auto result = dump_entries();
diff --git a/torch/csrc/distributed/c10d/Types.hpp b/torch/csrc/distributed/c10d/Types.hpp
index fab819798e555..669957a726735 100644
--- a/torch/csrc/distributed/c10d/Types.hpp
+++ b/torch/csrc/distributed/c10d/Types.hpp
@@ -121,7 +121,7 @@ struct BroadcastOptions {
 struct AllreduceOptions {
   ReduceOp reduceOp = ReduceOp::SUM;
   std::chrono::milliseconds timeout = kUnsetTimeout;
-  c10::optional sparseIndices = c10::nullopt;
+  std::optional sparseIndices = c10::nullopt;
 };
 struct AllreduceCoalescedOptions : AllreduceOptions {};
@@ -162,7 +162,7 @@ struct AllToAllOptions {
 struct BarrierOptions {
   std::vector device_ids;
   std::chrono::milliseconds timeout = kUnsetTimeout;
-  c10::optional device;
+  std::optional device;
 };
 struct DistributedBackendOptions {
diff --git a/torch/csrc/distributed/c10d/Work.cpp b/torch/csrc/distributed/c10d/Work.cpp
index 66c35b11e6c0f..8beb8f2936208 100644
--- a/torch/csrc/distributed/c10d/Work.cpp
+++ b/torch/csrc/distributed/c10d/Work.cpp
@@ -9,7 +9,7 @@ Work::Work(
     int rank,
     OpType opType,
     const char* profilingTitle,
-    const c10::optional>& inputTensors)
+    const std::optional>& inputTensors)
     : rank_(rank), opType_(opType) {
   if (profilingTitle != nullptr) {
     auto recordingFunction =
diff --git a/torch/csrc/distributed/c10d/Work.hpp b/torch/csrc/distributed/c10d/Work.hpp
index d106183231706..d29b838321176 100644
--- a/torch/csrc/distributed/c10d/Work.hpp
+++ b/torch/csrc/distributed/c10d/Work.hpp
@@ -50,7 +50,7 @@ class TORCH_API Work : public torch::CustomClassHolder {
       int rank = -1,
       OpType opType = OpType::UNKNOWN,
       const char* profilingTitle = nullptr,
-      const c10::optional>& inputTensors =
+      const std::optional>& inputTensors =
           c10::nullopt);
   ~Work() override;
diff --git a/torch/csrc/distributed/c10d/comm.hpp b/torch/csrc/distributed/c10d/comm.hpp
index d2c608532ba53..6f9203e214348 100644
--- a/torch/csrc/distributed/c10d/comm.hpp
+++ b/torch/csrc/distributed/c10d/comm.hpp
@@ -26,7 +26,7 @@ class TORCH_API GradBucket {
       std::vector lengths,
       std::vector sizes_vec,
       std::vector parameters,
-      c10::optional sparse_grad_indices)
+      std::optional sparse_grad_indices)
       : index_(index),
         bucket_count_(bucket_count),
         buffer_(std::move(tensor)),
@@ -72,7 +72,7 @@ class TORCH_API GradBucket {
     return index_ == bucket_count_ - 1;
   }
-  c10::optional& getSparseGradIndices() {
+  std::optional& getSparseGradIndices() {
     return sparse_grad_indices_;
   }
@@ -92,7 +92,7 @@ class TORCH_API GradBucket {
   // Predefined sparse indices for this bucket (only used for sparse tensors).
   // The gradients will be updated to have indices with these tensor values
-  c10::optional sparse_grad_indices_;
+  std::optional sparse_grad_indices_;
 };
 // Base class of both `PythonCommHook` and `CppCommHook`.
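
Aside, illustrative only and not part of the patch: the c10d hunks above all make the same mechanical change, the declared type of an optional parameter or member becomes `std::optional` while its `c10::nullopt` default is left in place. A minimal standalone sketch of that calling pattern, using plain `std::optional`/`std::nullopt` and hypothetical names (`record_work`, `inputNames`) rather than the real `Work` types:

```
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Hypothetical stand-in for the Work-style constructors touched above:
// both the profiling title and the list of input names are optional.
void record_work(
    const char* profilingTitle = nullptr,
    const std::optional<std::vector<std::string>>& inputNames = std::nullopt) {
  if (profilingTitle == nullptr || !inputNames.has_value()) {
    std::cout << "nothing to profile\n";
    return;
  }
  std::cout << *profilingTitle << ": " << inputNames->size() << " inputs\n";
}

int main() {
  record_work();  // both arguments defaulted
  record_work("gloo:all_to_all", std::vector<std::string>{"t0", "t1"});
  return 0;
}
```
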
diff --git a/torch/csrc/distributed/c10d/init.cpp b/torch/csrc/distributed/c10d/init.cpp index 7cbd898499c38..483becbce0094 100644 --- a/torch/csrc/distributed/c10d/init.cpp +++ b/torch/csrc/distributed/c10d/init.cpp @@ -1409,14 +1409,14 @@ Example:: .def( py::init([](const std::string& host, uint16_t port, - c10::optional worldSize, + std::optional worldSize, bool isServer, std::chrono::milliseconds timeout, bool waitWorkers, bool multiTenant, - c10::optional masterListenFd, + std::optional masterListenFd, bool useLibUV) { - c10::optional numWorkers = c10::nullopt; + std::optional numWorkers = c10::nullopt; if (worldSize.has_value() && worldSize.value() > -1) { numWorkers = static_cast(worldSize.value()); } @@ -1801,14 +1801,14 @@ that adds a prefix to each key inserted to the store. [](const c10::intrusive_ptr<::c10d::ProcessGroup>& self, const c10::Device& device, const ::c10d::ProcessGroup::BackendType& backendType, - const c10::optional>& + const std::optional>& backend) { self->setBackend(device.type(), backendType, backend); }, py::arg("device"), py::arg("backend_type"), py::arg("backend") = - c10::optional>(), + std::optional>(), py::call_guard()) .def( "_get_backend", @@ -2432,7 +2432,7 @@ options :class:`~torch.distributed.ProcessGroupNCCL.Options`). py::init([](const c10::intrusive_ptr<::c10d::Store>& store, size_t rank, size_t world_size, - c10::optional buffer_size) { + std::optional buffer_size) { auto comm = c10::make_intrusive( store, rank, world_size, buffer_size); if (!comm->rendezvous()) { @@ -2744,7 +2744,7 @@ such as `dist.all_reduce(tensor, async_op=True)`. const std::vector& bucket_size_limits, const std::vector& expect_sparse_gradient, const std::vector& tensor_indices, - const c10::optional>& logger) { + const std::optional>& logger) { if (logger.has_value()) { std::weak_ptr<::c10d::Logger> logger_weakref = logger.value(); return ::c10d::compute_bucket_assignment_by_size( @@ -2766,14 +2766,14 @@ such as `dist.all_reduce(tensor, async_op=True)`. py::arg("bucket_size"), py::arg("expect_sparse_gradient") = std::vector(), py::arg("tensor_indices") = std::vector(), - py::arg("logger") = c10::optional>{}, + py::arg("logger") = std::optional>{}, py::call_guard()); module.def( "_verify_params_across_processes", [](const c10::intrusive_ptr<::c10d::ProcessGroup>& process_group, const std::vector& params, - const c10::optional>& logger) { + const std::optional>& logger) { if (logger.has_value()) { std::weak_ptr<::c10d::Logger> logger_weakref = logger.value(); verify_params_across_processes( @@ -2784,7 +2784,7 @@ such as `dist.all_reduce(tensor, async_op=True)`. 
}, py::arg("process_group"), py::arg("params"), - py::arg("logger") = c10::optional>{}, + py::arg("logger") = std::optional>{}, py::call_guard()); module.def( diff --git a/torch/csrc/distributed/c10d/intra_node_comm.cpp b/torch/csrc/distributed/c10d/intra_node_comm.cpp index d18262ecfa3f5..ceec7bbd0f9ce 100644 --- a/torch/csrc/distributed/c10d/intra_node_comm.cpp +++ b/torch/csrc/distributed/c10d/intra_node_comm.cpp @@ -207,7 +207,7 @@ IntraNodeComm::IntraNodeComm( c10::intrusive_ptr store, size_t rank, size_t worldSize, - c10::optional bufferSize) + std::optional bufferSize) : store_(std::move(store)), rank_(rank), worldSize_(worldSize), diff --git a/torch/csrc/distributed/c10d/intra_node_comm.cu b/torch/csrc/distributed/c10d/intra_node_comm.cu index 6d72bde221253..ce479cd187bc4 100644 --- a/torch/csrc/distributed/c10d/intra_node_comm.cu +++ b/torch/csrc/distributed/c10d/intra_node_comm.cu @@ -732,7 +732,7 @@ static __global__ void barrierKernel( } } -void IntraNodeComm::barrier(c10::optional> ranks) { +void IntraNodeComm::barrier(std::optional> ranks) { if (!ranks.has_value()) { ranks = std::vector(worldSize_); std::iota(ranks->begin(), ranks->end(), 0); diff --git a/torch/csrc/distributed/c10d/intra_node_comm.hpp b/torch/csrc/distributed/c10d/intra_node_comm.hpp index ab27ecef97338..fe591978c5332 100644 --- a/torch/csrc/distributed/c10d/intra_node_comm.hpp +++ b/torch/csrc/distributed/c10d/intra_node_comm.hpp @@ -33,7 +33,7 @@ class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target { c10::intrusive_ptr store, size_t rank, size_t worldSize, - c10::optional bufferSize = c10::nullopt); + std::optional bufferSize = c10::nullopt); ~IntraNodeComm() override; @@ -61,7 +61,7 @@ class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target { /** * Perform a barrier among the specified ranks. */ - void barrier(c10::optional> ranks = c10::nullopt); + void barrier(std::optional> ranks = c10::nullopt); /** * Puts the given tensor into the p2p buffer of the current rank at the diff --git a/torch/csrc/distributed/c10d/logger.cpp b/torch/csrc/distributed/c10d/logger.cpp index 3ce4880930cb2..711039bf48595 100644 --- a/torch/csrc/distributed/c10d/logger.cpp +++ b/torch/csrc/distributed/c10d/logger.cpp @@ -247,7 +247,7 @@ void Logger::calculate_avg_time( Timer::Event start_event, Timer::Event end_event) { TORCH_CHECK(num_iterations_stats_recorded_ > 0); - c10::optional maybe_time_duration = + std::optional maybe_time_duration = timer.measureDifference(start_event, end_event); if (!maybe_time_duration.has_value()) { return; diff --git a/torch/csrc/distributed/c10d/reducer.cpp b/torch/csrc/distributed/c10d/reducer.cpp index a885bd2e9e7cb..d600426192cef 100644 --- a/torch/csrc/distributed/c10d/reducer.cpp +++ b/torch/csrc/distributed/c10d/reducer.cpp @@ -51,7 +51,7 @@ class CpuTimer : public Timer { public: explicit CpuTimer(c10::Device /* unused */) {} - c10::optional measureDifference(Event start, Event end) override { + std::optional measureDifference(Event start, Event end) override { int64_t start_time = getTimeRef(start); int64_t end_time = getTimeRef(end); // If cpu_end_time is not recorded in this iteration, @@ -2096,7 +2096,7 @@ compute_bucket_assignment_by_size( const std::vector& bucket_size_limits, const std::vector& expect_sparse_gradient, const std::vector& tensor_indices, - const c10::optional>& logger) { + const std::optional>& logger) { // Either expect_sparse_gradient is not specified or it has as many elements // as the vector with tensors. 
TORCH_INTERNAL_ASSERT( @@ -2221,7 +2221,7 @@ compute_bucket_assignment_by_size( void verify_params_across_processes( const c10::intrusive_ptr& process_group, const std::vector& params, - const c10::optional>& logger) { + const std::optional>& logger) { // First verify number of parameters to avoid inconsistent inputs into // broadcast which can cause a crash. // See https://github.com/pytorch/pytorch/issues/73547 diff --git a/torch/csrc/distributed/c10d/reducer.hpp b/torch/csrc/distributed/c10d/reducer.hpp index e940a56bd650a..1f72b0eb37b9f 100644 --- a/torch/csrc/distributed/c10d/reducer.hpp +++ b/torch/csrc/distributed/c10d/reducer.hpp @@ -261,10 +261,10 @@ class TORCH_API Reducer { std::weak_ptr logger_; // List of futures installed by Reducer::install_futures that should be // awaited at the end of backwards pass. - c10::optional>> + std::optional>> installed_futures_{c10::nullopt}; // Mixed precision parameter dtype for bucket type checking. - c10::optional mixed_precision_param_dtype_{c10::nullopt}; + std::optional mixed_precision_param_dtype_{c10::nullopt}; // Work handle for allreduce on local_used_map_ c10::intrusive_ptr local_used_work_; @@ -389,7 +389,7 @@ class TORCH_API Reducer { bool expect_sparse_gradient = false; // Sparse indices tensor - c10::optional sparse_tensor_indices = c10::nullopt; + std::optional sparse_tensor_indices = c10::nullopt; // TODO(@pietern) // Memory copies from gradient tensors into the bucket are potentially @@ -576,12 +576,12 @@ compute_bucket_assignment_by_size( const std::vector& bucket_size, const std::vector& expect_sparse_gradient = {}, const std::vector& tensor_indices = {}, - const c10::optional>& logger = {}); + const std::optional>& logger = {}); // Verify models across all processes are the same as model on rank 0 with // respect to no. of params and matching dtype/size/layout. TORCH_API void verify_params_across_processes( const c10::intrusive_ptr& process_group, const std::vector& params, - const c10::optional>& logger); + const std::optional>& logger); } // namespace c10d diff --git a/torch/csrc/distributed/c10d/reducer_cuda.cpp b/torch/csrc/distributed/c10d/reducer_cuda.cpp index b63e9d3d6f3c8..84bff02072b60 100644 --- a/torch/csrc/distributed/c10d/reducer_cuda.cpp +++ b/torch/csrc/distributed/c10d/reducer_cuda.cpp @@ -48,7 +48,7 @@ class CudaTimer : public Timer { getEvent(event).record(); } - c10::optional measureDifference(Event start, Event end) override { + std::optional measureDifference(Event start, Event end) override { c10::DeviceGuard g(device); at::cuda::CUDAEvent& start_event = getEvent(start); at::cuda::CUDAEvent& end_event = getEvent(end); diff --git a/torch/csrc/distributed/c10d/reducer_timer.hpp b/torch/csrc/distributed/c10d/reducer_timer.hpp index 5f57051455f62..f9b9f11c8c963 100644 --- a/torch/csrc/distributed/c10d/reducer_timer.hpp +++ b/torch/csrc/distributed/c10d/reducer_timer.hpp @@ -39,12 +39,12 @@ class TORCH_API Timer { // Return the difference between when two events occurred, in nanoseconds. // Or nullopt if one of them hasn't been recorded. - virtual c10::optional measureDifference(Event start, Event end) = 0; + virtual std::optional measureDifference(Event start, Event end) = 0; virtual ~Timer() = default; // Return host-side timestamp, or nullopt if it has not yet been recorded. 
-  c10::optional getTimestamp(Event event) {
+  std::optional getTimestamp(Event event) {
     auto time = getTimeRef(event);
     if (time == kUnsetTime) {
       return c10::nullopt;
diff --git a/torch/csrc/distributed/c10d/sequence_num.hpp b/torch/csrc/distributed/c10d/sequence_num.hpp
index 8c80642f42784..ce31f4b552728 100644
--- a/torch/csrc/distributed/c10d/sequence_num.hpp
+++ b/torch/csrc/distributed/c10d/sequence_num.hpp
@@ -59,7 +59,7 @@ class TORCH_API SequenceNum {
   SequenceNum(const SequenceNum& other);
  private:
-  c10::optional num_;
+  std::optional num_;
   mutable std::mutex lock_;
 };
diff --git a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp
index 255a16af6bb0d..3a37e7b02a5f0 100644
--- a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp
+++ b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp
@@ -9,7 +9,7 @@ namespace distributed {
 namespace rpc {
 const std::string REMOTE_PROFILING_KEY_PREFIX = "#remote_op: ";
 constexpr int kAutoIncrementBits = 48;
-/*static */ thread_local c10::optional
+/*static */ thread_local std::optional
     RemoteProfilerManager::currentThreadLocalKey_ = c10::nullopt;
 /*static */ RemoteProfilerManager& RemoteProfilerManager::getInstance() {
   static RemoteProfilerManager* handler = new RemoteProfilerManager();
diff --git a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h
index d85ee5a393078..c6f8b353806b5 100644
--- a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h
+++ b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h
@@ -50,7 +50,7 @@ class TORCH_API RemoteProfilerManager {
   local_id_t getNextLocalId();
   std::unordered_map profiledRpcKeys_;
-  static thread_local c10::optional currentThreadLocalKey_;
+  static thread_local std::optional currentThreadLocalKey_;
   std::mutex mutex_;
   local_id_t currentLocalId_;
 };
diff --git a/torch/csrc/distributed/rpc/py_rref.h b/torch/csrc/distributed/rpc/py_rref.h
index 432141a97cf5c..2c9fd3433d045 100644
--- a/torch/csrc/distributed/rpc/py_rref.h
+++ b/torch/csrc/distributed/rpc/py_rref.h
@@ -75,8 +75,8 @@ class PYBIND11_EXPORT PyRRef {
  private:
   c10::intrusive_ptr rref_;
-  c10::optional> profilingFuture_;
-  c10::optional type_;
+  std::optional> profilingFuture_;
+  std::optional type_;
 };
 } // namespace rpc
diff --git a/torch/csrc/distributed/rpc/rpc_agent.h b/torch/csrc/distributed/rpc/rpc_agent.h
index 0b04c08287087..8f9222a2e8647 100644
--- a/torch/csrc/distributed/rpc/rpc_agent.h
+++ b/torch/csrc/distributed/rpc/rpc_agent.h
@@ -170,7 +170,7 @@ class TORCH_API RpcAgent {
       RpcRetryOptions retryOptions = RpcRetryOptions());
   // Return a reference to the ``WorkerInfo`` of this RpcAgent.
-  // NB: not using ``c10::optional`` here because we might
+  // NB: not using ``std::optional`` here because we might
   // need to create a separate RPC API lib and avoid forcing all ``RpcAgent``
   // implementations to depend on libtorch.
const WorkerInfo& getWorkerInfo() const; diff --git a/torch/csrc/distributed/rpc/rref_impl.cpp b/torch/csrc/distributed/rpc/rref_impl.cpp index a770379438901..98d8f1afcb86b 100644 --- a/torch/csrc/distributed/rpc/rref_impl.cpp +++ b/torch/csrc/distributed/rpc/rref_impl.cpp @@ -248,7 +248,7 @@ OwnerRRef::OwnerRRef( worker_id_t ownerId, const RRefId& rrefId, TypePtr type, - c10::optional value, + std::optional value, std::vector devices) : RRef(ownerId, rrefId, type) { future_ = c10::make_intrusive(type_, std::move(devices)); diff --git a/torch/csrc/distributed/rpc/rref_impl.h b/torch/csrc/distributed/rpc/rref_impl.h index ccb00b45e1d5e..d6da3f2ea455f 100644 --- a/torch/csrc/distributed/rpc/rref_impl.h +++ b/torch/csrc/distributed/rpc/rref_impl.h @@ -366,7 +366,7 @@ class TORCH_API OwnerRRef final : public RRef { worker_id_t ownerId, const RRefId& rrefId, TypePtr type, - c10::optional value, + std::optional value, std::vector devices); inline bool isOwner() const override { diff --git a/torch/csrc/distributed/rpc/script_call.h b/torch/csrc/distributed/rpc/script_call.h index 2fc0efb8cdc71..dacded5cc1e62 100644 --- a/torch/csrc/distributed/rpc/script_call.h +++ b/torch/csrc/distributed/rpc/script_call.h @@ -58,10 +58,10 @@ class TORCH_API ScriptCall : public RpcCommandBase { // This field has value if this ScriptCall represents invocation of a builtin // operator. - c10::optional> op_; + std::optional> op_; // This field has non empty string if this ScriptCall represents invocation of // an annotated torchscript function defined by users. - c10::optional qualifiedName_; + std::optional qualifiedName_; std::vector stack_; const bool isAsyncExecution_; }; diff --git a/torch/csrc/distributed/rpc/tensorpipe_agent.cpp b/torch/csrc/distributed/rpc/tensorpipe_agent.cpp index 0f0cf00201612..8af4336c07467 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_agent.cpp +++ b/torch/csrc/distributed/rpc/tensorpipe_agent.cpp @@ -111,7 +111,7 @@ std::vector getCurrentStreamsForDevices( std::vector getDevicesOfTensors( const std::vector& tensors) { - c10::optional impl; + std::optional impl; size_t deviceCount = 0; std::vector indexBitset; for (const torch::Tensor& tensor : tensors) { diff --git a/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp b/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp index 968f599752d64..50cc97785f61d 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp +++ b/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp @@ -74,7 +74,7 @@ C10_REGISTER_CREATOR( class TensorpipeCudaConverter : public TensorpipeDeviceTypeConverter { public: - c10::optional> prepareTensorForSending( + std::optional> prepareTensorForSending( const c10::Storage& storage, const std::vector& streams, tensorpipe::Message& message) const override { diff --git a/torch/csrc/distributed/rpc/tensorpipe_utils.cpp b/torch/csrc/distributed/rpc/tensorpipe_utils.cpp index 0b3715f44f86d..929ae30f8a6d4 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_utils.cpp +++ b/torch/csrc/distributed/rpc/tensorpipe_utils.cpp @@ -38,7 +38,7 @@ inline c10::Device indexToDevice(c10::DeviceIndex index) { class TensorpipeCpuConverter : public TensorpipeDeviceTypeConverter { public: - c10::optional> prepareTensorForSending( + std::optional> prepareTensorForSending( const c10::Storage& storage, const std::vector& /* streams */, tensorpipe::Message& message) const override { @@ -192,7 +192,7 @@ std::tuple tensorpipeSerialize( tensor.device()); TORCH_INTERNAL_ASSERT(tpMessage.tensors.size() == i); - c10::optional> maybeCopiedTensor = + std::optional> 
maybeCopiedTensor = converter->prepareTensorForSending( tensor.storage(), streams, tpMessage); TORCH_INTERNAL_ASSERT(tpMessage.tensors.size() == i + 1); diff --git a/torch/csrc/distributed/rpc/tensorpipe_utils.h b/torch/csrc/distributed/rpc/tensorpipe_utils.h index 1011a9c34c3d8..d269a5bfbf565 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_utils.h +++ b/torch/csrc/distributed/rpc/tensorpipe_utils.h @@ -27,7 +27,7 @@ class TensorpipeDeviceTypeConverter { // cannot include the TensorPipe headers because it's a private dependency. // Thus we bend over backwards and entrust this method with appending that // object to the `tensors` field of the tensorpipe::Message object we pass. - virtual c10::optional> prepareTensorForSending( + virtual std::optional> prepareTensorForSending( const c10::Storage& storage, const std::vector& streams, tensorpipe::Message& message) const = 0; diff --git a/torch/csrc/dynamo/compiled_autograd.h b/torch/csrc/dynamo/compiled_autograd.h index a92d6ade0c002..ca2fd412cf8d4 100644 --- a/torch/csrc/dynamo/compiled_autograd.h +++ b/torch/csrc/dynamo/compiled_autograd.h @@ -232,7 +232,7 @@ class CompiledNodeArgs { collect(t.list); } template - void collect(const c10::optional& t) { + void collect(const std::optional& t) { if (cond(t.has_value())) { collect(*t); } @@ -520,20 +520,20 @@ class CompiledNodeArgs { struct TraceState { TraceState( - const std::vector>& ss, + const std::vector>& ss, size_t num_outputs) : sym_sizes(ss), outputs(num_outputs) {} void debug_asserts() { TORCH_INTERNAL_ASSERT(sym_sizes_index == sym_sizes.size()); } - c10::optional next_sym_size() { + std::optional next_sym_size() { TORCH_INTERNAL_ASSERT(sym_sizes_index < sym_sizes.size()); return sym_sizes[sym_sizes_index++]; } size_t sym_sizes_index{0}; - std::vector> sym_sizes; + std::vector> sym_sizes; variable_list outputs; }; @@ -664,13 +664,13 @@ class SwapSavedVariables { } template - void before(c10::optional& t) { + void before(std::optional& t) { if (t.has_value()) { before(*t); } } template - void after(c10::optional& t) { + void after(std::optional& t) { if (t.has_value()) { after(*t); } diff --git a/torch/csrc/dynamo/python_compiled_autograd.cpp b/torch/csrc/dynamo/python_compiled_autograd.cpp index dd5ea7cbd094f..fb27b39b28e6a 100644 --- a/torch/csrc/dynamo/python_compiled_autograd.cpp +++ b/torch/csrc/dynamo/python_compiled_autograd.cpp @@ -203,12 +203,12 @@ struct CacheNode { return pyinput; } - std::vector> unwrap_dynamic_inputs( + std::vector> unwrap_dynamic_inputs( PyObject* pyresult) const { TORCH_INTERNAL_ASSERT(PyList_CheckExact(pyresult)); size_t idx = 0; size_t result_len = PyList_GET_SIZE(pyresult); - std::vector> result; + std::vector> result; result.reserve(expected_sizes.size()); for (const auto& i : expected_sizes) { if (i.dyn_type == SizeInput::DYNAMIC) { diff --git a/torch/csrc/functorch/init.cpp b/torch/csrc/functorch/init.cpp index c2996fe5278a7..6bce80ad27766 100644 --- a/torch/csrc/functorch/init.cpp +++ b/torch/csrc/functorch/init.cpp @@ -375,7 +375,7 @@ static int64_t currentLevel() { return current_level; } -static c10::optional maybe_current_level() { +static std::optional maybe_current_level() { auto maybe_layer = maybeCurrentDynamicLayer(); if (maybe_layer.has_value()) { int current_level = maybe_layer->layerId(); @@ -438,7 +438,7 @@ struct PreserveDynamicLayerStack { } // anonymous namespace -static std::tuple> unwrapBatched( +static std::tuple> unwrapBatched( const Tensor& tensor, int64_t level) { auto* batched = maybeGetBatchedImpl(tensor); @@ -534,7 
+534,7 @@ void initFuncTorchBindings(PyObject* module) { return maybe_get_level(tensor) != -1; }); m.def( - "get_interpreter_stack", []() -> c10::optional> { + "get_interpreter_stack", []() -> std::optional> { const auto& stack = getDynamicLayerStack(); if (stack.empty()) { return c10::nullopt; @@ -545,7 +545,7 @@ void initFuncTorchBindings(PyObject* module) { } return result; }); - m.def("peek_interpreter_stack", []() -> c10::optional { + m.def("peek_interpreter_stack", []() -> std::optional { const auto& stack = getDynamicLayerStack(); if (stack.empty()) { return c10::nullopt; diff --git a/torch/csrc/inductor/aoti_eager/kernel_holder.cpp b/torch/csrc/inductor/aoti_eager/kernel_holder.cpp index 7bdf60fb146c7..238050f501223 100644 --- a/torch/csrc/inductor/aoti_eager/kernel_holder.cpp +++ b/torch/csrc/inductor/aoti_eager/kernel_holder.cpp @@ -81,8 +81,8 @@ bool unpack_ivalue( // ivalue is scalar unpack_scalar_ivalue(ivalue, device, inputs); } else if ( - *argument.real_type() == *c10::getTypePtr>()) { - // ivalue is c10::optional + *argument.real_type() == *c10::getTypePtr>()) { + // ivalue is std::optional unpack_optional_tensor_ivalue(ivalue, device, inputs); } else { // Unsupport IValue type. diff --git a/torch/csrc/inductor/aoti_torch/shim_common.cpp b/torch/csrc/inductor/aoti_torch/shim_common.cpp index bd45a4a9f0f87..79cea0cb45ec8 100644 --- a/torch/csrc/inductor/aoti_torch/shim_common.cpp +++ b/torch/csrc/inductor/aoti_torch/shim_common.cpp @@ -775,7 +775,7 @@ AOTITorchError aoti_torch_index_put_out( const AtenTensorHandle values, bool accumulate) { AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE({ - c10::List> indices_; + c10::List> indices_; indices_.reserve(num_indices); for (size_t i = 0; i < num_indices; i++) { indices_.emplace_back( diff --git a/torch/csrc/inductor/aoti_torch/utils.h b/torch/csrc/inductor/aoti_torch/utils.h index a0739afabd5ee..0964479caabd8 100644 --- a/torch/csrc/inductor/aoti_torch/utils.h +++ b/torch/csrc/inductor/aoti_torch/utils.h @@ -39,29 +39,29 @@ inline AtenTensorHandle new_tensor_handle(at::Tensor&& tensor) { // utility functions to convert a pointer to an optional value template -inline c10::optional pointer_to_optional(T* ptr) { +inline std::optional pointer_to_optional(T* ptr) { return ptr ? c10::make_optional(*ptr) : c10::nullopt; } template >> -inline c10::optional pointer_to_optional(U* ptr) { +inline std::optional pointer_to_optional(U* ptr) { return ptr ? c10::make_optional(T(*ptr)) : c10::nullopt; } template <> -inline c10::optional pointer_to_optional(AtenTensorHandle* ptr) { +inline std::optional pointer_to_optional(AtenTensorHandle* ptr) { return ptr ? c10::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) : c10::nullopt; } template <> -inline c10::optional pointer_to_optional( +inline std::optional pointer_to_optional( const AtenTensorHandle* ptr) { return ptr ? c10::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) : c10::nullopt; } -inline c10::optional pointer_to_optional_device( +inline std::optional pointer_to_optional_device( int32_t* device_type, int32_t device_index) { return device_type ? 
c10::make_optional(c10::Device( @@ -74,7 +74,7 @@ inline c10::optional pointer_to_optional_device( template struct is_optional : std::false_type {}; template -struct is_optional> : std::true_type {}; +struct is_optional> : std::true_type {}; template inline c10::ArrayRef pointer_to_list(T* ptr, int64_t len) { @@ -123,10 +123,10 @@ inline std::vector pointer_to_list( } template <> -inline std::vector> pointer_to_list( +inline std::vector> pointer_to_list( const AtenTensorHandle** ptr, int64_t len) { - std::vector> result; + std::vector> result; result.reserve(len); for (int64_t i = 0; i < len; i++) { result.emplace_back(pointer_to_optional(ptr[i])); @@ -143,7 +143,7 @@ inline std::array pointer_to_list(const int32_t* ptr) { // Utility function to convert a pointer to an optional list of values template -inline c10::optional> pointer_to_optional_list( +inline std::optional> pointer_to_optional_list( U** ptr, int64_t len) { return ptr diff --git a/torch/csrc/jit/api/compilation_unit.h b/torch/csrc/jit/api/compilation_unit.h index 6203905732667..8e28ef4717b93 100644 --- a/torch/csrc/jit/api/compilation_unit.h +++ b/torch/csrc/jit/api/compilation_unit.h @@ -86,7 +86,7 @@ struct TORCH_API CompilationUnit { // for historic reasons, these are defined in ir_emitter.cpp // Returns the list of Functions just defined. std::vector define( - const c10::optional& prefix, + const std::optional& prefix, const std::vector& properties, const std::vector& propResolvers, const std::vector& definitions, @@ -97,10 +97,10 @@ struct TORCH_API CompilationUnit { const Self* self, // see [name mangling] bool shouldMangle = false, - c10::optional operator_set_version = c10::nullopt); + std::optional operator_set_version = c10::nullopt); void define_hooks( - const c10::optional& prefix, + const std::optional& prefix, const std::vector& hookDefs, const std::vector& hookResolvers, const std::vector& preHookDefs, @@ -112,7 +112,7 @@ struct TORCH_API CompilationUnit { // Returns the list of Functions just defined. std::vector define( // prefix namespace to put all the defined functions into - const c10::optional& prefix, + const std::optional& prefix, const std::string& source, const ResolverPtr& resolver, const Self* self); @@ -286,19 +286,19 @@ struct TORCH_API CompilationUnit { private: std::unique_ptr define( - const c10::optional& prefix, + const std::optional& prefix, const Def& def, const ResolverPtr& resolver, const Self* self, const std::unordered_map& function_table, bool shouldMangle = false, FunctionType type = FunctionType::Method, - c10::optional version = c10::nullopt) const; + std::optional version = c10::nullopt) const; // Define a property on \p self. 
struct PropertyPair; PropertyPair define_property( - const c10::optional& prefix, + const std::optional& prefix, const Property& prop, const ResolverPtr& resolver, const Self* self, diff --git a/torch/csrc/jit/api/function_impl.h b/torch/csrc/jit/api/function_impl.h index 74663cfb41ce7..6ed8cb36199ef 100644 --- a/torch/csrc/jit/api/function_impl.h +++ b/torch/csrc/jit/api/function_impl.h @@ -12,7 +12,7 @@ struct TORCH_API GraphFunction : public Function { c10::QualifiedName name, std::shared_ptr graph, std::function function_creator, - c10::optional executor_execution_mode = + std::optional executor_execution_mode = c10::nullopt) : name_(std::move(name)), graph_(std::move(graph)), @@ -108,7 +108,7 @@ struct TORCH_API GraphFunction : public Function { using Function::call; bool call( Stack& stack, - c10::optional bailOut, + std::optional bailOut, c10::function_ref f) override { f(get_executor().getPlanFor(stack, bailOut).code); return true; @@ -139,7 +139,7 @@ struct TORCH_API GraphFunction : public Function { // allows users to specify Simple/Profiling Executor for function // TODO: add more executors - mutable c10::optional executor_execution_mode_; + mutable std::optional executor_execution_mode_; // if invoked on a graph that has already traced through amp // don't invoke amp pass @@ -159,7 +159,7 @@ struct TORCH_API GraphFunction : public Function { // executor_[1] - autocast cpu on // executor_[2] - autocast gpu on // executor_[3] - autocast cpu & gpu on - std::array, SpecializationKey::TotalCount> + std::array, SpecializationKey::TotalCount> executors_; // an optional function that actually creates the method when diff --git a/torch/csrc/jit/api/module.cpp b/torch/csrc/jit/api/module.cpp index e32d2bba34501..1b9932ed34d4d 100644 --- a/torch/csrc/jit/api/module.cpp +++ b/torch/csrc/jit/api/module.cpp @@ -167,8 +167,8 @@ void Module::to(at::Device device, bool non_blocking) { static void module_state_to( const autograd::Variable& variable, - const c10::optional& device, - const c10::optional& dtype, + const std::optional& device, + const std::optional& dtype, bool non_blocking) { // Need to access the `at::Tensor` as a `Variable` here. // Use the data's original device or dtype if not supplied here. 
@@ -180,8 +180,8 @@ static void module_state_to( } void Module::to_impl( - const c10::optional& device, - const c10::optional& dtype, + const std::optional& device, + const std::optional& dtype, bool non_blocking) { for (at::Tensor e : parameters()) { module_state_to(e, device, dtype, non_blocking); @@ -317,7 +317,7 @@ Module Module::copy() const { return Module(_ivalue()->copy()); } -Module Module::deepcopy(c10::optional device) const { +Module Module::deepcopy(std::optional device) const { return Module(_ivalue()->deepcopy(device)); } @@ -476,7 +476,7 @@ IValue Module::create_class(const c10::QualifiedName& name, Stack stack) const { Module freeze( const Module& module, - const c10::optional>& preserved_attrs, + const std::optional>& preserved_attrs, bool optimize_numerics) { TORCH_CHECK( !module.hasattr("training") || !module.is_training(), diff --git a/torch/csrc/jit/api/module.h b/torch/csrc/jit/api/module.h index 6c49b695cb6b5..0787210a4aefe 100644 --- a/torch/csrc/jit/api/module.h +++ b/torch/csrc/jit/api/module.h @@ -238,7 +238,7 @@ struct TORCH_API Module : public Object { Module copy() const; - Module deepcopy(c10::optional device = c10::nullopt) const; + Module deepcopy(std::optional device = c10::nullopt) const; // Clones both the underlying `ClassType` and the module instance(data), this // function creates a new `ClassType` and returns a new instance that has the @@ -315,8 +315,8 @@ struct TORCH_API Module : public Object { } void to_impl( - const c10::optional& device, - const c10::optional& dtype, + const std::optional& device, + const std::optional& dtype, bool non_blocking); // Extra handle for the module to delete when itself is deleted @@ -333,7 +333,7 @@ struct TORCH_API Module : public Object { // details. TORCH_API Module freeze( const Module& module, - const c10::optional>& preserved_attrs = + const std::optional>& preserved_attrs = c10::nullopt, bool optimize_numerics = true); @@ -566,7 +566,7 @@ struct slot_list_impl { bool return_module_; // size of this list, cached on first request // when we need to filter the slot list - mutable c10::optional size_; + mutable std::optional size_; friend struct Module; }; diff --git a/torch/csrc/jit/api/object.cpp b/torch/csrc/jit/api/object.cpp index 0593916dbbaea..b707e76772765 100644 --- a/torch/csrc/jit/api/object.cpp +++ b/torch/csrc/jit/api/object.cpp @@ -14,7 +14,7 @@ Object::Object( c10::StrongTypePtr(std::move(cu), type), type->numAttributes())) {} -c10::optional Object::find_method(const std::string& basename) const { +std::optional Object::find_method(const std::string& basename) const { for (Function* fn : type()->methods()) { if (fn->name() == basename) { return Method(_ivalue(), fn); diff --git a/torch/csrc/jit/api/object.h b/torch/csrc/jit/api/object.h index 7ccacf385be53..164f6e2ac073a 100644 --- a/torch/csrc/jit/api/object.h +++ b/torch/csrc/jit/api/object.h @@ -46,7 +46,7 @@ struct TORCH_API Object { struct Property { std::string name; Method getter_func; - c10::optional setter_func; + std::optional setter_func; }; void setattr(const std::string& name, c10::IValue v) { @@ -129,7 +129,7 @@ struct TORCH_API Object { const Property get_property(const std::string& name) const { for (const auto& prop : type()->properties()) { if (prop.name == name) { - c10::optional setter = c10::nullopt; + std::optional setter = c10::nullopt; if (prop.setter) { setter = Method(_ivalue(), prop.setter); } @@ -142,7 +142,7 @@ struct TORCH_API Object { const std::vector get_properties() const { return c10::fmap(type()->properties(), 
[&](ClassType::Property prop) { - c10::optional setter = c10::nullopt; + std::optional setter = c10::nullopt; if (prop.setter) { setter = Method(_ivalue(), prop.setter); } @@ -153,7 +153,7 @@ struct TORCH_API Object { }); } - c10::optional find_method(const std::string& basename) const; + std::optional find_method(const std::string& basename) const; /// Run a method from this module. /// diff --git a/torch/csrc/jit/backends/backend_debug_info.h b/torch/csrc/jit/backends/backend_debug_info.h index 1d07beb6bdb3c..291eb48132e8e 100644 --- a/torch/csrc/jit/backends/backend_debug_info.h +++ b/torch/csrc/jit/backends/backend_debug_info.h @@ -27,7 +27,7 @@ class TORCH_API PyTorchBackendDebugInfo : public torch::CustomClassHolder { public: PyTorchBackendDebugInfo() = default; - c10::optional& getDebugInfoMap() { + std::optional& getDebugInfoMap() { return debug_info_map_; } @@ -36,7 +36,7 @@ class TORCH_API PyTorchBackendDebugInfo : public torch::CustomClassHolder { } private: - c10::optional debug_info_map_; + std::optional debug_info_map_; }; #else diff --git a/torch/csrc/jit/backends/xnnpack/xnnpack_graph_builder.cpp b/torch/csrc/jit/backends/xnnpack/xnnpack_graph_builder.cpp index 5bdcbe63797c4..a0b59a73f46f9 100644 --- a/torch/csrc/jit/backends/xnnpack/xnnpack_graph_builder.cpp +++ b/torch/csrc/jit/backends/xnnpack/xnnpack_graph_builder.cpp @@ -233,7 +233,7 @@ void XNNGraph::defineAllTensorValues() { size_t buffer_idx = 0; size_t num_bytes = 0; if (val->node()->kind() == prim::Constant) { - c10::optional constant = val->node()->t(attr::value); + std::optional constant = val->node()->t(attr::value); auto const_val = constant->toIValue().toTensor(); // Need tensor data to be contiguous for serialization auto cont_const_val = const_val.contiguous(); diff --git a/torch/csrc/jit/codegen/fuser/codegen.cpp b/torch/csrc/jit/codegen/fuser/codegen.cpp index 10ddf2267b21d..2f9217e133697 100644 --- a/torch/csrc/jit/codegen/fuser/codegen.cpp +++ b/torch/csrc/jit/codegen/fuser/codegen.cpp @@ -364,7 +364,7 @@ static void emitCheckFor( std::string generateKernel( const std::string& name, const Graph& graph, - const std::vector>>& + const std::vector>>& inputs, const std::vector>& outputs, const bool use_cuda) { diff --git a/torch/csrc/jit/codegen/fuser/codegen.h b/torch/csrc/jit/codegen/fuser/codegen.h index fc0b34e55fe7e..e42adc1314320 100644 --- a/torch/csrc/jit/codegen/fuser/codegen.h +++ b/torch/csrc/jit/codegen/fuser/codegen.h @@ -18,7 +18,7 @@ namespace fuser { TORCH_API std::string generateKernel( const std::string& name, const Graph& graph, - const std::vector>>& + const std::vector>>& inputs, const std::vector>& outputs, const bool use_cuda); diff --git a/torch/csrc/jit/codegen/fuser/compiler.cpp b/torch/csrc/jit/codegen/fuser/compiler.cpp index 52dc3a07fe765..3c05b70e8341a 100644 --- a/torch/csrc/jit/codegen/fuser/compiler.cpp +++ b/torch/csrc/jit/codegen/fuser/compiler.cpp @@ -225,7 +225,7 @@ std::shared_ptr compileKernel( // Creates chunk and flattened input descriptions std::vector chunk_desc; - std::vector>> + std::vector>> flat_inputs; { size_t input_index = 0; diff --git a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp index c930f3293aa56..5f692d50e6b54 100644 --- a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp +++ b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp @@ -59,7 +59,7 @@ static bool programExists(const std::string& program) { } #ifdef _MSC_VER -c10::optional exec(const std::wstring& cmd) { +std::optional exec(const 
std::wstring& cmd) { std::array buffer; std::wstring result; std::unique_ptr pipe( @@ -82,7 +82,7 @@ inline std::wstring& rtrim(std::wstring& s, const wchar_t* t = L" \t\n\r\f\v") { void activate() { wchar_t* root = nullptr; std::wstring cmd; - c10::optional exec_out; + std::optional exec_out; std::wstring path; std::wstring vcruntime_plat; std::wstring envvars; diff --git a/torch/csrc/jit/codegen/fuser/executor.cpp b/torch/csrc/jit/codegen/fuser/executor.cpp index fad7cfcd630da..8abb99283ffc7 100644 --- a/torch/csrc/jit/codegen/fuser/executor.cpp +++ b/torch/csrc/jit/codegen/fuser/executor.cpp @@ -26,7 +26,7 @@ namespace fuser { // Returns the "map size" for this run, which is the common size for all // intermediate tensors. -static c10::optional> getMapSize( +static std::optional> getMapSize( const KernelSpec& spec, at::TensorList args, at::IntArrayRef arg_subset) { @@ -67,7 +67,7 @@ static c10::optional> getMapSize( } // Tries to determine a map size for the instantiated kernel (see above) -static c10::optional> canRunKernel( +static std::optional> canRunKernel( const KernelSpec& spec, at::TensorList args) { // Short-circuits on size mismatch @@ -78,7 +78,7 @@ static c10::optional> canRunKernel( " arguments, but got ", args.size()); - c10::optional> map_size; + std::optional> map_size; for (const auto& broadcast_group : spec.inputBroadcastGroups()) { if (!map_size) { map_size = getMapSize(spec, args, broadcast_group); diff --git a/torch/csrc/jit/codegen/fuser/kernel_spec.h b/torch/csrc/jit/codegen/fuser/kernel_spec.h index 57806ed436311..2fc52f2d76f0f 100644 --- a/torch/csrc/jit/codegen/fuser/kernel_spec.h +++ b/torch/csrc/jit/codegen/fuser/kernel_spec.h @@ -117,7 +117,7 @@ struct TORCH_API KernelSpec { } // Cache functions - c10::optional> findKernel( + std::optional> findKernel( const ArgSpec& arg_spec) const { std::lock_guard guard{mutex_}; const auto it = kernels_.find(arg_spec); diff --git a/torch/csrc/jit/codegen/onednn/defer_size_check.cpp b/torch/csrc/jit/codegen/onednn/defer_size_check.cpp index 1dbef6643dba8..4d0f12564bd9c 100644 --- a/torch/csrc/jit/codegen/onednn/defer_size_check.cpp +++ b/torch/csrc/jit/codegen/onednn/defer_size_check.cpp @@ -41,7 +41,7 @@ class SizeCheckMover { // tensorexpr_elementwise_set that's defined in // torch/csrc/jit/runtime/symbolic_shape_registry_util.cpp OperatorMap schemaMap = get_tensorexpr_elementwise_set(); - c10::optional mapping = + std::optional mapping = schemaMap.find(u.user->getOperator()); return mapping == "unary"; }); diff --git a/torch/csrc/jit/codegen/onednn/graph_fuser.h b/torch/csrc/jit/codegen/onednn/graph_fuser.h index ee83edc68fc41..ab37ad0211b7a 100644 --- a/torch/csrc/jit/codegen/onednn/graph_fuser.h +++ b/torch/csrc/jit/codegen/onednn/graph_fuser.h @@ -39,7 +39,7 @@ class GraphRewriter { std::pair scanNode( Node* consumer, graph_node_list::iterator workblock_begin); - c10::optional tryMerge(Node* consumer, Node* producer); + std::optional tryMerge(Node* consumer, Node* producer); }; // This pass creates the subgraphs for oneDNN Graph Fusion Nodes. 
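
Aside, illustrative only and not part of the patch: several of the fuser signatures above, for example `findKernel` and `tryMerge`, are lookups that return an engaged optional on a hit and an empty optional on a miss. A small self-contained sketch of that shape, assuming only the standard library and using hypothetical names (`KernelCache`, `find`):

```
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

// Hypothetical cache keyed by a specialization string, mirroring the
// "engaged optional on hit, std::nullopt on miss" shape used above.
class KernelCache {
 public:
  void insert(const std::string& key, int kernelId) {
    kernels_[key] = kernelId;
  }

  std::optional<int> find(const std::string& key) const {
    auto it = kernels_.find(key);
    if (it == kernels_.end()) {
      return std::nullopt;  // miss: caller decides whether to compile
    }
    return it->second;  // hit
  }

 private:
  std::unordered_map<std::string, int> kernels_;
};

int main() {
  KernelCache cache;
  cache.insert("float32-contiguous", 7);
  if (auto id = cache.find("float32-contiguous")) {
    std::cout << "hit: " << *id << "\n";
  }
  if (!cache.find("float64-strided")) {
    std::cout << "miss\n";
  }
  return 0;
}
```
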
diff --git a/torch/csrc/jit/codegen/onednn/graph_helper.cpp b/torch/csrc/jit/codegen/onednn/graph_helper.cpp index fdd69f85c5d52..f8e54c8743216 100644 --- a/torch/csrc/jit/codegen/onednn/graph_helper.cpp +++ b/torch/csrc/jit/codegen/onednn/graph_helper.cpp @@ -22,7 +22,7 @@ static void fixConvOptionalBias(Node* node) { } } -static c10::optional getDimensions(Value* v) { +static std::optional getDimensions(Value* v) { if (v->type()->isSubtypeOf(TensorType::get())) { return v->type()->cast()->sizes().size(); } else { diff --git a/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp b/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp index c91ff9b3917a4..dfbfe467e9765 100644 --- a/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp +++ b/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp @@ -127,7 +127,7 @@ std::pair GraphRewriter::scanNode( // Try to merge `producer` into `consumer`. If successful, this destroys // `producer` and returns the `consumer` group. -c10::optional GraphRewriter::tryMerge(Node* consumer, Node* producer) { +std::optional GraphRewriter::tryMerge(Node* consumer, Node* producer) { AT_ASSERT(llgaHelper_.isLlgaSubgraph(consumer)); bool canMerge = llgaHelper_.shouldMerge(producer, consumer) && aliasDb_.moveBeforeTopologicallyValid(producer, consumer); diff --git a/torch/csrc/jit/codegen/onednn/prepare_binary.cpp b/torch/csrc/jit/codegen/onednn/prepare_binary.cpp index 795fce27e0083..a4f6d268694e3 100644 --- a/torch/csrc/jit/codegen/onednn/prepare_binary.cpp +++ b/torch/csrc/jit/codegen/onednn/prepare_binary.cpp @@ -47,7 +47,7 @@ static void handleBinaryOpInputs(Node* node) { // 42 : Scalar --> tensor(42.0) : Float([]) auto t = g->insert(aten::as_tensor, {scalar}, {{"dtype", promotedDtype}}); // add dim & stride info to IR - c10::optional t_dim = 1; + std::optional t_dim = 1; auto target_type = TensorTypePtr( TensorType::create(promotedDtype, at::kCPU, t_dim, false)); target_type = target_type->withSizes({1}); @@ -67,7 +67,7 @@ static void handleBinaryOpInputs(Node* node) { // are the same dtype, as oneDNN Graph requires both inputs to have the // same dtype. We'll follow PyTorch's type-promotion rules here. auto second_input_typeptr = node->input(1)->type()->expect(); - c10::optional second_input_type = + std::optional second_input_type = second_input_typeptr->scalarType(); if (second_input_type != c10::nullopt) { // dtype of the second tensor might not be available in the IR diff --git a/torch/csrc/jit/cuda/cuda.h b/torch/csrc/jit/cuda/cuda.h index e8a0d04aa935e..80b2e2a82f788 100644 --- a/torch/csrc/jit/cuda/cuda.h +++ b/torch/csrc/jit/cuda/cuda.h @@ -15,7 +15,7 @@ class CUDAStream final : public CustomClassHolder { public: // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) CUDAStream( - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, int64_t priority = 0) { c10::DeviceIndex device_index = device.has_value() ? 
device->index() : c10::cuda::current_device(); @@ -155,7 +155,7 @@ void CUDAEvent::wait(c10::intrusive_ptr stream) { TORCH_LIBRARY(cuda, m) { auto stream_class = m.class_("Stream").def( - torch::init, int64_t>(), + torch::init, int64_t>(), "", {torch::arg("device") = c10::nullopt, torch::arg("priority") = 0}); auto event_class = m.class_("Event").def( diff --git a/torch/csrc/jit/frontend/concrete_module_type.cpp b/torch/csrc/jit/frontend/concrete_module_type.cpp index b18917d0dc01f..c15116ac3e244 100644 --- a/torch/csrc/jit/frontend/concrete_module_type.cpp +++ b/torch/csrc/jit/frontend/concrete_module_type.cpp @@ -149,14 +149,14 @@ TypePtr ConcreteModuleType::getJitType() const { return jitType_; } -c10::optional ConcreteModuleType::getPyClass() const { +std::optional ConcreteModuleType::getPyClass() const { if (!data_.pyClass_) { return c10::nullopt; } return data_.pyClass_; } -c10::optional> ConcreteModuleType::findOverloads( +std::optional> ConcreteModuleType::findOverloads( const std::string& name) const { const auto it = data_.overloads_.find(name); if (it != data_.overloads_.end()) { @@ -165,7 +165,7 @@ c10::optional> ConcreteModuleType::findOverloads( return c10::nullopt; } -c10::optional ConcreteModuleType::findFunctionAttribute( +std::optional ConcreteModuleType::findFunctionAttribute( const std::string& name) const { const auto it = data_.functionAttributes_.find(name); if (it != data_.functionAttributes_.end()) { @@ -174,7 +174,7 @@ c10::optional ConcreteModuleType::findFunctionAttribute( return c10::nullopt; } -c10::optional ConcreteModuleType::findBuiltinFunction( +std::optional ConcreteModuleType::findBuiltinFunction( const std::string& name) const { const auto it = data_.builtinFunctions_.find(name); if (it != data_.builtinFunctions_.end()) { @@ -183,7 +183,7 @@ c10::optional ConcreteModuleType::findBuiltinFunction( return c10::nullopt; } -c10::optional ConcreteModuleType::findFailedAttribute( +std::optional ConcreteModuleType::findFailedAttribute( const std::string& name) const { const auto it = data_.failedAttributes_.find(name); if (it != data_.failedAttributes_.end()) { diff --git a/torch/csrc/jit/frontend/concrete_module_type.h b/torch/csrc/jit/frontend/concrete_module_type.h index 22349936687ce..b3c3221253563 100644 --- a/torch/csrc/jit/frontend/concrete_module_type.h +++ b/torch/csrc/jit/frontend/concrete_module_type.h @@ -195,15 +195,15 @@ class VISIBILITY_HIDDEN ConcreteModuleType { static std::shared_ptr fromJitType(TypePtr type); TypePtr getJitType() const; - c10::optional getPyClass() const; + std::optional getPyClass() const; IterableModuleKind getIterableModuleKind() const; - c10::optional> findOverloads( + std::optional> findOverloads( const std::string& name) const; - c10::optional findFunctionAttribute(const std::string& name) const; - c10::optional findBuiltinFunction(const std::string& name) const; + std::optional findFunctionAttribute(const std::string& name) const; + std::optional findBuiltinFunction(const std::string& name) const; std::shared_ptr findSubmoduleConcreteType( const std::string& name) const; - c10::optional findFailedAttribute(const std::string& name) const; + std::optional findFailedAttribute(const std::string& name) const; bool isIgnoredAttribute(const std::string& name) const; // These getters are only here to return things as types that can be diff --git a/torch/csrc/jit/frontend/function_schema_parser.cpp b/torch/csrc/jit/frontend/function_schema_parser.cpp index 4b681055bd075..a651b35786cea 100644 --- 
a/torch/csrc/jit/frontend/function_schema_parser.cpp +++ b/torch/csrc/jit/frontend/function_schema_parser.cpp @@ -149,9 +149,9 @@ struct SchemaParser { auto fake_type = std::move(std::get<0>(p)); auto real_type = std::move(std::get<1>(p)); auto alias_info = std::move(std::get<2>(p)); - c10::optional N; - c10::optional default_value; - c10::optional alias_set; + std::optional N; + std::optional default_value; + std::optional alias_set; std::string name; if (L.nextIf('[')) { // note: an array with a size hint can only occur at the Argument level @@ -162,7 +162,7 @@ struct SchemaParser { auto container = type_parser.parseAliasAnnotation(); if (alias_info) { if (!container) { - container = c10::optional(at::AliasInfo()); + container = std::optional(at::AliasInfo()); container->setIsWrite(alias_info->isWrite()); } container->addContainedType(std::move(*alias_info)); @@ -297,7 +297,7 @@ struct SchemaParser { IValue parseDefaultValue( const c10::Type& arg_type, TypeKind kind, - c10::optional arg_N) { + std::optional arg_N) { auto range = L.cur().range; switch (kind) { case TypeKind::TensorType: diff --git a/torch/csrc/jit/frontend/ir_emitter.cpp b/torch/csrc/jit/frontend/ir_emitter.cpp index 989a6eaf2dfe0..0aca3ea800623 100644 --- a/torch/csrc/jit/frontend/ir_emitter.cpp +++ b/torch/csrc/jit/frontend/ir_emitter.cpp @@ -168,7 +168,7 @@ struct CondValue { CondValue( Value* value, RefinementSet refinements, - c10::optional static_if) + std::optional static_if) : value_(value), refinements_(std::move(refinements)), static_if_(static_if) {} @@ -186,14 +186,14 @@ struct CondValue { const RefinementSet& refinements() const { return refinements_; } - c10::optional staticIf() const { + std::optional staticIf() const { return static_if_; } private: Value* value_; RefinementSet refinements_; - c10::optional + std::optional static_if_; // certain expression cause us to emit a static if statement // this value is present if this is the case. // this is not equivalent to value_ being a constant @@ -283,7 +283,7 @@ struct Environment { } // see if type error has been set for a variable - c10::optional findVariableTypeError(const std::string& name) { + std::optional findVariableTypeError(const std::string& name) { auto runner = this; while (runner->next) { runner = runner->next.get(); @@ -1200,7 +1200,7 @@ struct to_ir { } if (const auto union_type = lhs_value->type()->cast()) { std::vector to_subtract{NoneType::get()}; - c10::optional remaining = + std::optional remaining = union_type->subtractTypeSet(to_subtract); std::vector all_present; if (remaining) { @@ -1228,7 +1228,7 @@ struct to_ir { CondValue v = emitCondExpr(Expr(expr.tree()->trees()[0])); Value* result = emitBuiltinCall( expr.range(), *graph, aten::__not__, {v.value()}, {}); - c10::optional static_if; + std::optional static_if; if (v.staticIf()) { static_if = !*v.staticIf(); } @@ -1294,7 +1294,7 @@ struct to_ir { } } auto expr_out = emitToBool(expr.range(), emitExpr(expr)); - c10::optional static_if = c10::nullopt; + std::optional static_if = c10::nullopt; auto kind = expr_out->node()->kind(); if (kind == aten::is_scripting) { static_if = true; @@ -1559,7 +1559,7 @@ struct to_ir { ? refined_type_hint->cast()->getElementType() : nullptr; - c10::optional unified_elem_type = unifyTypes( + std::optional unified_elem_type = unifyTypes( list_value->type()->expect()->getElementType(), out->type(), /*default_to_union=*/true, @@ -1740,7 +1740,7 @@ struct to_ir { ? 
refined_type_hint->expect()->getValueType() : nullptr; - c10::optional unified_value_type = unifyTypes( + std::optional unified_value_type = unifyTypes( first_generated_value_type, v->type(), /*default_to_union=*/true, @@ -1832,7 +1832,7 @@ struct to_ir { // and the second expr in the false branch, if it's an AND the opposite auto get_const_expr = [&] { return graph->insertConstant(is_or, loc); }; - c10::optional rhs; + std::optional rhs; auto get_continue_expr = [&] { rhs = emitCondExpr(second_expr); return rhs->value(); @@ -1842,8 +1842,8 @@ struct to_ir { // If this is an AND, eval second expression if first expr is True // NOLINTNEXTLINE(cppcoreguidelines-init-variables) Value* new_result; - c10::optional refinements; - c10::optional static_if; + std::optional refinements; + std::optional static_if; if (is_or) { new_result = emitIfExpr(loc, lhs, get_const_expr, get_continue_expr); refinements = lhs.refinements().Or(rhs->refinements()); @@ -2320,8 +2320,8 @@ struct to_ir { const SourceRange& range, const std::function& emit_body, const SugaredValuePtr& iter_val, - c10::optional> targets, - c10::optional cond) { + std::optional> targets, + std::optional cond) { Value* max_trip_count_val = nullptr; if (iter_val != nullptr) { max_trip_count_val = iter_val->len(range, method); @@ -2968,7 +2968,7 @@ struct to_ir { auto outputs = rhs_output->asTuple( rhs_loc, method, - starred_unpack ? c10::nullopt : c10::optional{n_binders}); + starred_unpack ? c10::nullopt : std::optional{n_binders}); if (outputs.size() < n_binders) { throw ErrorReport(tl) << "need " << (starred_unpack ? "at least " : "") << n_binders @@ -3655,7 +3655,7 @@ struct to_ir { auto iterable_value = expr_sv->iter(loc, method); // range should have the same static length as the other iterable - c10::optional iter_static_len = iterable_value->staticLen(); + std::optional iter_static_len = iterable_value->staticLen(); SugaredValuePtr range_sv = std::make_shared( loc, method, range_inputs, iter_static_len); @@ -4454,7 +4454,7 @@ struct to_ir { ? 
refined_type_hint->cast()->getElementType() : nullptr; - c10::optional unified_elem_type = unifyTypeList( + std::optional unified_elem_type = unifyTypeList( types, nowhere, /*default_to_union=*/true, elem_type_hint); if (!refined_type_hint && @@ -4885,7 +4885,7 @@ struct to_ir { return graph->insertConstant(dim, loc); }; std::vector dims(subscript_exprs.size()); - std::vector> exprs( + std::vector> exprs( subscript_exprs.size(), c10::nullopt); auto handle_indexing = [&](const Expr& subscript_expr, @@ -5352,7 +5352,7 @@ struct CompilationUnit::PropertyPair }; CompilationUnit::PropertyPair CompilationUnit::define_property( - const c10::optional& prefix, + const std::optional& prefix, const Property& prop, const ResolverPtr& resolver, const Self* self, @@ -5386,14 +5386,14 @@ CompilationUnit::PropertyPair CompilationUnit::define_property( } std::unique_ptr CompilationUnit::define( - const c10::optional& prefix, + const std::optional& prefix, const Def& def, const ResolverPtr& resolver, const Self* self, const std::unordered_map& function_table, bool shouldMangle, CompilationUnit::FunctionType type, - c10::optional operator_set_version) const { + std::optional operator_set_version) const { TORCH_INTERNAL_ASSERT(resolver); auto _resolver = resolver; if (!self) { @@ -5444,14 +5444,14 @@ std::unique_ptr CompilationUnit::define( } std::vector CompilationUnit::define( - const c10::optional& prefix, + const std::optional& prefix, const std::vector& properties, const std::vector& propResolvers, const std::vector& definitions, const std::vector& defResolvers, const Self* self, bool shouldMangle, - c10::optional operator_set_version) { + std::optional operator_set_version) { TORCH_INTERNAL_ASSERT(definitions.size() == defResolvers.size()); TORCH_INTERNAL_ASSERT(properties.size() == propResolvers.size()); std::vector functions; @@ -5515,7 +5515,7 @@ std::vector CompilationUnit::define( } void CompilationUnit::define_hooks( - const c10::optional& prefix, + const std::optional& prefix, const std::vector& hookDefs, const std::vector& hookResolvers, const std::vector& preHookDefs, @@ -5620,7 +5620,7 @@ void CompilationUnit::define_hooks( } std::vector CompilationUnit::define( - const c10::optional& prefix, + const std::optional& prefix, const std::string& source, const ResolverPtr& resolver, const Self* self) { diff --git a/torch/csrc/jit/frontend/parse_string_literal.h b/torch/csrc/jit/frontend/parse_string_literal.h index 2ca1f150aacdd..5b924864bebd8 100644 --- a/torch/csrc/jit/frontend/parse_string_literal.h +++ b/torch/csrc/jit/frontend/parse_string_literal.h @@ -12,7 +12,7 @@ inline bool isCharCount(char c, const std::string& str, size_t start, int len) { std::count(str.begin() + start, str.begin() + start + len, c) == len; } -inline c10::optional parseOctal(const std::string& str, size_t pos) { +inline std::optional parseOctal(const std::string& str, size_t pos) { //\xxx where x are 0-7 if (pos + 3 >= str.size()) return c10::nullopt; diff --git a/torch/csrc/jit/frontend/parser.cpp b/torch/csrc/jit/frontend/parser.cpp index 02e22547edd44..ae2c98028e071 100644 --- a/torch/csrc/jit/frontend/parser.cpp +++ b/torch/csrc/jit/frontend/parser.cpp @@ -210,7 +210,7 @@ struct ParserImpl { } return prefix; } - c10::optional maybeParseAssignmentOp() { + std::optional maybeParseAssignmentOp() { auto r = L.cur().range; switch (L.cur().kind) { case TK_PLUS_EQ: diff --git a/torch/csrc/jit/frontend/schema_matching.cpp b/torch/csrc/jit/frontend/schema_matching.cpp index 0b4fa8ef65b2e..87ec9992141d8 100644 --- 
a/torch/csrc/jit/frontend/schema_matching.cpp +++ b/torch/csrc/jit/frontend/schema_matching.cpp @@ -247,7 +247,7 @@ static Value* tryMatchArgument( return value; } -c10::optional findInputWithName( +std::optional findInputWithName( const std::string& name, at::ArrayRef kwargs, bool is_aten) { @@ -354,13 +354,13 @@ bool isBlockListedSchema(const FunctionSchema& schema) { return false; } -static c10::optional tryMatchSchema( +static std::optional tryMatchSchema( const FunctionSchema& schema, const SourceRange& loc, Graph& graph, at::ArrayRef args, at::ArrayRef kwargs, - c10::optional self, + std::optional self, std::ostream* failure_messages, bool allow_conversions) { if (isBlockListedSchema(schema)) { @@ -389,7 +389,7 @@ static c10::optional tryMatchSchema( size_t used_args = 0; for (const auto schema_i : c10::irange(schema.arguments().size())) { const auto& arg = schema.arguments()[schema_i]; - c10::optional actual_named_value; + std::optional actual_named_value; if (arg.name() == "self" && self) { actual_named_value = self; self = c10::nullopt; @@ -540,7 +540,7 @@ MatchedSchema matchSchema( Graph& graph, at::ArrayRef args, at::ArrayRef kwargs, - const c10::optional& self) { + const std::optional& self) { std::stringstream failure_messages; if (auto result = tryMatchSchema( schema, @@ -576,7 +576,7 @@ std::pair matchSchemas( Graph& graph, at::ArrayRef args, at::ArrayRef kwargs, - const c10::optional& self, + const std::optional& self, bool render_errors) { TORCH_INTERNAL_ASSERT(!schemas.empty()); // if there is only one schema, we do not need to try without conversions @@ -645,7 +645,7 @@ static Value* emitBuiltinNode( const SourceRange& loc, Graph& graph, Symbol name, - c10::optional version) { + std::optional version) { auto n = graph.insertNode(graph.create(name, matched_schema.inputs, 0)) ->setSourceRange(loc); @@ -681,7 +681,7 @@ Value* emitBuiltinCall( Symbol name, at::ArrayRef args, at::ArrayRef kwargs, - const c10::optional& self) { + const std::optional& self) { const auto& variants = getAllOperatorsFor(name); const auto& builtin_functions = getAllBuiltinFunctionsFor(name); diff --git a/torch/csrc/jit/frontend/schema_matching.h b/torch/csrc/jit/frontend/schema_matching.h index 754ede24597e5..0c69df521df6a 100644 --- a/torch/csrc/jit/frontend/schema_matching.h +++ b/torch/csrc/jit/frontend/schema_matching.h @@ -28,7 +28,7 @@ TORCH_API MatchedSchema matchSchema( Graph& graph, at::ArrayRef args, at::ArrayRef kwargs, - const c10::optional& self = c10::nullopt); + const std::optional& self = c10::nullopt); TORCH_API std::pair matchSchemas( const std::vector& schemas, @@ -36,7 +36,7 @@ TORCH_API std::pair matchSchemas( Graph& graph, at::ArrayRef args, at::ArrayRef kwargs, - const c10::optional& self = c10::nullopt, + const std::optional& self = c10::nullopt, bool render_errors = false); TORCH_API bool convertibleToList( @@ -51,9 +51,9 @@ TORCH_API Value* emitBuiltinCall( Symbol name, at::ArrayRef args, at::ArrayRef kwargs, - const c10::optional& self = c10::nullopt); + const std::optional& self = c10::nullopt); -TORCH_API c10::optional findInputWithName( +TORCH_API std::optional findInputWithName( const std::string& name, at::ArrayRef kwargs, bool is_aten = false); diff --git a/torch/csrc/jit/frontend/schema_type_parser.cpp b/torch/csrc/jit/frontend/schema_type_parser.cpp index 7c4b8ba0cac26..89465bca3f7a3 100644 --- a/torch/csrc/jit/frontend/schema_type_parser.cpp +++ b/torch/csrc/jit/frontend/schema_type_parser.cpp @@ -98,7 +98,7 @@ TypePtr SchemaTypeParser::parseBaseType() { // 
Tensor! // shorthand for Tensor(fresh_identifier!) // Tensor(a! -> a|b) // Tensor is in set a, written to, // and after the write is in set a AND b. -c10::optional SchemaTypeParser::parseAliasAnnotation() { +std::optional SchemaTypeParser::parseAliasAnnotation() { AliasInfo alias_info; if (L.nextIf('(')) { // optional 'alias set annotation' @@ -147,7 +147,7 @@ c10::optional SchemaTypeParser::parseAliasAnnotation() { return alias_info; } -c10::optional SchemaTypeParser::parseTensorDType( +std::optional SchemaTypeParser::parseTensorDType( const std::string& dtype) { #define DEFINE_SCALAR_TYPE(_1, n) {#n, at::ScalarType::n}, @@ -161,7 +161,7 @@ c10::optional SchemaTypeParser::parseTensorDType( return c10::nullopt; } -c10::optional SchemaTypeParser::tryToParseDeviceType() { +std::optional SchemaTypeParser::tryToParseDeviceType() { L.expect('='); const std::string& dev = L.expect(TK_IDENT).text(); @@ -195,7 +195,7 @@ c10::optional SchemaTypeParser::tryToParseDeviceType() { throw ErrorReport(L.cur()) << "cannot parse device type '" << dev << "'\n"; } -c10::optional SchemaTypeParser::tryToParseRequiresGrad() { +std::optional SchemaTypeParser::tryToParseRequiresGrad() { L.expect('='); const std::string& num = L.expect(TK_NUMBER).text(); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -218,8 +218,8 @@ TypePtr SchemaTypeParser::parseRefinedTensor() { TypePtr ptr; L.expect('('); TypePtr tensor_type; - c10::optional device; - c10::optional requires_grad; + std::optional device; + std::optional requires_grad; // Parse a type with either no ranks, known ranks with sizes, ranks with // unknown sizes, a mix of ranks with known and unknown sizes, or ranks with // known sizes and strides. The type might also have requires_grad and/or @@ -227,7 +227,7 @@ TypePtr SchemaTypeParser::parseRefinedTensor() { // Long(10, 8, 6, strides=[48, 6, 1], requires_grad=0, device=cuda:1) // Float(10, *, 20, device=cuda:1) // Float(requires_grad=1) - std::vector> dims; + std::vector> dims; bool seen_strides = false; std::vector strides; parseList(TK_NOTHING, ',', ')', [&] { @@ -339,16 +339,16 @@ TypePtr SchemaTypeParser::parseRefinedTensor() { return ptr; } -std::pair> SchemaTypeParser::parseType() { +std::pair> SchemaTypeParser::parseType() { auto r = parseFakeAndRealType(); return std::make_pair(std::move(std::get<0>(r)), std::move(std::get<2>(r))); } -std::tuple> +std::tuple> SchemaTypeParser::parseFakeAndRealType() { TypePtr fake_value; TypePtr real_value; - c10::optional alias_info; + std::optional alias_info; // Tuple type if (L.cur().kind == '(') { std::vector types; @@ -465,7 +465,7 @@ SchemaTypeParser::parseFakeAndRealType() { auto container = parseAliasAnnotation(); if (alias_info) { if (!container) { - container = c10::optional(AliasInfo()); + container = std::optional(AliasInfo()); container->setIsWrite(alias_info->isWrite()); } container->addContainedType(std::move(*alias_info)); diff --git a/torch/csrc/jit/frontend/schema_type_parser.h b/torch/csrc/jit/frontend/schema_type_parser.h index c43e4363da386..e8c830cd5ae06 100644 --- a/torch/csrc/jit/frontend/schema_type_parser.h +++ b/torch/csrc/jit/frontend/schema_type_parser.h @@ -13,19 +13,19 @@ using TypePtr = c10::TypePtr; struct TORCH_API SchemaTypeParser { TypePtr parseBaseType(); - c10::optional parseAliasAnnotation(); - std::pair> parseType(); - std::tuple> + std::optional parseAliasAnnotation(); + std::pair> parseType(); + std::tuple> parseFakeAndRealType(); - c10::optional parseTensorDType(const std::string& dtype); + std::optional 
parseTensorDType(const std::string& dtype); TypePtr parseRefinedTensor(); SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types) : complete_tensor_types(parse_complete_tensor_types), L(L) {} private: - c10::optional tryToParseRequiresGrad(); - c10::optional tryToParseDeviceType(); + std::optional tryToParseRequiresGrad(); + std::optional tryToParseDeviceType(); void parseList( int begin, int sep, diff --git a/torch/csrc/jit/frontend/script_type_parser.cpp b/torch/csrc/jit/frontend/script_type_parser.cpp index 245a7496d8f36..9295a3ed4007a 100644 --- a/torch/csrc/jit/frontend/script_type_parser.cpp +++ b/torch/csrc/jit/frontend/script_type_parser.cpp @@ -118,7 +118,7 @@ TypePtr ScriptTypeParser::subscriptToType( } } -c10::optional> ScriptTypeParser::parseBroadcastList( +std::optional> ScriptTypeParser::parseBroadcastList( const Expr& expr) const { // Alias torch.nn._common_types._size_?_t to BroadcastingList?[int] if (expr.kind() == TK_VAR) { @@ -191,7 +191,7 @@ c10::optional> ScriptTypeParser::parseBroadcastList( // gets the base type name given namespaces where the types live // turns torch.Tensor -> Tensor, X -> X -c10::optional ScriptTypeParser::parseBaseTypeName( +std::optional ScriptTypeParser::parseBaseTypeName( const Expr& expr) const { switch (expr.kind()) { case TK_VAR: { @@ -407,7 +407,7 @@ std::vector ScriptTypeParser::parseArgsFromDecl( auto decl_arg = *it; TypePtr type; - c10::optional N = c10::nullopt; + std::optional N = c10::nullopt; if (!decl_arg.type().present()) { // If this param doesn't have a type, default to "tensor" type = TensorType::getInferred(); @@ -421,7 +421,7 @@ std::vector ScriptTypeParser::parseArgsFromDecl( type = parseTypeFromExpr(decl_arg.type().get()); } } - c10::optional default_value = c10::nullopt; + std::optional default_value = c10::nullopt; if (decl_arg.defaultValue().present()) { default_value = *defaults_it++; } diff --git a/torch/csrc/jit/frontend/script_type_parser.h b/torch/csrc/jit/frontend/script_type_parser.h index 3a05af9c598ab..66c963b7d6d3d 100644 --- a/torch/csrc/jit/frontend/script_type_parser.h +++ b/torch/csrc/jit/frontend/script_type_parser.h @@ -21,7 +21,7 @@ class TORCH_API ScriptTypeParser { c10::TypePtr parseTypeFromExpr(const Expr& expr) const; - c10::optional> parseBroadcastList( + std::optional> parseBroadcastList( const Expr& expr) const; c10::TypePtr parseType(const std::string& str); @@ -33,7 +33,7 @@ class TORCH_API ScriptTypeParser { private: c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const; - c10::optional parseBaseTypeName(const Expr& expr) const; + std::optional parseBaseTypeName(const Expr& expr) const; at::TypePtr subscriptToType( const std::string& typeName, const Subscript& subscript) const; diff --git a/torch/csrc/jit/frontend/source_range.cpp b/torch/csrc/jit/frontend/source_range.cpp index 03c366878af99..20ffbfd4601e3 100644 --- a/torch/csrc/jit/frontend/source_range.cpp +++ b/torch/csrc/jit/frontend/source_range.cpp @@ -151,7 +151,7 @@ size_t SourceRangeHasher::operator()(const torch::jit::SourceRange& key) const { std::hash()(key.start()) ^ std::hash()(key.end())); } -c10::optional Source::findSourceRangeThatGenerated( +std::optional Source::findSourceRangeThatGenerated( const SourceRange& range) { if (!gen_ranges_) { return c10::nullopt; diff --git a/torch/csrc/jit/frontend/source_range.h b/torch/csrc/jit/frontend/source_range.h index 72710a94ed210..1f8715ad00969 100644 --- a/torch/csrc/jit/frontend/source_range.h +++ b/torch/csrc/jit/frontend/source_range.h @@ -190,7 +190,7 @@ struct 
TORCH_API Source { explicit Source( c10::string_view text_view, - c10::optional filename = c10::nullopt, + std::optional filename = c10::nullopt, size_t starting_line_no = 0, std::shared_ptr gen_ranges = nullptr, CopiesString copies_str = COPIES_STRING) @@ -210,7 +210,7 @@ struct TORCH_API Source { explicit Source( StringCordView str, - c10::optional filename = c10::nullopt, + std::optional filename = c10::nullopt, size_t starting_line_no = 0, std::shared_ptr gen_ranges = nullptr) : text_view_(std::move(str)), @@ -266,7 +266,7 @@ struct TORCH_API Source { return text_view_.size(); } - c10::optional& filename() { + std::optional& filename() { return filename_; } @@ -274,7 +274,7 @@ struct TORCH_API Source { return starting_line_no_; } - c10::optional findSourceRangeThatGenerated( + std::optional findSourceRangeThatGenerated( const SourceRange& range); ~Source() = default; @@ -291,7 +291,7 @@ struct TORCH_API Source { StringCordView text_view_; - c10::optional filename_; + std::optional filename_; // If filename_ is not present, starting_line_no_ is don't care size_t starting_line_no_; // Starting offsets for lines into the source. e.g. line 0 starts at @@ -358,14 +358,14 @@ struct TORCH_API SourceRange { return ss.str(); } - c10::optional> file_line_col() const { + std::optional> file_line_col() const { if (!source_view_ || !source()->filename()) { return c10::nullopt; } auto lineno = source_view_->lineno_for_offset(start_); auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno); - // TODO: c10::optional<>::value returns an rvalue ref so can't use it here?? + // TODO: std::optional<>::value returns an rvalue ref so can't use it here?? return std::make_tuple( source_view_->filename().value_or(""), source_view_->lineno_to_source_lineno(lineno), @@ -381,7 +381,7 @@ struct TORCH_API SourceRange { return !(*this == rhs); } - c10::optional findSourceRangeThatGenerated() const { + std::optional findSourceRangeThatGenerated() const { if (!source_view_) { return c10::nullopt; } diff --git a/torch/csrc/jit/frontend/sugared_value.cpp b/torch/csrc/jit/frontend/sugared_value.cpp index 80b5d27fba079..4b65903529d23 100644 --- a/torch/csrc/jit/frontend/sugared_value.cpp +++ b/torch/csrc/jit/frontend/sugared_value.cpp @@ -283,7 +283,7 @@ std::shared_ptr SimpleValue::attr( std::vector> SimpleValue::asTuple( const SourceRange& loc, GraphFunction& m, - const c10::optional& size_hint) { + const std::optional& size_hint) { static const auto make_simple_value = [](Value* v) -> std::shared_ptr { return std::make_shared(v); @@ -525,7 +525,7 @@ RangeValue::RangeValue( const SourceRange& loc, GraphFunction& m, std::vector inputs, - c10::optional static_len) { + std::optional static_len) { for (const auto i : c10::irange(inputs.size())) { auto typ = inputs[i]->type(); if (!typ->cast()) { @@ -645,7 +645,7 @@ void IterableTree::addChild( const SourceRange& range, GraphFunction& m, const SugaredValuePtr& iter_value) { - c10::optional child_len = iter_value->staticLen(); + std::optional child_len = iter_value->staticLen(); if (children_.empty()) { unroll_length_ = child_len; } else { @@ -748,7 +748,7 @@ std::shared_ptr NamedTupleConstructor::call( std::shared_ptr BuiltinFunction::tryCreate( Symbol symbol, - c10::optional self) { + std::optional self) { for (const std::shared_ptr& op : getAllOperatorsFor(symbol)) { if (!self) { return std::make_shared(symbol, nullptr); diff --git a/torch/csrc/jit/frontend/sugared_value.h b/torch/csrc/jit/frontend/sugared_value.h index 9bf09f4a56e17..97b092cad3ce7 100644 
--- a/torch/csrc/jit/frontend/sugared_value.h +++ b/torch/csrc/jit/frontend/sugared_value.h @@ -67,7 +67,7 @@ struct TORCH_API SugaredValue virtual std::vector> asTuple( const SourceRange& loc, GraphFunction& m, - const c10::optional& size_hint = {}) { + const std::optional& size_hint = {}) { throw ErrorReport(loc) << kind() << " cannot be used as a tuple"; } @@ -121,7 +121,7 @@ struct TORCH_API SugaredValue // function, then we emit an unrolled loop over the variable. This allows us // to support containers of Heterogenous types, like Module Containers & // Tuples - virtual c10::optional staticLen() { + virtual std::optional staticLen() { return c10::nullopt; } @@ -169,7 +169,7 @@ struct TORCH_API SimpleValue : public SugaredValue { std::vector> asTuple( const SourceRange& loc, GraphFunction& m, - const c10::optional& size_hint = {}) override; + const std::optional& size_hint = {}) override; std::shared_ptr attr( const SourceRange& loc, GraphFunction& m, @@ -213,14 +213,14 @@ struct TORCH_API SimpleValue : public SugaredValue { }; struct TORCH_API BuiltinFunction : public SugaredValue { - BuiltinFunction(Symbol symbol, c10::optional self) + BuiltinFunction(Symbol symbol, std::optional self) : symbol(symbol), self(std::move(self)) {} // The symbol of the function (e.g. `aten::relu`). Symbol symbol; // if this is method, then this is the self argument. - c10::optional self; + std::optional self; std::string kind() const override { return "builtin"; } @@ -236,7 +236,7 @@ struct TORCH_API BuiltinFunction : public SugaredValue { // not clear if it is a valid builtin static std::shared_ptr tryCreate( Symbol symbol, - c10::optional self); + std::optional self); }; struct TORCH_API SugaredTupleValue : public SugaredValue { @@ -246,7 +246,7 @@ struct TORCH_API SugaredTupleValue : public SugaredValue { std::vector> asTuple( const SourceRange& loc, GraphFunction& m, - const c10::optional& size_hint = {}) override { + const std::optional& size_hint = {}) override { return tup_; }; @@ -297,7 +297,7 @@ struct TORCH_API SugaredTupleValue : public SugaredValue { // Because this is used to contain SugaredValues of Heterogenous types, // we define staticLen() so that when this is iterated over it is emitted // as an unrolled loop. - c10::optional staticLen() override { + std::optional staticLen() override { return static_cast(tup_.size()); } @@ -305,7 +305,7 @@ struct TORCH_API SugaredTupleValue : public SugaredValue { }; struct TORCH_API BuiltinModule : public SugaredValue { - BuiltinModule(std::string name, c10::optional version = at::nullopt) + BuiltinModule(std::string name, std::optional version = at::nullopt) : name(std::move(name)), version(version) {} std::string kind() const override { @@ -330,7 +330,7 @@ struct TORCH_API BuiltinModule : public SugaredValue { std::string name; // when we add operator versioning, emit this op as it exising at 'version' // if not set, use the latest version - c10::optional version; + std::optional version; }; // Represents a class, analagous to `int` or `dict`. 
Instances of classes, @@ -638,7 +638,7 @@ struct TORCH_API RangeValue : SugaredValue { const SourceRange& loc, GraphFunction& m, std::vector input, - c10::optional static_len = c10::nullopt); + std::optional static_len = c10::nullopt); std::string kind() const override { return "range"; @@ -654,7 +654,7 @@ struct TORCH_API RangeValue : SugaredValue { // When Range is instantiated via enumerate(iterable_with_static_len), // then it takes the static length of the iterable - c10::optional staticLen() override { + std::optional staticLen() override { return static_len_; } @@ -667,7 +667,7 @@ struct TORCH_API RangeValue : SugaredValue { // derivation nodes to simplify the graph and enable more possible // optimizations bool has_only_end_{}; - c10::optional static_len_; + std::optional static_len_; }; // Specialized Tree structure to matched against for special handling @@ -712,7 +712,7 @@ struct TORCH_API IterableTree : SugaredValue { // If this iterable contains a ModuleList or Tuple, then it will have a // static length, and we will emit it as an unrolled for loop. - c10::optional staticLen() override { + std::optional staticLen() override { return unroll_length_; } @@ -730,7 +730,7 @@ struct TORCH_API IterableTree : SugaredValue { TypePtr type_hint = nullptr) override; private: - c10::optional unroll_length_ = c10::nullopt; + std::optional unroll_length_ = c10::nullopt; std::vector children_; }; diff --git a/torch/csrc/jit/frontend/tracer.cpp b/torch/csrc/jit/frontend/tracer.cpp index 823b27f30fcb1..9616e0f83dfbe 100644 --- a/torch/csrc/jit/frontend/tracer.cpp +++ b/torch/csrc/jit/frontend/tracer.cpp @@ -44,7 +44,7 @@ template void genericAddOptionalInput( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { if (value) { jit::tracer::addInputs(n, name, *value); } else { @@ -110,7 +110,7 @@ void TracingState::delValue(const IValue& var) { Value* getValueTrace(const IValue& var) { return getTracingState()->getValue(var); } -static Value* getOptTensorValueTrace(const c10::optional& var) { +static Value* getOptTensorValueTrace(const std::optional& var) { return getValueTrace(IValue(var)); } Value* TracingState::getValue(const IValue& var) { @@ -617,7 +617,7 @@ void addInputs(Node* n, const char* name, c10::SymInt value) { addInputs(n, name, value.guard_int(__FILE__, __LINE__)); } -void addInputs(Node* n, const char* name, c10::optional value) { +void addInputs(Node* n, const char* name, std::optional value) { using ArgumentStash = jit::tracer::ArgumentStash; if (ArgumentStash::hasValue(name)) { Value* v = ArgumentStash::popValue(name); @@ -633,13 +633,13 @@ void addInputs(Node* n, const char* name, c10::optional value) { void addInputs(Node* n, const char* name, bool value) { detail::genericAddInput(n, value); } -void addInputs(Node* n, const char* name, const c10::optional& value) { +void addInputs(Node* n, const char* name, const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs(Node* n, const char* name, double value) { detail::genericAddInput(n, value); } -void addInputs(Node* n, const char* name, const c10::optional& value) { +void addInputs(Node* n, const char* name, const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs(Node* n, const char* name, const at::Scalar& value) { @@ -654,7 +654,7 @@ void addInputs(Node* n, const char* name, const at::Scalar& value) { void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { 
detail::genericAddOptionalInput(n, name, value); } void addInputs(Node* n, const char* name, const c10::string_view value) { @@ -663,7 +663,7 @@ void addInputs(Node* n, const char* name, const c10::string_view value) { void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs(Node* n, const char* name, const at::Tensor& value) { @@ -672,13 +672,13 @@ void addInputs(Node* n, const char* name, const at::Tensor& value) { void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { Graph* g = n->owningGraph(); if (value.has_value() && value->defined()) { @@ -706,31 +706,31 @@ void addInputs(Node* n, const char* name, at::MemoryFormat value) { void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs( Node* n, const char* name, - c10::optional value) { + std::optional value) { TORCH_CHECK(false, "NYI: Named tensors are not supported with the tracer"); } void addInputs( Node* n, const char* name, - const c10::optional& value) { + const std::optional& value) { detail::genericAddOptionalInput(n, name, value); } void addInputs( @@ -767,7 +767,7 @@ void addInputs( TORCH_API void addInputs( Node* n, const char* name, - const List>& value) { + const List>& value) { Graph* g = n->owningGraph(); Node* list_node = nullptr; list_node = g->insertNode(g->createList( @@ -813,7 +813,7 @@ void addInputs(Node* n, const char* name, c10::SymIntArrayRef value) { addInputs(n, name, C10_AS_INTARRAYREF_SLOW(value)); } -void addInputs(Node* n, const char* name, c10::optional value) { +void addInputs(Node* n, const char* name, std::optional value) { addInputs( n, name, @@ -825,7 +825,7 @@ void addInputs(Node* n, const char* name, c10::optional value) { void addInputs( Node* n, const char* name, - const c10::optional& opt_value) { + const std::optional& opt_value) { detail::genericAddOptionalInput(n, name, opt_value); } @@ -869,7 +869,7 @@ void addInputs(Node* n, const char* name, ArrayRef value) { void addInputs( Node* n, const char* name, - const c10::optional>& opt_value) { + const std::optional>& opt_value) { detail::genericAddOptionalInput(n, name, opt_value); } @@ -995,7 +995,7 @@ void ensureUniqueIfOutOfPlaced(const char* name, const at::Tensor& tensor) { } void ensureUniqueIfOutOfPlaced( const char* name, - const c10::optional& tensor) { + const std::optional& tensor) { ensureUniqueIfOutOfPlaced(name, tensor.has_value() ? 
*tensor : at::Tensor()); } diff --git a/torch/csrc/jit/frontend/tracer.h b/torch/csrc/jit/frontend/tracer.h index f265d57b649dd..a1cc856a22e19 100644 --- a/torch/csrc/jit/frontend/tracer.h +++ b/torch/csrc/jit/frontend/tracer.h @@ -236,37 +236,37 @@ TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value); TORCH_API void addInputs( Node* n, const char* name, - c10::optional value); + std::optional value); TORCH_API void addInputs(Node* n, const char* name, bool value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs(Node* n, const char* name, double value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs(Node* n, const char* name, ArrayRef value); TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value); TORCH_API void addInputs( Node* n, const char* name, - c10::optional value); + std::optional value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional>& value); + const std::optional>& value); TORCH_API void addInputs( Node* n, const char* name, @@ -293,7 +293,7 @@ TORCH_API void addInputs( TORCH_API void addInputs( Node* n, const char* name, - const List>& value); + const List>& value); TORCH_API void addInputs( Node* n, const char* name, @@ -303,7 +303,7 @@ TORCH_API void addInputs(Node* n, const char* name, ArrayRef value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional>& value); + const std::optional>& value); TORCH_API void addInputs( Node* n, const char* name, @@ -311,7 +311,7 @@ TORCH_API void addInputs( TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs(Node* n, const char* name, at::Device value); TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream); TORCH_API void addInputs(Node* n, const char* name, at::Layout value); @@ -319,28 +319,28 @@ TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value); TORCH_API void addInputs( Node* n, const char* name, - c10::optional value); + std::optional value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); TORCH_API void addInputs( Node* n, const char* name, - const c10::optional& value); + const std::optional& value); inline void addInputs( Node* n, @@ -377,7 +377,7 @@ TORCH_API void ensureUniqueIfOutOfPlaced( const at::Tensor& tensor); TORCH_API void ensureUniqueIfOutOfPlaced( const char* name, - const c10::optional& tensor); + const std::optional& tensor); template < typename T, diff --git 
a/torch/csrc/jit/ir/alias_analysis.cpp b/torch/csrc/jit/ir/alias_analysis.cpp index 29953ecd19a3e..f9b2ed5dd7ce9 100644 --- a/torch/csrc/jit/ir/alias_analysis.cpp +++ b/torch/csrc/jit/ir/alias_analysis.cpp @@ -54,7 +54,7 @@ class MutableTypePtrHelper { // of dimension 4 would map to the same type as a Tensor of // dimension 1. This allows us to treat all subclasses of Tensor // as a single, homogenous "Tensor" type. - c10::optional mapTypeToAliasTypeSet(const TypePtr& type) { + std::optional mapTypeToAliasTypeSet(const TypePtr& type) { if (mutable_type_cache_) { const AliasTypeSet* result = mapTypeToBorrowedAliasTypeSet(type); if (result) { @@ -82,7 +82,7 @@ class MutableTypePtrHelper { } private: - c10::optional mapTypeToAliasTypeSetImpl(const TypePtr& type) { + std::optional mapTypeToAliasTypeSetImpl(const TypePtr& type) { switch (type->kind()) { case TypeKind::ListType: case TypeKind::DictType: @@ -1097,7 +1097,7 @@ void AliasDb::analyzeRpcAsync(Node* node) { } namespace { -c10::optional getConstantBooleanInput( +std::optional getConstantBooleanInput( Node* node, const std::string& inputName) { TORCH_INTERNAL_ASSERT( @@ -1893,7 +1893,7 @@ bool AliasDb::mayAliasWildcard(const at::ArrayRef vs) const { vs.begin(), vs.end(), [&](Value* v) { return mayAliasWildcard(v); }); } -c10::optional AliasDb::tryGetOrCreateWildcard(const TypePtr& type) { +std::optional AliasDb::tryGetOrCreateWildcard(const TypePtr& type) { auto maybe_mut_types = mapTypeToAliasTypeSetPtr(type); if (!maybe_mut_types) { return c10::nullopt; @@ -1966,8 +1966,8 @@ Element* AliasDb::getWildcard(const TypePtr& type) const { } // Register `v` as a wildcard value. -c10::optional AliasDb::setWildcard(const Value* v) { - c10::optional maybe_wildcardElement = +std::optional AliasDb::setWildcard(const Value* v) { + std::optional maybe_wildcardElement = tryGetOrCreateWildcard(v->type()); if (!maybe_wildcardElement) { return c10::nullopt; diff --git a/torch/csrc/jit/ir/alias_analysis.h b/torch/csrc/jit/ir/alias_analysis.h index 380943635ea35..c06a4a88080b4 100644 --- a/torch/csrc/jit/ir/alias_analysis.h +++ b/torch/csrc/jit/ir/alias_analysis.h @@ -203,7 +203,7 @@ class AliasDb { * Wildcard methods */ // Register `v` as a wildcard value. - c10::optional setWildcard(const Value* v); + std::optional setWildcard(const Value* v); // Is this a value which will not alias? bool nonAliasingValue(const Value* elem) const; @@ -274,7 +274,7 @@ class AliasDb { // All wildcard Elements (one for each unique mutable type) ska::flat_hash_map wildcardIndex_; Element* getWildcard(const TypePtr& type) const; - c10::optional tryGetOrCreateWildcard(const TypePtr& type); + std::optional tryGetOrCreateWildcard(const TypePtr& type); void addContainedTypesToFreshElement( Element* container_elem, const AliasTypeSet& mut_types); @@ -301,9 +301,9 @@ class AliasDb { // Map of nodes to the memory locations that they write to using TWriteIndex = ska::flat_hash_map; - c10::optional writeIndex_; + std::optional writeIndex_; // Collection of all memory locations that are written to. 
- c10::optional writtenToLocationsIndex_; + std::optional writtenToLocationsIndex_; void buildWrittenToLocationsIndex(); std::unordered_set wildcards_; diff --git a/torch/csrc/jit/ir/constants.cpp b/torch/csrc/jit/ir/constants.cpp index 905088a20d1e2..ef697a5af7680 100644 --- a/torch/csrc/jit/ir/constants.cpp +++ b/torch/csrc/jit/ir/constants.cpp @@ -48,8 +48,8 @@ static bool insertableIValue(const IValue& ivalue) { Value* insertConstant( Graph& g, const IValue& val, - c10::optional loc, - c10::optional scope) { + std::optional loc, + std::optional scope) { auto value = tryInsertConstant(g, val, std::move(loc), std::move(scope)); if (value) { return *value; @@ -59,11 +59,11 @@ Value* insertConstant( } // IValue -> Constant node -c10::optional tryInsertConstant( +std::optional tryInsertConstant( Graph& g, const IValue& val, - c10::optional loc, - c10::optional scope) { + std::optional loc, + std::optional scope) { Node* n = g.create(prim::Constant); if (val.isTensor()) { at::Tensor ref = val.toTensor(); @@ -153,7 +153,7 @@ c10::optional tryInsertConstant( return g.insertNode(n)->output(); } -c10::optional toIValue(const Value* v) { +std::optional toIValue(const Value* v) { if (v->node()->kind() != prim::Constant || v->type()->cast()) { return c10::nullopt; } diff --git a/torch/csrc/jit/ir/constants.h b/torch/csrc/jit/ir/constants.h index d9d11075dd204..118da1e932d9c 100644 --- a/torch/csrc/jit/ir/constants.h +++ b/torch/csrc/jit/ir/constants.h @@ -25,8 +25,8 @@ struct TORCH_API constant_not_supported_error : public std::runtime_error { TORCH_API Value* insertConstant( Graph& g, const IValue& val, - c10::optional loc = c10::nullopt, - c10::optional scope = c10::nullopt); + std::optional loc = c10::nullopt, + std::optional scope = c10::nullopt); // note: prefer g.insertConsant(val, loc) which does exactly the same thing // this function is only declared/defined here because its implementation is @@ -34,11 +34,11 @@ TORCH_API Value* insertConstant( // constants.cpp. // // returns a c10::nullopt if the IValue kind cannot be inserted as a constant -TORCH_API c10::optional tryInsertConstant( +TORCH_API std::optional tryInsertConstant( Graph& g, const IValue& val, - c10::optional loc = c10::nullopt, - c10::optional scope = c10::nullopt); + std::optional loc = c10::nullopt, + std::optional scope = c10::nullopt); //////////////////////////////////////////////////////////////////////////////// // Helper for retrieving constants @@ -46,12 +46,12 @@ TORCH_API c10::optional tryInsertConstant( // attempt to convert a (possibly constant) Value* into an interpreter value // (IValue). 
returns c10::nullopt if the Value* was not constant -TORCH_API c10::optional toIValue(const Value* v); +TORCH_API std::optional toIValue(const Value* v); // if a value is a constant then try to turn into type T using the // same rules as the interpreter template -c10::optional constant_as(const Value* v) { +std::optional constant_as(const Value* v) { if (auto ivalue = toIValue(v)) { return ivalue->to(); } diff --git a/torch/csrc/jit/ir/ir.cpp b/torch/csrc/jit/ir/ir.cpp index a320570de5ca9..e288f78875c62 100644 --- a/torch/csrc/jit/ir/ir.cpp +++ b/torch/csrc/jit/ir/ir.cpp @@ -418,7 +418,7 @@ std::ostream& operator<<(std::ostream& out, const Graph& g) { static void checkSameDevice(const Node* node) { bool has_device = false; - c10::optional device = c10::nullopt; + std::optional device = c10::nullopt; auto checkValue = [&](const Value* v) { if (TensorTypePtr type = v->type()->cast()) { if (type->device() && !has_device) { @@ -984,7 +984,7 @@ static size_t findArgument(const FunctionSchema& the_schema, Symbol name) { return findArgument(the_schema, unqualName); } -c10::optional Node::get(Symbol name) const { +std::optional Node::get(Symbol name) const { return toIValue(namedInput(name)); } @@ -1686,7 +1686,7 @@ Value* Graph::insert( Symbol opname, at::ArrayRef args, at::ArrayRef kwargs, - const c10::optional& range) { + const std::optional& range) { return emitBuiltinCall( range.value_or(fakeRange()), *this, opname, args, kwargs); } @@ -1993,8 +1993,8 @@ Node* Graph::createClone( Value* Graph::insertConstant( const IValue& val, - c10::optional loc, - c10::optional scope) { + std::optional loc, + std::optional scope) { return jit::insertConstant(*this, val, std::move(loc), std::move(scope)); } @@ -2051,14 +2051,14 @@ void inlineCallStackOfNode( std::unordered_map& new_cs_entries, Function* callee, Node* to_replace, - c10::optional m_info); + std::optional m_info); static void inlineCallStackOfBlock( Block* b, std::unordered_map& new_cs_entries, Function* callee, Node* to_replace, - c10::optional m_info) { + std::optional m_info) { for (auto n : b->nodes()) { inlineCallStackOfNode(n, new_cs_entries, callee, to_replace, m_info); } @@ -2069,7 +2069,7 @@ void inlineCallStackOfNode( std::unordered_map& new_cs_entries, Function* callee, Node* to_replace, - c10::optional m_info) { + std::optional m_info) { auto new_node_cs = new_node->callstack(); InlinedCallStack* raw_callstack_ptr = @@ -2108,7 +2108,7 @@ std::vector inlineCallTo( std::unordered_map new_callstack_entries; - c10::optional module_instance_info = c10::nullopt; + std::optional module_instance_info = c10::nullopt; if (to_replace->kind() == prim::CallMethod) { auto class_type_ptr = to_replace->input(0)->type()->cast(); if (to_replace->input(0)->node()->kind() == prim::GetAttr) { diff --git a/torch/csrc/jit/ir/ir.h b/torch/csrc/jit/ir/ir.h index 4781b15229cbb..549f4a11001f5 100644 --- a/torch/csrc/jit/ir/ir.h +++ b/torch/csrc/jit/ir/ir.h @@ -332,9 +332,9 @@ struct TORCH_API Node { std::vector blocks_; Graph* graph_; Block* owning_block_; - c10::optional source_range_; + std::optional source_range_; ScopePtr scope_; - c10::optional callstack_; + std::optional callstack_; // Assumes FunctionSchemas are persistent, so we don't manage their lifetime. 
// This field is effective a cache that's populated on attribute lookups and // invalidated every time we perform an operation that could potentially @@ -348,7 +348,7 @@ struct TORCH_API Node { // is changed, we need to rely on this name // to retrieve old schemas to successfully apply upgraders // for this operator. - c10::optional historic_schema_name_ = c10::nullopt; + std::optional historic_schema_name_ = c10::nullopt; protected: Node(Graph* graph_, NodeKind kind_); // defined after graph @@ -373,7 +373,7 @@ struct TORCH_API Node { return wrap_; } - const c10::optional getHistoricSchemaName() { + const std::optional getHistoricSchemaName() { return historic_schema_name_; } @@ -442,7 +442,7 @@ struct TORCH_API Node { return this; } - c10::optional callstack() const { + std::optional callstack() const { return callstack_; } void setCallStack(InlinedCallStackPtr cs) { @@ -527,10 +527,10 @@ struct TORCH_API Node { Value* namedInput(const std::string& unqualName) const; Value* namedInput(Symbol name) const; - c10::optional get(Symbol name) const; + std::optional get(Symbol name) const; template - c10::optional get(Symbol name) const { + std::optional get(Symbol name) const { if (auto v = get(name)) { return v->template to(); } @@ -1208,7 +1208,7 @@ struct Graph : std::enable_shared_from_this { Node* insert_before_; int64_t predicted_insert_count_ = 0; - c10::optional op_version_; + std::optional op_version_; public: Graph(ScopePtr scope_root = c10::make_intrusive()) @@ -1261,11 +1261,11 @@ struct Graph : std::enable_shared_from_this { return current_scope_; } - void set_op_version(c10::optional version) { + void set_op_version(std::optional version) { op_version_ = version; } - c10::optional get_op_version() { + std::optional get_op_version() { return op_version_; } @@ -1368,8 +1368,8 @@ struct Graph : std::enable_shared_from_this { // Insert constant IValue into the graph. 
TORCH_API Value* insertConstant( const IValue& val, - c10::optional loc = c10::nullopt, - c10::optional scope = c10::nullopt); + std::optional loc = c10::nullopt, + std::optional scope = c10::nullopt); // Schema-driven insert: // This inserts a node into the graph with inputs determined from args and @@ -1382,7 +1382,7 @@ struct Graph : std::enable_shared_from_this { Symbol opname, at::ArrayRef args, at::ArrayRef kwargs = {}, - const c10::optional& range = {}); + const std::optional& range = {}); Node* appendNode(Node* n) { return block_->appendNode(n); @@ -1591,7 +1591,7 @@ struct TORCH_API PythonOp : public Node { // recover the autograd.Function instance, if this PythonOp's function // was originally SomeFunction.apply // used in ONNX for discovering symbolics - virtual c10::optional autogradFunction() const = 0; + virtual std::optional autogradFunction() const = 0; virtual void lint_python() const = 0; }; @@ -1730,7 +1730,7 @@ struct OperatorMap { return n->maybeOperator() && contains(n->getOperator()); } - c10::optional find(const Operator& op) { + std::optional find(const Operator& op) { const auto it = map.find(Symbol::fromQualString(op.schema().name())); if (it == map.end()) { return c10::nullopt; @@ -1806,7 +1806,7 @@ struct FunctionSchemaMap { return false; } - c10::optional find(const FunctionSchema& schema) const { + std::optional find(const FunctionSchema& schema) const { const auto it = map.find(Symbol::fromQualString(schema.name())); if (it == map.end()) { return c10::nullopt; diff --git a/torch/csrc/jit/ir/irparser.cpp b/torch/csrc/jit/ir/irparser.cpp index c37988e322a8d..06e0a66fa055c 100644 --- a/torch/csrc/jit/ir/irparser.cpp +++ b/torch/csrc/jit/ir/irparser.cpp @@ -169,7 +169,7 @@ void IRParser::parseOperatorOutputs(std::vector* outs) { ParsedLiteral IRParser::parseScalarLiteral(Node* n) { auto token = L.cur(); std::string str; - std::pair> type_alias; + std::pair> type_alias; ParsedLiteral r; switch (token.kind) { case TK_STRINGLITERAL: diff --git a/torch/csrc/jit/ir/named_value.h b/torch/csrc/jit/ir/named_value.h index ead3d73e9a86b..277e7f2699695 100644 --- a/torch/csrc/jit/ir/named_value.h +++ b/torch/csrc/jit/ir/named_value.h @@ -73,8 +73,8 @@ struct NamedValue { at::TypePtr type() const; private: - c10::optional loc_; - c10::optional name_; + std::optional loc_; + std::optional name_; Value* value_{nullptr}; // only valid if value_ == nullptr; IValue ivalue_; diff --git a/torch/csrc/jit/ir/scope.cpp b/torch/csrc/jit/ir/scope.cpp index dfb1ef36f359e..3ff1c22b8d119 100644 --- a/torch/csrc/jit/ir/scope.cpp +++ b/torch/csrc/jit/ir/scope.cpp @@ -113,7 +113,7 @@ InlinedCallStack::InlinedCallStack(Function* fn, SourceRange source_range) InlinedCallStack::InlinedCallStack( Function* fn, SourceRange source_range, - c10::optional module_instance_info) + std::optional module_instance_info) : fn_(fn), fn_name_(fn_ ? 
fn_->name() : ""), source_range_(std::move(source_range)), @@ -122,7 +122,7 @@ InlinedCallStack::InlinedCallStack( InlinedCallStack::InlinedCallStack( Function* fn, SourceRange source_range, - c10::optional module_instance_info, + std::optional module_instance_info, std::string& function_name) : fn_(fn), fn_name_(std::move(function_name)), @@ -142,7 +142,7 @@ InlinedCallStack::InlinedCallStack( InlinedCallStackPtr callee, Function* fn, SourceRange source_range, - c10::optional module_instance_info, + std::optional module_instance_info, std::string& function_name) : callee_(std::move(callee)), fn_(fn), @@ -154,22 +154,22 @@ InlinedCallStack::InlinedCallStack( InlinedCallStackPtr callee, Function* fn, SourceRange source_range, - c10::optional module_instance_info) + std::optional module_instance_info) : callee_(std::move(callee)), fn_(fn), fn_name_(fn_ ? fn_->name() : ""), source_range_(std::move(source_range)), module_instance_info_(std::move(module_instance_info)) {} -c10::optional InlinedCallStack::callee() const { +std::optional InlinedCallStack::callee() const { return callee_; } -void InlinedCallStack::setCallee(c10::optional callee) { +void InlinedCallStack::setCallee(std::optional callee) { callee_ = std::move(callee); } -c10::optional InlinedCallStack::module_instance() const { +std::optional InlinedCallStack::module_instance() const { return module_instance_info_; } @@ -187,7 +187,7 @@ const std::string& InlinedCallStack::function_name() const { std::vector InlinedCallStack::vec() { std::vector r; - c10::optional current = intrusive_from_this(); + std::optional current = intrusive_from_this(); while (current) { r.emplace_back( (*current)->fn_, diff --git a/torch/csrc/jit/ir/scope.h b/torch/csrc/jit/ir/scope.h index 423bbbd3ab2e1..5449803032238 100644 --- a/torch/csrc/jit/ir/scope.h +++ b/torch/csrc/jit/ir/scope.h @@ -120,11 +120,11 @@ struct ModuleInstanceInfo { */ using InlinedCallStackPtr = c10::intrusive_ptr; using InlinedCallStackEntry = - std::tuple>; + std::tuple>; struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target { private: - c10::optional callee_; + std::optional callee_; Function* fn_; // Reason for fn_name_ even though we have fn_ // Serialized callstack is used in circustmances where InlinedCallstack @@ -137,7 +137,7 @@ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target { const std::string fn_name_; SourceRange source_range_; InlinedCallStackPtr intrusive_from_this(); - c10::optional module_instance_info_; + std::optional module_instance_info_; public: // Constructor for a leaf callstack node. @@ -147,13 +147,13 @@ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target { InlinedCallStack( Function* fn, SourceRange source_range, - c10::optional module_instance_info); + std::optional module_instance_info); // Constructor for a leaf callstack node. InlinedCallStack( Function* fn, SourceRange source_range, - c10::optional module_instance_info, + std::optional module_instance_info, std::string& function_name); // Constructor for an inner callstack node. @@ -166,20 +166,20 @@ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target { InlinedCallStackPtr callee, Function* fn, SourceRange source_range, - c10::optional module_instance_info); + std::optional module_instance_info); InlinedCallStack( InlinedCallStackPtr callee, Function* fn, SourceRange source_range, - c10::optional module_instance_info, + std::optional module_instance_info, std::string& function_name); // Return next element in the callstack list. 
- c10::optional callee() const; + std::optional callee() const; // Return module instance associated with the current element. - c10::optional module_instance() const; + std::optional module_instance() const; // Returns the source range of the node SourceRange source_range() const; @@ -191,7 +191,7 @@ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target { // Return callstack as a vector of [Function, SourceRange] pairs. std::vector vec(); - void setCallee(c10::optional); + void setCallee(std::optional); bool operator==(const InlinedCallStack& rhs) const { // No need to compare fn_, since source_range equivalence check diff --git a/torch/csrc/jit/mobile/compatibility/model_compatibility.cpp b/torch/csrc/jit/mobile/compatibility/model_compatibility.cpp index de120c8fa1e87..1980023e8fc4a 100644 --- a/torch/csrc/jit/mobile/compatibility/model_compatibility.cpp +++ b/torch/csrc/jit/mobile/compatibility/model_compatibility.cpp @@ -31,7 +31,7 @@ using caffe2::serialize::ReadAdapterInterface; c10::IValue readArchive( const std::string& archive_name, PyTorchStreamReader& stream_reader) { - c10::optional device; + std::optional device; std::shared_ptr compilation_unit = std::make_shared(); diff --git a/torch/csrc/jit/mobile/compatibility/runtime_compatibility.cpp b/torch/csrc/jit/mobile/compatibility/runtime_compatibility.cpp index b3516e5bafc80..1cda81045b81a 100644 --- a/torch/csrc/jit/mobile/compatibility/runtime_compatibility.cpp +++ b/torch/csrc/jit/mobile/compatibility/runtime_compatibility.cpp @@ -53,7 +53,7 @@ std::unordered_map _get_runtime_ops_and_info() { for (auto& op : dispatcherOperators) { // grab schema const auto op_handle = c10::Dispatcher::singleton().findOp(op); - c10::optional num_schema_args; + std::optional num_schema_args; if (op_handle->hasSchema()) { num_schema_args = op_handle->schema().arguments().size(); } diff --git a/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h b/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h index 13adf04c0cc9d..2e65f1f38bd8d 100644 --- a/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h +++ b/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h @@ -13,7 +13,7 @@ namespace jit { // Struct storing metadata of an operator that can be useful for versioning struct OperatorInfo { // The number of arguments within the schema of the op - c10::optional num_schema_args; + std::optional num_schema_args; }; struct RuntimeCompatibilityInfo { diff --git a/torch/csrc/jit/mobile/flatbuffer_loader.cpp b/torch/csrc/jit/mobile/flatbuffer_loader.cpp index f906f4e2b9eb4..239deb76d2673 100644 --- a/torch/csrc/jit/mobile/flatbuffer_loader.cpp +++ b/torch/csrc/jit/mobile/flatbuffer_loader.cpp @@ -359,7 +359,7 @@ std::unique_ptr FlatbufferLoader::parseFunction( (operator_version < caffe2::serialize::kProducedFileFormatVersion); for (const auto* op : *method->operators()) { - c10::optional num_args = c10::nullopt; + std::optional num_args = c10::nullopt; if (op->num_args_serialized() > -1) { num_args = op->num_args_serialized(); } @@ -752,7 +752,7 @@ void FlatbufferLoader::extractJitSourceAndConstants( mobile::Module parse_and_initialize_mobile_module( void* data, size_t size, - c10::optional, + std::optional, ExtraFilesMap* extra_files, bool should_copy_tensor_memory) { // TODO(T128189662): If not copying, enforce that data is aligned to @@ -781,7 +781,7 @@ mobile::Module parse_and_initialize_mobile_module( mobile::Module parse_and_initialize_mobile_module( std::shared_ptr data, size_t size, - c10::optional device, 
+ std::optional device, ExtraFilesMap* extra_files) { mobile::Module m = parse_and_initialize_mobile_module( data.get(), @@ -798,7 +798,7 @@ mobile::Module parse_and_initialize_mobile_module_for_jit( size_t size, ExtraFilesMap& jit_sources, std::vector& jit_constants, - c10::optional, + std::optional, ExtraFilesMap* extra_files) { TORCH_CHECK( mobile::serialization::ModuleBufferHasIdentifier(data), "Format error"); @@ -825,7 +825,7 @@ mobile::Module parse_and_initialize_mobile_module_for_jit( mobile::Module load_mobile_module_from_file( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap* extra_files) { auto [data, size] = get_file_content(filename.c_str()); return parse_and_initialize_mobile_module( @@ -885,7 +885,7 @@ mobile::ModuleInfo get_module_info_from_flatbuffer(char* flatbuffer_content) { mobile::Module load_mobile_module_from_stream_with_copy( std::istream& in, - c10::optional device, + std::optional device, ExtraFilesMap* extra_files) { auto [data, size] = get_stream_content(in); return parse_and_initialize_mobile_module( @@ -895,7 +895,7 @@ mobile::Module load_mobile_module_from_stream_with_copy( mobile::Module parse_flatbuffer_no_object( std::shared_ptr data, size_t size, - c10::optional device) { + std::optional device) { (void)device; (void)size; diff --git a/torch/csrc/jit/mobile/flatbuffer_loader.h b/torch/csrc/jit/mobile/flatbuffer_loader.h index f29fe5b2e4942..9ac9636f3f14b 100644 --- a/torch/csrc/jit/mobile/flatbuffer_loader.h +++ b/torch/csrc/jit/mobile/flatbuffer_loader.h @@ -58,7 +58,7 @@ using ExtraFilesMap = std::unordered_map; TORCH_API mobile::Module parse_and_initialize_mobile_module( void* data, size_t size, // of `data`, in bytes. - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, ExtraFilesMap* extra_files = nullptr, bool should_copy_tensor_memory = false); @@ -74,7 +74,7 @@ TORCH_API mobile::Module parse_and_initialize_mobile_module( TORCH_API mobile::Module parse_and_initialize_mobile_module( std::shared_ptr data, size_t size, // of `data`, in bytes. - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, ExtraFilesMap* extra_files = nullptr); // Parse a mobile::Module from raw bytes, also returning JIT-related metadata. @@ -87,7 +87,7 @@ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit( size_t size, // of `data`, in bytes. ExtraFilesMap& jit_sources, std::vector& jit_constants, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, ExtraFilesMap* extra_files = nullptr); // Load a mobile::Module from a filepath. @@ -100,7 +100,7 @@ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit( // directly. 
TORCH_API mobile::Module load_mobile_module_from_file( const std::string& filename, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, ExtraFilesMap* extra_files = nullptr); TORCH_API uint64_t get_bytecode_version(std::istream& in); @@ -114,18 +114,18 @@ TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer( // its entirity to a buffer TORCH_API mobile::Module load_mobile_module_from_stream_with_copy( std::istream& in, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, ExtraFilesMap* extra_files = nullptr); TORCH_API mobile::Module parse_flatbuffer_no_object( std::shared_ptr data, size_t size, - c10::optional device); + std::optional device); TORCH_API mobile::Module parse_and_initialize_mobile_module( void* data, size_t, - c10::optional, + std::optional, ExtraFilesMap* extra_files, bool should_copy_tensor_memory); diff --git a/torch/csrc/jit/mobile/frame.h b/torch/csrc/jit/mobile/frame.h index 2db12f7d19374..45c51fef0085e 100644 --- a/torch/csrc/jit/mobile/frame.h +++ b/torch/csrc/jit/mobile/frame.h @@ -32,11 +32,11 @@ class Frame { return code_.instructions_.at(pc_); } - c10::optional getDebugHandle() const { + std::optional getDebugHandle() const { return getDebugHandle(pc_); } - c10::optional getDebugHandle(size_t pc) const { + std::optional getDebugHandle(size_t pc) const { if (pc >= code_.debug_handles_.size()) { return {}; } diff --git a/torch/csrc/jit/mobile/function.cpp b/torch/csrc/jit/mobile/function.cpp index b410bf7765cc7..36f19fb1fac41 100644 --- a/torch/csrc/jit/mobile/function.cpp +++ b/torch/csrc/jit/mobile/function.cpp @@ -47,7 +47,7 @@ void Function::append_instruction(OpCode op, int X, int N) { void Function::append_operator( const std::string& name, const std::string& overload_name, - const c10::optional& num_specified_args) { + const std::optional& num_specified_args) { // Keep the original opname in code_ code_.op_names_.emplace_back(name, overload_name); code_.operator_input_sizes_.emplace_back(num_specified_args.value_or(-1)); @@ -71,8 +71,8 @@ bool Function::initialize_operators(bool should_check_operators) { for (unsigned i = 0; i < code_.op_names_.size(); i++) { const auto& opname = code_.op_names_[i]; int num_args = code_.operator_input_sizes_[i]; - c10::optional num_specified_args = - num_args < 0 ? c10::nullopt : c10::optional(num_args); + std::optional num_specified_args = + num_args < 0 ? 
c10::nullopt : std::optional(num_args); auto func = makeOperatorFunction(opname, num_specified_args); if (!func.has_value()) { unsupported_op_names.insert(operator_str(opname)); @@ -165,9 +165,9 @@ const std::vector& Function::getExceptionDebugHandles() const { return getInterpretersExceptionDebugHandles(); } -c10::optional> makeOperatorFunction( +std::optional> makeOperatorFunction( c10::OperatorName opname, - c10::optional num_specified_args) { + std::optional num_specified_args) { std::function fn; const auto full_name = c10::toString(opname); const std::vector* pArgs = nullptr; diff --git a/torch/csrc/jit/mobile/function.h b/torch/csrc/jit/mobile/function.h index fb6f77fa64d76..42065d4a1c1b0 100644 --- a/torch/csrc/jit/mobile/function.h +++ b/torch/csrc/jit/mobile/function.h @@ -37,7 +37,7 @@ class TORCH_API Function : public torch::jit::Function { void append_operator( const std::string& name, const std::string& overload_name, - const c10::optional& num_specified_args); + const std::optional& num_specified_args); void append_constant(const c10::IValue& constant); void append_type(const c10::TypePtr& type); void append_function(mobile::Function& func); @@ -75,9 +75,9 @@ class TORCH_API Function : public torch::jit::Function { at::optional schema_; // (byte-code version 4+) }; -c10::optional> makeOperatorFunction( +std::optional> makeOperatorFunction( c10::OperatorName opname, - c10::optional num_specified_args); + std::optional num_specified_args); TORCH_API std::string operator_str(const c10::OperatorName& opname); diff --git a/torch/csrc/jit/mobile/import.cpp b/torch/csrc/jit/mobile/import.cpp index a82e7d69366ec..96ff6c88779d9 100644 --- a/torch/csrc/jit/mobile/import.cpp +++ b/torch/csrc/jit/mobile/import.cpp @@ -191,12 +191,12 @@ class BytecodeDeserializer final { explicit BytecodeDeserializer( std::unique_ptr reader, uint64_t module_load_options = 0); - mobile::Module deserialize(c10::optional device); + mobile::Module deserialize(std::optional device); mobile::Module deserialize( - c10::optional device, + std::optional device, ExtraFilesMap& extra_files); void deserialize_only_extra( - c10::optional device, + std::optional device, ExtraFilesMap& extra_files); private: @@ -204,7 +204,7 @@ class BytecodeDeserializer final { void init_upgrader(mobile::Function* function); void parseMethods( c10::ivalue::TupleElements&& vals, - c10::optional&& debug_handles, + std::optional&& debug_handles, mobile::CompilationUnit& mcu); c10::IValue readArchive( const std::string& archive_name, @@ -217,7 +217,7 @@ class BytecodeDeserializer final { std::shared_ptr compilation_unit_; std::unordered_set imported_libs_; std::unique_ptr reader_{}; - c10::optional device_; + std::optional device_; uint64_t module_load_options_; // From `version` or `.data/version` in model.ptl and it's compute // dynamically. It's used for finding the minimum required runtime to run all @@ -305,7 +305,7 @@ void BytecodeDeserializer::init_upgrader(mobile::Function* function) { void BytecodeDeserializer::parseMethods( c10::ivalue::TupleElements&& vals, - c10::optional&& debug_handles, + std::optional&& debug_handles, mobile::CompilationUnit& mcu) { TORCH_CHECK(!vals.empty(), "Bytecode has no elements. 
"); // Initialized with the version number when kProducedBytecodeVersion was @@ -417,7 +417,7 @@ void BytecodeDeserializer::parseMethods( } void BytecodeDeserializer::deserialize_only_extra( - c10::optional device, + std::optional device, ExtraFilesMap& extra_files) { device_ = device; for (const auto& kv : extra_files) { @@ -431,14 +431,14 @@ void BytecodeDeserializer::deserialize_only_extra( } mobile::Module BytecodeDeserializer::deserialize( - c10::optional device, + std::optional device, ExtraFilesMap& extra_files) { deserialize_only_extra(device, extra_files); return deserialize(device); } mobile::Module BytecodeDeserializer::deserialize( - c10::optional device) { + std::optional device) { device_ = device; auto mcu = std::make_shared(); @@ -453,7 +453,7 @@ mobile::Module BytecodeDeserializer::deserialize( // auto bvals = std::move(readArchive("bytecode", mcu).toTupleRef()).elements(); - c10::optional debug_handles; + std::optional debug_handles; bool has_debug_handles{false}; if (reader_->hasRecord("mobile_debug_handles.pkl")) { debug_handles = @@ -504,7 +504,7 @@ c10::IValue BytecodeDeserializer::readArchive( mobile::Module _load_for_mobile_impl( std::unique_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, uint64_t module_load_options) { auto observer = torch::observerConfig().getModuleObserver(); @@ -577,7 +577,7 @@ mobile::Module _load_for_mobile_impl( mobile::Module _load_mobile_from_bytes( const std::shared_ptr& data, size_t size, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, uint64_t module_load_options) { TORCH_CHECK(size >= kFileFormatHeaderSize, "Format error"); @@ -603,28 +603,28 @@ mobile::Module _load_mobile_from_bytes( mobile::Module _load_for_mobile( std::istream& in, - c10::optional device) { + std::optional device) { ExtraFilesMap extra_files; return _load_for_mobile(in, device, extra_files); } mobile::Module _load_for_mobile( const std::string& filename, - c10::optional device) { + std::optional device) { ExtraFilesMap extra_files; return _load_for_mobile(filename, device, extra_files); } mobile::Module _load_for_mobile( std::unique_ptr rai, - c10::optional device) { + std::optional device) { ExtraFilesMap extra_files; return _load_for_mobile(std::move(rai), device, extra_files); } mobile::Module _load_for_mobile( std::istream& in, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, uint64_t module_load_options) { if (getFileFormat(in) == FileFormat::FlatbufferFileFormat) { @@ -640,7 +640,7 @@ mobile::Module _load_for_mobile( mobile::Module _load_for_mobile( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files) { return _load_for_mobile( filename, device, extra_files, kDefaultMobileLoadOptions); @@ -648,7 +648,7 @@ mobile::Module _load_for_mobile( mobile::Module _load_for_mobile( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, uint64_t module_load_options) { auto format = getFileFormat(filename); @@ -666,7 +666,7 @@ mobile::Module _load_for_mobile( TORCH_API mobile::Module _load_for_mobile( std::unique_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, uint64_t module_load_options) { // TODO optimize file read for non-flatbuffer models @@ -677,7 +677,7 @@ TORCH_API mobile::Module _load_for_mobile( void _load_extra_only_for_mobile( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files) { 
auto observer = torch::observerConfig().getModuleObserver(); // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand) diff --git a/torch/csrc/jit/mobile/import.h b/torch/csrc/jit/mobile/import.h index 26bc112f9a760..77a801e62571d 100644 --- a/torch/csrc/jit/mobile/import.h +++ b/torch/csrc/jit/mobile/import.h @@ -22,38 +22,38 @@ constexpr const char* kArchiveNameVersion = "version"; // into a mobile::Module object. TORCH_API mobile::Module _load_for_mobile( std::istream& in, - c10::optional device, + std::optional device, ExtraFilesMap& extra_file, uint64_t module_load_options = kDefaultMobileLoadOptions); TORCH_API mobile::Module _load_for_mobile( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files); TORCH_API mobile::Module _load_for_mobile( std::unique_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, uint64_t module_load_options = kDefaultMobileLoadOptions); TORCH_API mobile::Module _load_for_mobile( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, uint64_t module_load_options); TORCH_API mobile::Module _load_for_mobile( std::istream& in, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); TORCH_API mobile::Module _load_for_mobile( const std::string& filename, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); TORCH_API mobile::Module _load_for_mobile( std::unique_ptr rai, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); /** * Load only the contents of the "extra/" files whose names are @@ -69,7 +69,7 @@ TORCH_API mobile::Module _load_for_mobile( */ void _load_extra_only_for_mobile( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files); // Currently used by both mobile/import.cpp and model_compatibility.cpp. 
diff --git a/torch/csrc/jit/mobile/import_data.cpp b/torch/csrc/jit/mobile/import_data.cpp index 11fbcbc45e3f2..32825f1f5e17f 100644 --- a/torch/csrc/jit/mobile/import_data.cpp +++ b/torch/csrc/jit/mobile/import_data.cpp @@ -40,13 +40,13 @@ namespace { class IValueUnpickler final { public: explicit IValueUnpickler(std::unique_ptr reader); - c10::IValue deserialize(c10::optional device); + c10::IValue deserialize(std::optional device); private: c10::IValue readArchive( const std::string& archive_name, std::shared_ptr mcu, - c10::optional device); + std::optional device); std::shared_ptr compilation_unit_; std::unique_ptr reader_; @@ -56,7 +56,7 @@ IValueUnpickler::IValueUnpickler(std::unique_ptr reader) : compilation_unit_(std::make_shared()), reader_(std::move(reader)) {} -c10::IValue IValueUnpickler::deserialize(c10::optional device) { +c10::IValue IValueUnpickler::deserialize(std::optional device) { auto mcu = std::make_shared(); // NOLINTNEXTLINE(performance-move-const-arg) @@ -66,7 +66,7 @@ c10::IValue IValueUnpickler::deserialize(c10::optional device) { c10::IValue IValueUnpickler::readArchive( const std::string& archive_name, std::shared_ptr mcu, - c10::optional device) { + std::optional device) { std::stringstream picklename; picklename << archive_name << ".pkl"; at::DataPtr pickle_ptr; @@ -169,7 +169,7 @@ c10::IValue IValueUnpickler::readArchive( */ std::map load_parameters_from_zip( std::unique_ptr rai, - c10::optional device) { + std::optional device) { auto reader = std::make_unique(std::move(rai)); IValueUnpickler unpickler(std::move(reader)); auto result = unpickler.deserialize(device).toGenericDict(); @@ -241,7 +241,7 @@ std::map mobile_module_to_parameter_map( static std::map _load_parameters_bytes( std::shared_ptr data, size_t size, - c10::optional device) { + std::optional device) { TORCH_CHECK(size >= kFileFormatHeaderSize, "Unrecognized data format"); FileFormat format = getFileFormat(data.get()); // Call the appropriate parser. @@ -268,14 +268,14 @@ static std::map _load_parameters_bytes( std::map _load_parameters( std::istream& in, - c10::optional device) { + std::optional device) { auto [data, size] = get_stream_content(in); return _load_parameters_bytes(std::move(data), size, device); } std::map _load_parameters( const std::string& filename, - c10::optional device) { + std::optional device) { auto [data, size] = get_file_content(filename.c_str()); return _load_parameters_bytes(std::move(data), size, device); } diff --git a/torch/csrc/jit/mobile/import_data.h b/torch/csrc/jit/mobile/import_data.h index f3eb202b7f00a..25e1fd81341c1 100644 --- a/torch/csrc/jit/mobile/import_data.h +++ b/torch/csrc/jit/mobile/import_data.h @@ -19,7 +19,7 @@ namespace jit { */ TORCH_API std::map _load_parameters( std::istream& in, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); /** * Loads named parameters from the serialized data in @p filename. @@ -28,7 +28,7 @@ TORCH_API std::map _load_parameters( */ TORCH_API std::map _load_parameters( const std::string& filename, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); // NOTE: Please prefer using _load_parameters over using the function below. 
TORCH_API std::map mobile_module_to_parameter_map( diff --git a/torch/csrc/jit/mobile/model_tracer/OperatorCallTracer.cpp b/torch/csrc/jit/mobile/model_tracer/OperatorCallTracer.cpp index 0da724ade0bf8..c273b41537e40 100644 --- a/torch/csrc/jit/mobile/model_tracer/OperatorCallTracer.cpp +++ b/torch/csrc/jit/mobile/model_tracer/OperatorCallTracer.cpp @@ -10,7 +10,7 @@ OperatorCallTracer::OperatorCallTracer() { auto recorder_cb = [](const at::RecordFunction& fn) -> std::unique_ptr { - c10::optional op_name = fn.operator_name(); + std::optional op_name = fn.operator_name(); if (op_name.has_value()) { getCalledOperators().withLock( [op_name](std::set& called_operators) { diff --git a/torch/csrc/jit/mobile/module.cpp b/torch/csrc/jit/mobile/module.cpp index 55ec47d8e9387..23dfe9ff36785 100644 --- a/torch/csrc/jit/mobile/module.cpp +++ b/torch/csrc/jit/mobile/module.cpp @@ -46,7 +46,7 @@ Method Module::get_method(const std::string& name) const { bool Module::compareMethodSchemas( const std::string& name_1, const std::string& name_2) { - c10::optional schema_1, schema_2; + std::optional schema_1, schema_2; for (const auto& fn : cu_->methods()) { if (fn->name() == name_1) { schema_1 = fn->getSchema(); @@ -87,7 +87,7 @@ void Module::unsafeCopyMethod( cu_->register_function(std::move(new_fn)); } -c10::optional Module::find_method(const std::string& basename) const { +std::optional Module::find_method(const std::string& basename) const { for (const auto& fn : cu_->methods()) { if (fn->name() == basename) { return c10::make_optional(Method(this, fn.get())); @@ -316,7 +316,7 @@ c10::IValue Method::operator()(std::vector stack) const { return stack.front(); } -static c10::optional print_type(const c10::Type& t) { +static std::optional print_type(const c10::Type& t) { auto namedType = t.cast(); if (namedType && namedType->name()) { return namedType->name().value().qualifiedName(); diff --git a/torch/csrc/jit/mobile/module.h b/torch/csrc/jit/mobile/module.h index 5e5d87f946355..3d37c7dc436ad 100644 --- a/torch/csrc/jit/mobile/module.h +++ b/torch/csrc/jit/mobile/module.h @@ -76,7 +76,7 @@ class TORCH_API Module { c10::IValue forward(std::vector inputs) { return get_method("forward")(std::move(inputs)); } - c10::optional find_method(const std::string& basename) const; + std::optional find_method(const std::string& basename) const; const std::string name() const { return object_->name(); diff --git a/torch/csrc/jit/mobile/nnc/aot_compiler.cpp b/torch/csrc/jit/mobile/nnc/aot_compiler.cpp index 3b3fb8af6185a..1f7ba264048ff 100644 --- a/torch/csrc/jit/mobile/nnc/aot_compiler.cpp +++ b/torch/csrc/jit/mobile/nnc/aot_compiler.cpp @@ -328,7 +328,7 @@ static std::string getNncKernelFuncName( static std::pair, std::vector> preprocessGraphPasses( std::shared_ptr& graph, - const std::vector>& example_inputs, + const std::vector>& example_inputs, const std::vector& dynamic_sizes) { GRAPH_DEBUG("Before preprocessing graph passes: ", *graph); torch::jit::RemoveTensorMutation(graph); @@ -368,11 +368,11 @@ preprocessGraphPasses( return std::make_pair(graph, sym_val); } -static std::vector> generateExampleInputs( +static std::vector> generateExampleInputs( const std::vector>& inputShapes, const std::vector& inputTypes, const std::vector& inputMemoryFormats) { - std::vector> example_inputs; + std::vector> example_inputs; example_inputs.reserve(inputShapes.size()); for (const auto i : c10::irange(inputShapes.size())) { const auto dtype = at::dtype(inputTypes[i]); diff --git a/torch/csrc/jit/mobile/nnc/context.h 
b/torch/csrc/jit/mobile/nnc/context.h index ddc179740549e..3976d28ec8944 100644 --- a/torch/csrc/jit/mobile/nnc/context.h +++ b/torch/csrc/jit/mobile/nnc/context.h @@ -47,8 +47,8 @@ struct TORCH_API OutputSpec { std::vector sizes_; c10::ScalarType dtype_{c10::ScalarType::Undefined}; - c10::optional qscale_; - c10::optional qzero_; + std::optional qscale_; + std::optional qzero_; }; // Hold the temporary buffers / states needed during the execution. diff --git a/torch/csrc/jit/mobile/parse_operators.cpp b/torch/csrc/jit/mobile/parse_operators.cpp index 03415657c780b..c260a2e5d832a 100644 --- a/torch/csrc/jit/mobile/parse_operators.cpp +++ b/torch/csrc/jit/mobile/parse_operators.cpp @@ -16,7 +16,7 @@ void parseOperators( "There should be either two parts (name and overload name), ", "or three parts (name, overload name and number of specified args) ", "for an operator"); - c10::optional num_args; + std::optional num_args; if (op_item.size() > 2) { num_args = op_item[2].toInt(); } diff --git a/torch/csrc/jit/mobile/promoted_prim_ops.cpp b/torch/csrc/jit/mobile/promoted_prim_ops.cpp index 7ee8140b931c5..8e49749042424 100644 --- a/torch/csrc/jit/mobile/promoted_prim_ops.cpp +++ b/torch/csrc/jit/mobile/promoted_prim_ops.cpp @@ -24,7 +24,7 @@ void raiseException(Stack& stack) { void raiseExceptionWithMessage(Stack& stack) { // this kernel supports RaiseException with only two arguments: the error and // the message Please make changes only to this kernel - c10::optional qualified_class_name = + std::optional qualified_class_name = pop(stack).toOptional(); std::string message; pop(stack, message); @@ -116,9 +116,9 @@ void toPrimDType(Stack& stack) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool copy; pop(stack, non_blocking, copy); - c10::optional scalarType = + std::optional scalarType = pop(stack).toOptional(); - c10::optional device = c10::nullopt; + std::optional device = c10::nullopt; at::Tensor self = pop(stack).toTensor(); push(stack, to_dispatch(self, device, scalarType, non_blocking, copy)); } diff --git a/torch/csrc/jit/mobile/register_ops_common_utils.h b/torch/csrc/jit/mobile/register_ops_common_utils.h index b0ecaf055f5ee..904e8786b1611 100644 --- a/torch/csrc/jit/mobile/register_ops_common_utils.h +++ b/torch/csrc/jit/mobile/register_ops_common_utils.h @@ -17,8 +17,8 @@ int64_t normalizeIndex(int64_t idx, int64_t list_size); // reference function THPVariable_to in python_variable_methods.cpp static C10_UNUSED at::Tensor to_dispatch( at::Tensor self, - c10::optional device, - c10::optional scalarType, + std::optional device, + std::optional scalarType, bool non_blocking, bool copy) { if (device && device->is_cuda()) { diff --git a/torch/csrc/jit/mobile/upgrader_mobile.h b/torch/csrc/jit/mobile/upgrader_mobile.h index f339484214f8b..68094a62ceabb 100644 --- a/torch/csrc/jit/mobile/upgrader_mobile.h +++ b/torch/csrc/jit/mobile/upgrader_mobile.h @@ -28,7 +28,7 @@ getOperatorVersionMapForMobile(); struct OperatorString { const std::string name; const std::string overload_name; - const c10::optional num_specified_args; + const std::optional num_specified_args; }; struct ByteCodeFunctionWithOperator { diff --git a/torch/csrc/jit/operator_upgraders/utils.cpp b/torch/csrc/jit/operator_upgraders/utils.cpp index 2cfd7c0559fe0..fef7b92c83c95 100644 --- a/torch/csrc/jit/operator_upgraders/utils.cpp +++ b/torch/csrc/jit/operator_upgraders/utils.cpp @@ -10,7 +10,7 @@ namespace torch::jit { -c10::optional findUpgrader( +std::optional findUpgrader( const std::vector& upgraders_for_schema, 
size_t current_version) { // we want to find the entry which satisfies following two conditions: @@ -51,7 +51,7 @@ bool isOpSymbolCurrent(const std::string& name, size_t current_version) { std::vector loadPossibleHistoricOps( const std::string& name, - c10::optional version) { + std::optional version) { std::vector possibleSchemas; if (!version.has_value()) { diff --git a/torch/csrc/jit/operator_upgraders/utils.h b/torch/csrc/jit/operator_upgraders/utils.h index 78cb31b4bf60e..a30b8c1182b9c 100644 --- a/torch/csrc/jit/operator_upgraders/utils.h +++ b/torch/csrc/jit/operator_upgraders/utils.h @@ -16,7 +16,7 @@ struct UpgraderRange { // Given a list of upgrader entries for a single operator // and the model version for that operator, find a valid // upgrader. -TORCH_API c10::optional findUpgrader( +TORCH_API std::optional findUpgrader( const std::vector& upgraders_for_schema, size_t current_version); @@ -39,7 +39,7 @@ TORCH_API bool isOpSymbolCurrent( // can be multiple schemas for different overloads. TORCH_API std::vector loadPossibleHistoricOps( const std::string& name, - c10::optional version); + std::optional version); TORCH_API uint64_t getMaxOperatorVersion(); diff --git a/torch/csrc/jit/passes/autocast.cpp b/torch/csrc/jit/passes/autocast.cpp index 213f569f87b02..635162e049531 100644 --- a/torch/csrc/jit/passes/autocast.cpp +++ b/torch/csrc/jit/passes/autocast.cpp @@ -60,7 +60,7 @@ bool isAutocastNode(Value* value) { // 2. `prim::SetAttr` must follow `prim::CreateObject()` in the same block, // but there might be other nodes in between // -c10::optional parseAutocast( +std::optional parseAutocast( Value* value, const AutocastContext& context) { if (!isAutocastNode(value)) { @@ -71,7 +71,7 @@ c10::optional parseAutocast( AutocastScope scope; scope.instance = value; scope.context = context; - c10::optional enabled; + std::optional enabled; std::string device; c10::ScalarType dtype = c10::ScalarType::Undefined; for (Use use : value->uses()) { @@ -269,7 +269,7 @@ void updateAutocastEnabledCheck(Node* node, bool is_jit_enabled) { void handleBlock(Block* block, AutocastContext initial_state) { std::stack autocast_stack; - c10::optional incompatible_amp = c10::nullopt; + std::optional incompatible_amp = c10::nullopt; // The current autocast enabled/disabled state auto current_state = [&] { diff --git a/torch/csrc/jit/passes/canonicalize.cpp b/torch/csrc/jit/passes/canonicalize.cpp index 5a5b867a36d09..20a883a8d06fd 100644 --- a/torch/csrc/jit/passes/canonicalize.cpp +++ b/torch/csrc/jit/passes/canonicalize.cpp @@ -142,7 +142,7 @@ bool isBeforeOrAfter(const Use& a, const Use& b, bool checking_before) { return checking_before ? 
isBefore(a, b) : isAfter(a, b); } -c10::optional firstOrLastUse(Value* v, bool find_first) { +std::optional firstOrLastUse(Value* v, bool find_first) { if (v->uses().empty()) { return c10::nullopt; } @@ -157,9 +157,9 @@ c10::optional firstOrLastUse(Value* v, bool find_first) { return extreme_use; } -static std::vector> gatherFirstUses( +static std::vector> gatherFirstUses( at::ArrayRef values) { - return fmap(values, [&](Value* v) -> c10::optional { + return fmap(values, [&](Value* v) -> std::optional { return firstOrLastUse(v, true); }); } @@ -169,7 +169,7 @@ static std::vector sort_indexes(at::ArrayRef values) { std::vector idx(values.size()); std::iota(idx.begin(), idx.end(), 0); - std::vector> first_uses = gatherFirstUses(values); + std::vector> first_uses = gatherFirstUses(values); // Sort values based on canonical ordering of their first usage std::sort(idx.begin(), idx.end(), [&first_uses](size_t i1, size_t i2) { diff --git a/torch/csrc/jit/passes/canonicalize.h b/torch/csrc/jit/passes/canonicalize.h index 46d90d1a515f6..b84cdd9f6a355 100644 --- a/torch/csrc/jit/passes/canonicalize.h +++ b/torch/csrc/jit/passes/canonicalize.h @@ -11,7 +11,7 @@ TORCH_API std::shared_ptr Canonicalize( TORCH_API void CanonicalizeOutputs(std::shared_ptr& graph); -TORCH_API c10::optional firstOrLastUse(Value* v, bool find_first); +TORCH_API std::optional firstOrLastUse(Value* v, bool find_first); TORCH_API bool isBeforeOrAfter( const Use& a, diff --git a/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp b/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp index a8d7c75fbe7f3..72d419eeb9c16 100644 --- a/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp +++ b/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp @@ -12,7 +12,7 @@ struct ChunkOutput { size_t offset; }; -static c10::optional> getChunkOutputs(Node* chunk) { +static std::optional> getChunkOutputs(Node* chunk) { std::vector outputs; for (auto list_use : chunk->output()->uses()) { if (list_use.user->matches( diff --git a/torch/csrc/jit/passes/constant_propagation.cpp b/torch/csrc/jit/passes/constant_propagation.cpp index cd3fb6b1e2b06..6334cd75faa90 100644 --- a/torch/csrc/jit/passes/constant_propagation.cpp +++ b/torch/csrc/jit/passes/constant_propagation.cpp @@ -19,7 +19,7 @@ namespace torch { namespace jit { -c10::optional> runNodeIfInputsAreConstant( +std::optional> runNodeIfInputsAreConstant( const Node* n, bool ignore_custom_classes, AliasDb* db) { diff --git a/torch/csrc/jit/passes/constant_propagation.h b/torch/csrc/jit/passes/constant_propagation.h index 62293c8d7abc9..2200acfa39ede 100644 --- a/torch/csrc/jit/passes/constant_propagation.h +++ b/torch/csrc/jit/passes/constant_propagation.h @@ -23,7 +23,7 @@ TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr& graph); // make their own determination if constant prop is appropriate - for example // non-deterministic ops or ops with side effects. If ignore_custom_classes is // specified, nodes that output user defined classes are not run. 
-TORCH_API c10::optional runNodeIfInputsAreConstant( +TORCH_API std::optional runNodeIfInputsAreConstant( const Node* node, bool ignore_custom_classes = false, AliasDb* db = nullptr); diff --git a/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp b/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp index 162487201da7b..c5fe65537669a 100644 --- a/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp +++ b/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp @@ -281,7 +281,7 @@ class SubgraphSlicer { // Try to merge `producer` into `consumer`. If successful, this destroys // `producer` and returns the `consumer` group. - c10::optional tryMerge(Node* consumer, Node* producer) { + std::optional tryMerge(Node* consumer, Node* producer) { AT_ASSERT(consumer->kind() == prim::DifferentiableGraph); bool canMerge = shouldConsiderForMerge(producer) && aliasDb_.moveBeforeTopologicallyValid(producer, consumer); @@ -302,7 +302,7 @@ class SubgraphSlicer { std::vector& diff_nodes_; }; -c10::optional getProfileNodeRequiresGrad(Node* n) { +std::optional getProfileNodeRequiresGrad(Node* n) { TORCH_INTERNAL_ASSERT(n->kind() == prim::profile); if (!n->hasAttribute(attr::profiled_type)) { return c10::nullopt; @@ -359,7 +359,7 @@ struct ContextMapping { } }; -c10::optional findRequiresGradForOutput( +std::optional findRequiresGradForOutput( Node* diff_graph, Value* output, const ContextMapping& ctx_mapping) { @@ -374,7 +374,7 @@ c10::optional findRequiresGradForOutput( } if (use.user->kind() == prim::profile) { - c10::optional req_grad_use; + std::optional req_grad_use; if ((req_grad_use = getProfileNodeRequiresGrad(use.user)).has_value()) { return req_grad_use.value(); } @@ -393,7 +393,7 @@ c10::optional findRequiresGradForOutput( } if (dg_use.user->kind() == prim::profile) { - c10::optional req_grad_use; + std::optional req_grad_use; if ((req_grad_use = getProfileNodeRequiresGrad(dg_use.user)) .has_value()) { return req_grad_use.value(); diff --git a/torch/csrc/jit/passes/decompose_ops.cpp b/torch/csrc/jit/passes/decompose_ops.cpp index 9f5b3c80b6a07..1276a1f97245a 100644 --- a/torch/csrc/jit/passes/decompose_ops.cpp +++ b/torch/csrc/jit/passes/decompose_ops.cpp @@ -22,7 +22,7 @@ c10::AliasAnalysisKind aliasAnalysisFromSchema() { // helper to determine if an optional tensor argument/value passed in is // statically defined (neither a None constant nor a Optional[Tensor] type) // return yes, no, or no value if we can't tell -static c10::optional isDefined(Value* tensor) { +static std::optional isDefined(Value* tensor) { if (tensor->type()->isSubtypeOf(*TensorType::get())) { return true; } diff --git a/torch/csrc/jit/passes/device_type_analysis.cpp b/torch/csrc/jit/passes/device_type_analysis.cpp index 590ac9e2896a8..7670292696ae6 100644 --- a/torch/csrc/jit/passes/device_type_analysis.cpp +++ b/torch/csrc/jit/passes/device_type_analysis.cpp @@ -27,7 +27,7 @@ of the Node (based on the rule itself) Returns: Bool indicating if anything was changed */ -bool setDeviceType(Value* value, c10::optional device) { +bool setDeviceType(Value* value, std::optional device) { auto tensor_type = value->type()->expect(); bool changed = tensor_type->device() != device; if (changed) { @@ -36,7 +36,7 @@ bool setDeviceType(Value* value, c10::optional device) { return changed; } -bool setReturnsToDevice(Node* n, c10::optional device) { +bool setReturnsToDevice(Node* n, std::optional device) { bool changed = false; for (Value* out : n->outputs()) { auto tensor_type = out->type()->cast(); @@ -93,7 +93,7 @@ bool propWithNoDevice(Node* 
n) { auto tensor_type = n->inputs()[input_num]->type()->expect(); bool only_seen_cpu_zerodim = isZerodimCPUTensor(tensor_type); - c10::optional device = tensor_type->device(); + std::optional device = tensor_type->device(); // Now see if all inputs have a consistent device type for (input_num++; input_num < n->inputs().size(); input_num++) { diff --git a/torch/csrc/jit/passes/dtype_analysis.cpp b/torch/csrc/jit/passes/dtype_analysis.cpp index feeb5f567cd0d..f63ea6f341948 100644 --- a/torch/csrc/jit/passes/dtype_analysis.cpp +++ b/torch/csrc/jit/passes/dtype_analysis.cpp @@ -99,7 +99,7 @@ static bool canBeInferredWithMetaTensor(Node* n) { return true; } -c10::optional inferWithMetaTensor(Node* n) { +std::optional inferWithMetaTensor(Node* n) { GRAPH_DEBUG("inferWithMetaTensor", getHeader(n)); if (!canBeInferredWithMetaTensor(n)) { return c10::nullopt; diff --git a/torch/csrc/jit/passes/fold_conv_bn.cpp b/torch/csrc/jit/passes/fold_conv_bn.cpp index 9df6887d24289..6f0c82e7bebe2 100644 --- a/torch/csrc/jit/passes/fold_conv_bn.cpp +++ b/torch/csrc/jit/passes/fold_conv_bn.cpp @@ -105,7 +105,7 @@ void addBiasForConvIfNone(Module& module, const std::string& pattern_name) { if (!t->hasAttribute("bias")) { auto optional_tensor_type = OptionalType::create(TensorType::get()); t->addAttribute("bias", std::move(optional_tensor_type), true); - auto optional_tensor = c10::optional(); + auto optional_tensor = std::optional(); module.setattr("bias", std::move(optional_tensor)); replaceConvBiasWithGetAttr(module); } diff --git a/torch/csrc/jit/passes/freeze_module.cpp b/torch/csrc/jit/passes/freeze_module.cpp index 9ebbaa4e53e0d..4d67d5d217813 100644 --- a/torch/csrc/jit/passes/freeze_module.cpp +++ b/torch/csrc/jit/passes/freeze_module.cpp @@ -167,7 +167,7 @@ class AttributePropagator { // Examples: // submodule1.submodule2.foo -> {submodule2, "foo"} // submodule1.non_existent_module.foo -> nullopt - c10::optional resolveName(const std::string& name) { + std::optional resolveName(const std::string& name) { auto sub_names = splitName(name); if (sub_names.empty()) { return c10::nullopt; @@ -225,7 +225,7 @@ class AttributePropagator { return true; } - c10::optional> getModulePath( + std::optional> getModulePath( Value* input, std::shared_ptr& graph) { bool success = _loadModulePath(input, graph); diff --git a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp index f6f63de01a498..c28e99a445258 100644 --- a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp +++ b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp @@ -1099,7 +1099,7 @@ class MKLDNNSubgraphSlicer { // Try to merge `consumer` into `producer`. If successful, this destroys // `consumer` and returns the `producer` group. 
- c10::optional tryMerge(Node* producer, Node* consumer) { + std::optional tryMerge(Node* producer, Node* consumer) { AT_ASSERT(producer->kind() == prim::MKLDNNGroup); bool canMerge = shouldConsiderForMerge(consumer) && aliasDb_.moveAfterTopologicallyValid(consumer, producer); diff --git a/torch/csrc/jit/passes/graph_fuser.cpp b/torch/csrc/jit/passes/graph_fuser.cpp index 0acc6f9bd07bb..9848783072621 100644 --- a/torch/csrc/jit/passes/graph_fuser.cpp +++ b/torch/csrc/jit/passes/graph_fuser.cpp @@ -490,7 +490,7 @@ struct GraphFuser { return true; } - c10::optional findFusedChunk(Node* group, Value* input) { + std::optional findFusedChunk(Node* group, Value* input) { AT_ASSERT(group->kind() == prim::FusionGroup); auto it = std::find(group->inputs().begin(), group->inputs().end(), input); if (it == group->inputs().end()) { diff --git a/torch/csrc/jit/passes/graph_rewrite_helper.cpp b/torch/csrc/jit/passes/graph_rewrite_helper.cpp index cd06bee7fc4ab..edb9f5b9589a0 100644 --- a/torch/csrc/jit/passes/graph_rewrite_helper.cpp +++ b/torch/csrc/jit/passes/graph_rewrite_helper.cpp @@ -27,7 +27,7 @@ Value* getValue( return match_vmap.at(vmap.at(name)); } -c10::optional getIValue( +std::optional getIValue( const std::string& name, const std::unordered_map& match_vmap, const std::unordered_map& vmap) { diff --git a/torch/csrc/jit/passes/graph_rewrite_helper.h b/torch/csrc/jit/passes/graph_rewrite_helper.h index 0920830babb8b..9f8b9f0a1b8fa 100644 --- a/torch/csrc/jit/passes/graph_rewrite_helper.h +++ b/torch/csrc/jit/passes/graph_rewrite_helper.h @@ -14,7 +14,7 @@ Value* getValue( const std::string& name, const std::unordered_map& match_vmap, const std::unordered_map& vmap); -c10::optional getIValue( +std::optional getIValue( const std::string& name, const std::unordered_map& match_vmap, const std::unordered_map& vmap); diff --git a/torch/csrc/jit/passes/hoist_conv_packed_params.cpp b/torch/csrc/jit/passes/hoist_conv_packed_params.cpp index ef3b861772c31..c3db2373f2a3c 100644 --- a/torch/csrc/jit/passes/hoist_conv_packed_params.cpp +++ b/torch/csrc/jit/passes/hoist_conv_packed_params.cpp @@ -100,7 +100,7 @@ void HoistConvPackedParams(script::Module& m) { n->kind() == prim::GetAttr && n->s(attr::name) == "_packed_params"; if (isGetPackedParamsNode) { // make sure the foo in {foo}.{_packed_params} is a quantized conv - c10::optional moduleName = getModuleName(n->inputs()[0]); + std::optional moduleName = getModuleName(n->inputs()[0]); bool moduleNameIsQuantizedConv = moduleName.has_value() && (moduleName.value() == "__torch__.torch.ao.nn.quantized.modules.conv.Conv1d" || diff --git a/torch/csrc/jit/passes/integer_value_refinement.cpp b/torch/csrc/jit/passes/integer_value_refinement.cpp index e3a339efe6d7b..16a329b3b11f3 100644 --- a/torch/csrc/jit/passes/integer_value_refinement.cpp +++ b/torch/csrc/jit/passes/integer_value_refinement.cpp @@ -204,7 +204,7 @@ struct IntegerValueRefiner { return block_refinements; }; - c10::optional tryFindRefinement(Value* v) { + std::optional tryFindRefinement(Value* v) { for (const auto& ref : active_refinements_) { auto maybe_refinement = ref->find(v); if (maybe_refinement != ref->end()) { diff --git a/torch/csrc/jit/passes/loop_unrolling.cpp b/torch/csrc/jit/passes/loop_unrolling.cpp index 3df61ad8a7765..4fac1cfbe5fbf 100644 --- a/torch/csrc/jit/passes/loop_unrolling.cpp +++ b/torch/csrc/jit/passes/loop_unrolling.cpp @@ -19,7 +19,7 @@ static constexpr int64_t kMaxBodySize = 32; static constexpr int64_t kMaxBodyRepeats = 64; bool isTrueConstant(Value* val) { - 
c10::optional maybe_value = constant_as(val); + std::optional maybe_value = constant_as(val); return maybe_value && *maybe_value; } @@ -178,7 +178,7 @@ void unroll(Node* loop) { // Some optimization for constant-length loops. If we know they won't run too // many times, then we can unroll them entirely. Value* trip_count = loop->inputs().at(0); - c10::optional const_len = constant_as(trip_count); + std::optional const_len = constant_as(trip_count); if (const_len && *const_len < kMaxBodyRepeats) { Block* dest = loop->addBlock(); repeatBody(body, *const_len, dest); diff --git a/torch/csrc/jit/passes/onnx/constant_fold.cpp b/torch/csrc/jit/passes/onnx/constant_fold.cpp index 1d0457c65a5fb..4eeba79aae90c 100644 --- a/torch/csrc/jit/passes/onnx/constant_fold.cpp +++ b/torch/csrc/jit/passes/onnx/constant_fold.cpp @@ -64,7 +64,7 @@ void handleNegativeStartEndIndex( } } -c10::optional runTorchSlice_opset9( +std::optional runTorchSlice_opset9( const Node* node, std::vector& inputTensorValues) { assert(inputTensorValues.size() == 1); @@ -101,10 +101,10 @@ c10::optional runTorchSlice_opset9( return c10::nullopt; updated_val = at::narrow(updated_val, axis, start, length); } - return c10::optional(updated_val); + return std::optional(updated_val); } -c10::optional runTorchSlice_opset10( +std::optional runTorchSlice_opset10( const Node* node, std::vector& inputTensorValues) { const int maxSliceInputCount = 5; @@ -195,7 +195,7 @@ c10::optional runTorchSlice_opset10( return c10::nullopt; updated_val = at::narrow(updated_val, axis, start, length); } - return c10::optional(updated_val); + return std::optional(updated_val); } // Refer to AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF @@ -259,7 +259,7 @@ at::Tensor IntToTensor(int64_t value) { return at::squeeze(f_copy, 0); } -c10::optional runTorchBackendForOnnx( +std::optional runTorchBackendForOnnx( const Node* node, std::vector& inputTensorValues, int opset_version) { @@ -280,10 +280,10 @@ c10::optional runTorchBackendForOnnx( } updated_val = at::cat(at::TensorList(inputTensorValues), node->i(attr::axis)); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Sqrt) { updated_val = at::sqrt(inputTensorValues[0]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Div) { // One example shows at::div(CPULongType, CPULongType) = CPUFloatType, // So we add a cast below. 
@@ -292,16 +292,16 @@ c10::optional runTorchBackendForOnnx( inputTensorValues[1].scalar_type()) { updated_val = updated_val.to(inputTensorValues[0].scalar_type()); } - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Mul) { updated_val = at::mul(inputTensorValues[0], inputTensorValues[1]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Sub) { updated_val = at::sub(inputTensorValues[0], inputTensorValues[1]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Add) { updated_val = at::add(inputTensorValues[0], inputTensorValues[1]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Unsqueeze) { if (opset_version >= ONNX_OPSET_13) { assert(inputTensorValues.size() == 2); @@ -328,7 +328,7 @@ c10::optional runTorchBackendForOnnx( for (int64_t i = 0; i < inputTensorValues[1].sizes()[0]; ++i) { updated_val = at::unsqueeze(updated_val, axes[i]); } - return c10::optional(updated_val); + return std::optional(updated_val); } else if (opset_version >= ONNX_OPSET_9) { assert(inputTensorValues.size() == 1); if (!node->hasAttributeS("axes")) { @@ -340,7 +340,7 @@ c10::optional runTorchBackendForOnnx( for (auto axis : axesAttr) { updated_val = at::unsqueeze(updated_val, axis); } - return c10::optional(updated_val); + return std::optional(updated_val); } else { TORCH_WARN( "Constant folding - unsupported opset version. " @@ -373,7 +373,7 @@ c10::optional runTorchBackendForOnnx( updated_val = at::squeeze(updated_val, axes[i]); } } - return c10::optional(updated_val); + return std::optional(updated_val); } else if (opset_version >= ONNX_OPSET_9) { assert(inputTensorValues.size() == 1); updated_val = inputTensorValues[0]; @@ -384,7 +384,7 @@ c10::optional runTorchBackendForOnnx( updated_val = at::squeeze(updated_val, axis); } } - return c10::optional(updated_val); + return std::optional(updated_val); } else { TORCH_WARN( "Constant folding - unsupported opset version. " @@ -397,13 +397,13 @@ c10::optional runTorchBackendForOnnx( return c10::nullopt; } updated_val = inputTensorValues[0].permute(node->is(attr::perm)); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Cast) { assert(inputTensorValues.size() == 1); if (node->hasAttributeS("to") && ONNXTypeToATenType(node->i(attr::to))) { updated_val = inputTensorValues[0].to( ONNXTypeToATenType(node->i(attr::to)).value()); - return c10::optional(updated_val); + return std::optional(updated_val); } return c10::nullopt; } else if (node->kind() == onnx::Reshape) { @@ -433,11 +433,11 @@ c10::optional runTorchBackendForOnnx( shape[i] = shape_a[i]; } } - return c10::optional(at::reshape(updated_val, shape)); + return std::optional(at::reshape(updated_val, shape)); } else if (node->kind() == onnx::Shape) { TORCH_INTERNAL_ASSERT(inputTensorValues.size() == 1); updated_val = at::_shape_as_tensor(inputTensorValues[0]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::ReduceL1 || node->kind() == onnx::ReduceL2) { assert(inputTensorValues.size() == 1); if (!node->hasAttributeS("axes")) { @@ -449,7 +449,7 @@ c10::optional runTorchBackendForOnnx( int p = node->kind() == onnx::ReduceL1 ? 
1 : 2; updated_val = at::norm( inputTensorValues[0], p, node->is(attr::axes), node->i(attr::keepdims)); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::ReduceProd) { int64_t rank = inputTensorValues[0].sizes().size(); std::vector axes; @@ -469,7 +469,7 @@ c10::optional runTorchBackendForOnnx( for (const auto& axis : axes) { updated_val = at::prod(updated_val, axis, keepdims); } - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Gather) { assert(inputTensorValues.size() == 2); // default axis = 0 @@ -503,41 +503,41 @@ c10::optional runTorchBackendForOnnx( if (q < 1) { updated_val = updated_val.squeeze(axis); } - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Range) { updated_val = runTorchArange_opset11(node, inputTensorValues); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Where) { updated_val = at::where( inputTensorValues[0], inputTensorValues[1], inputTensorValues[2]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Equal) { updated_val = at::eq(inputTensorValues[0], inputTensorValues[1]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Greater) { updated_val = at::greater(inputTensorValues[0], inputTensorValues[1]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Less) { updated_val = at::less(inputTensorValues[0], inputTensorValues[1]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Neg) { updated_val = at::neg(inputTensorValues[0]); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Not) { auto ones = at::ones(inputTensorValues[0].sizes(), inputTensorValues[0].dtype()); updated_val = at::ne(inputTensorValues[0], ones); - return c10::optional(updated_val); + return std::optional(updated_val); } else if (node->kind() == onnx::Size) { int64_t total_size = 1; for (auto size : inputTensorValues[0].sizes()) { total_size *= size; } - return c10::optional(IntToTensor(total_size)); + return std::optional(IntToTensor(total_size)); } else if (node->kind() == onnx::Softmax) { int64_t axis = node->hasAttributeS("axis") ? 
node->i(attr::axis) : -1; updated_val = at::softmax(inputTensorValues[0], axis); - return c10::optional(updated_val); + return std::optional(updated_val); } else { return c10::nullopt; } diff --git a/torch/csrc/jit/passes/onnx/constant_fold.h b/torch/csrc/jit/passes/onnx/constant_fold.h index 8bfb0dd081c39..201c3def32685 100644 --- a/torch/csrc/jit/passes/onnx/constant_fold.h +++ b/torch/csrc/jit/passes/onnx/constant_fold.h @@ -19,7 +19,7 @@ namespace onnx_constant_fold { at::Tensor IntToTensor(int64_t value); -c10::optional runTorchBackendForOnnx( +std::optional runTorchBackendForOnnx( const Node* node, std::vector& inputTensorValues, int opset_version); diff --git a/torch/csrc/jit/passes/onnx/constant_map.cpp b/torch/csrc/jit/passes/onnx/constant_map.cpp index 50eeaef540bb4..716232cebbb03 100644 --- a/torch/csrc/jit/passes/onnx/constant_map.cpp +++ b/torch/csrc/jit/passes/onnx/constant_map.cpp @@ -32,7 +32,7 @@ bool ConstantValueMap::HasRank(const std::string& tensorName) { ConstantValueMap::getInstance().rankMap.end(); } -c10::optional ConstantValueMap::GetRank(const std::string& tensorName) { +std::optional ConstantValueMap::GetRank(const std::string& tensorName) { if (!HasRank(tensorName)) { return c10::nullopt; } @@ -60,7 +60,7 @@ bool ConstantValueMap::HasShape(const std::string& tensorName) { ConstantValueMap::getInstance().shapeMap.end(); } -c10::optional ConstantValueMap::GetShape( +std::optional ConstantValueMap::GetShape( const std::string& tensorName) { if (!HasShape(tensorName)) { return c10::nullopt; @@ -79,7 +79,7 @@ bool ConstantValueMap::HasValue(const std::string& tensorName) { ConstantValueMap::getInstance().tensorValueMap.end(); } -c10::optional ConstantValueMap::GetValue( +std::optional ConstantValueMap::GetValue( const std::string& tensorName) { if (!HasValue(tensorName)) { return c10::nullopt; @@ -103,7 +103,7 @@ std::vector ConstantValueMap::GetCompleteShapeInto1DInt64Vector( return shape_value; } -c10::optional> ConstantValueMap::GetShapeInto1DInt64Vector( +std::optional> ConstantValueMap::GetShapeInto1DInt64Vector( const std::string& value_name) { if (ConstantValueMap::HasShape(value_name)) { auto shape_size = ConstantValueMap::GetShape(value_name).value(); @@ -116,7 +116,7 @@ c10::optional> ConstantValueMap::GetShapeInto1DInt64Vector( return c10::nullopt; } -c10::optional> ConstantValueMap:: +std::optional> ConstantValueMap:: GetShapeInto1DInt64VectorWithOneUnknown(const std::string& value_name) { if (ConstantValueMap::HasShape(value_name)) { auto shape_size = ConstantValueMap::GetShape(value_name).value(); @@ -172,7 +172,7 @@ bool ConstantValueMap::HasTypeReliable(const std::string& tensorName) { ConstantValueMap::getInstance().typeReliableMap.end(); } -c10::optional ConstantValueMap::GetTypeReliable( +std::optional ConstantValueMap::GetTypeReliable( const std::string& tensorName) { if (!HasTypeReliable(tensorName)) { return c10::nullopt; @@ -191,7 +191,7 @@ bool ConstantValueMap::HasUseInferredType(const std::string& tensorName) { ConstantValueMap::getInstance().useInferredTypeMap.end(); } -c10::optional ConstantValueMap::GetUseInferredType( +std::optional ConstantValueMap::GetUseInferredType( const std::string& tensorName) { if (!HasUseInferredType(tensorName)) { return c10::nullopt; @@ -210,7 +210,7 @@ bool ConstantValueMap::HasShapeValue(const std::string& tensorName) { ConstantValueMap::getInstance().shapeValueMap.end(); } -c10::optional ConstantValueMap::GetShapeValue( +std::optional ConstantValueMap::GetShapeValue( const std::string& tensorName) { if 
(!HasShapeValue(tensorName)) { return c10::nullopt; diff --git a/torch/csrc/jit/passes/onnx/constant_map.h b/torch/csrc/jit/passes/onnx/constant_map.h index a5284ba65a04d..fe33183ef8d6c 100644 --- a/torch/csrc/jit/passes/onnx/constant_map.h +++ b/torch/csrc/jit/passes/onnx/constant_map.h @@ -24,7 +24,7 @@ class ConstantValueMap { static ConstantValueMap& getInstance(); static void SetRank(const std::string& tensorName, size_t rankValue); static bool HasRank(const std::string& tensorName); - static c10::optional GetRank(const std::string& tensorName); + static std::optional GetRank(const std::string& tensorName); static void SetAllGraphInputsStatic(bool all_static); static c10::optional GetAllGraphInputsStatic(); @@ -33,38 +33,38 @@ class ConstantValueMap { const std::string& tensorName, const c10::SymbolicShape& shapeValue); static bool HasShape(const std::string& tensorName); - static c10::optional GetShape( + static std::optional GetShape( const std::string& tensorName); static void SetValue(const std::string& tensorName, const at::Tensor& value); static bool HasValue(const std::string& tensorName); - static c10::optional GetValue(const std::string& tensorName); + static std::optional GetValue(const std::string& tensorName); static void EraseValue(const std::string& tensorName); static std::vector GetCompleteShapeInto1DInt64Vector( const c10::SymbolicShape& shape); - static c10::optional> GetShapeInto1DInt64Vector( + static std::optional> GetShapeInto1DInt64Vector( const std::string& value_name); - static c10::optional> + static std::optional> GetShapeInto1DInt64VectorWithOneUnknown(const std::string& value_name); static std::vector GetValueInto1DInt64Vector( const std::string& value_name); static void SetTypeReliable(const std::string& tensorName, bool reliable); static bool HasTypeReliable(const std::string& tensorName); - static c10::optional GetTypeReliable(const std::string& tensorName); + static std::optional GetTypeReliable(const std::string& tensorName); static void SetUseInferredType( const std::string& tensorName, bool useInferredType); static bool HasUseInferredType(const std::string& tensorName); - static c10::optional GetUseInferredType(const std::string& tensorName); + static std::optional GetUseInferredType(const std::string& tensorName); static void SetShapeValue( const std::string& tensorName, const c10::SymbolicShape& shapeValue); static bool HasShapeValue(const std::string& tensorName); - static c10::optional GetShapeValue( + static std::optional GetShapeValue( const std::string& tensorName); static ShapeDataMap& GetInferredShapeData(); diff --git a/torch/csrc/jit/passes/onnx/function_extraction.cpp b/torch/csrc/jit/passes/onnx/function_extraction.cpp index d6555c5c5bb70..c545c7aba823a 100644 --- a/torch/csrc/jit/passes/onnx/function_extraction.cpp +++ b/torch/csrc/jit/passes/onnx/function_extraction.cpp @@ -58,8 +58,8 @@ struct FunctionExtractor { scope_ctx_map& scope_ctxs); void DebugPrint() const; void SetAttrName(Node* ref_n, Symbol attr, const std::string& name); - c10::optional FindAttrName(Node* ref_n, Symbol attr); - c10::optional FindAttrName(Node* ref_const_n); + std::optional FindAttrName(Node* ref_n, Symbol attr); + std::optional FindAttrName(Node* ref_const_n); ScopePtr scope_key_; scope_ctx_map scope_ctxs_; @@ -76,10 +76,10 @@ struct FunctionExtractor { using func_ctx_map = std::unordered_map; static bool IsValidScope(ScopePtr s); - static c10::optional InferScope(Node* n); + static std::optional InferScope(Node* n); static bool IsAncestor(ScopePtr 
parent, ScopePtr child); - static c10::optional FindCommonAncestor(ScopePtr a, ScopePtr b); - static c10::optional FindCommonAncestor(const scope_list& scopes); + static std::optional FindCommonAncestor(ScopePtr a, ScopePtr b); + static std::optional FindCommonAncestor(const scope_list& scopes); std::shared_ptr ConstructFuncGraph(FunctionContext& ctx); void ConvertScopeToFunction( @@ -219,7 +219,7 @@ void FunctionExtractor::FunctionContext::SetAttrName( auto n_attr_it = node_attr_to_name_[n_in_def][attr.toUnqualString()] = name; } -c10::optional FunctionExtractor::FunctionContext::FindAttrName( +std::optional FunctionExtractor::FunctionContext::FindAttrName( Node* ref_n, Symbol attr) { auto v_it = @@ -297,7 +297,7 @@ bool FunctionExtractor::IsAncestor(ScopePtr parent, ScopePtr child) { return false; } -c10::optional FunctionExtractor::FindCommonAncestor( +std::optional FunctionExtractor::FindCommonAncestor( ScopePtr a, ScopePtr b) { if (!IsValidScope(a) || !IsValidScope(b)) { @@ -330,13 +330,13 @@ c10::optional FunctionExtractor::FindCommonAncestor( return c10::nullopt; } -c10::optional FunctionExtractor::FindCommonAncestor( +std::optional FunctionExtractor::FindCommonAncestor( const scope_list& scopes) { if (scopes.empty()) { return c10::nullopt; } - c10::optional common_ancestor = scopes.at(0); + std::optional common_ancestor = scopes.at(0); for (const auto& scope : scopes) { common_ancestor = FindCommonAncestor(common_ancestor.value(), scope); if (!common_ancestor.has_value()) { @@ -347,7 +347,7 @@ c10::optional FunctionExtractor::FindCommonAncestor( return common_ancestor; } -c10::optional FunctionExtractor::InferScope(Node* n) { +std::optional FunctionExtractor::InferScope(Node* n) { // The scope of node n is assigned based on the following rules. // 1. 
If all uses of outputs of n belongs to the same scope, // assign that scope, otherwise diff --git a/torch/csrc/jit/passes/onnx/function_substitution.cpp b/torch/csrc/jit/passes/onnx/function_substitution.cpp index a6e2f89e106ec..81bfa3fd6caf5 100644 --- a/torch/csrc/jit/passes/onnx/function_substitution.cpp +++ b/torch/csrc/jit/passes/onnx/function_substitution.cpp @@ -12,7 +12,7 @@ namespace { const std::string kTopModuleVariableName = ""; std::string TidyClassNameFromTorchScript( - const c10::optional& class_name) { + const std::optional& class_name) { if (!class_name) { return "UNKNOWN_CLASS"; } diff --git a/torch/csrc/jit/passes/onnx/helper.cpp b/torch/csrc/jit/passes/onnx/helper.cpp index d6b2a6385fab4..9d4c5061414c5 100644 --- a/torch/csrc/jit/passes/onnx/helper.cpp +++ b/torch/csrc/jit/passes/onnx/helper.cpp @@ -61,7 +61,7 @@ void buildParamsMapFromValueToParamsMap( } } -c10::optional ONNXTypeToATenType(int32_t onnx_type) { +std::optional ONNXTypeToATenType(int32_t onnx_type) { switch (onnx_type) { case ::ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED: return at::ScalarType::Undefined; @@ -104,7 +104,7 @@ c10::optional ONNXTypeToATenType(int32_t onnx_type) { onnx_type, " is an unexpected tensor scalar type"); } - return c10::optional{}; + return std::optional{}; } Node* addNodeToBlock(Block* block, Symbol kind, ArrayRef inputs) { diff --git a/torch/csrc/jit/passes/onnx/helper.h b/torch/csrc/jit/passes/onnx/helper.h index 77eb98ba8a707..9e09c638779ef 100644 --- a/torch/csrc/jit/passes/onnx/helper.h +++ b/torch/csrc/jit/passes/onnx/helper.h @@ -40,7 +40,7 @@ TORCH_API Node* addNodeToBlock( TORCH_API Value* addInputToBlock(Block* block); -TORCH_API c10::optional ONNXTypeToATenType(int32_t onnx_type); +TORCH_API std::optional ONNXTypeToATenType(int32_t onnx_type); // Use int return type as no sable way exists to forward declare protobuf enum TORCH_API int ATenTypeToOnnxType(at::ScalarType at_type); diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp index 41e3ac9ecc4e8..6110954990455 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp @@ -77,7 +77,7 @@ Node* EncapsulateInplaceIndexPutForONNX(Node* index_put_node) { } // namespace -c10::optional EncapsulatePatternIntoSubblock(Node* n) { +std::optional EncapsulatePatternIntoSubblock(Node* n) { switch (n->kind()) { case aten::index_put_: case aten::index_put: { diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h index cd78663cffc47..6673d4aba3a75 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h @@ -28,7 +28,7 @@ namespace jit { // the subblock of a new placeholder node. The outputs of the new placeholder // node are used in place of the original nodes instead. The category of the // pattern is stored as attr::name. 
-TORCH_API c10::optional EncapsulatePatternIntoSubblock(Node* n); +TORCH_API std::optional EncapsulatePatternIntoSubblock(Node* n); } // namespace jit } // namespace torch diff --git a/torch/csrc/jit/passes/onnx/peephole.cpp b/torch/csrc/jit/passes/onnx/peephole.cpp index 9e1c17120f654..73c19851e569b 100644 --- a/torch/csrc/jit/passes/onnx/peephole.cpp +++ b/torch/csrc/jit/passes/onnx/peephole.cpp @@ -101,7 +101,7 @@ std::vector getBroadcastPositions(Node* node) { // Determine whether `from` can broadcast to `to`, and if so at which // position. `from` must be a suffix of `to`, except that any // occurrences of 1 in `from` are treated as wildcards. -c10::optional fusibleExpandTo( +std::optional fusibleExpandTo( at::IntArrayRef from, at::IntArrayRef to) { if (from.size() > to.size()) { @@ -156,7 +156,7 @@ void fuseBroadcast(Block* b) { } // Not all broadcasts are supported by ONNX broadcast. - c10::optional axis = fusibleExpandTo( + std::optional axis = fusibleExpandTo( unexpanded_input->type() ->expectRef() .sizes() diff --git a/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp b/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp index 638acd464adcd..427e5771a9f0f 100644 --- a/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp +++ b/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp @@ -97,7 +97,7 @@ static bool IsImplicitCastSupported(const NodeKind& nodeKind) { IsSelectorOp(nodeKind); } -static c10::optional PromoteScalarTypes( +static std::optional PromoteScalarTypes( const std::vector& types) { if (types.empty()) { return c10::nullopt; @@ -112,7 +112,7 @@ static c10::optional PromoteScalarTypes( // Type promotion between scalars and tensors // per logic here // https://pytorch.org/docs/main/tensor_attributes.html#tensor-attributes -static c10::optional PromoteScalarTypesWithCategory( +static std::optional PromoteScalarTypesWithCategory( const std::vector& typesFromTensors, const std::vector& typesFromScalars) { auto typeFromTensor = PromoteScalarTypes(typesFromTensors); @@ -146,12 +146,12 @@ static c10::optional PromoteScalarTypesWithCategory( return typeFromTensor; } -static c10::optional InferExpectedScalarType(const Node* n) { +static std::optional InferExpectedScalarType(const Node* n) { std::vector typesFromTensors; std::vector typesFromScalars; auto get_scalar_type = - [](const Value* input) -> c10::optional { + [](const Value* input) -> std::optional { if (auto* tensor_type = input->type()->castRaw()) { return tensor_type->scalarType(); } @@ -252,7 +252,7 @@ static c10::optional InferExpectedScalarType(const Node* n) { } }); - c10::optional st = c10::nullopt; + std::optional st = c10::nullopt; const auto output_st = get_scalar_type(n->output()); if (IsComparisonOp(n->kind())) { @@ -280,7 +280,7 @@ static c10::optional InferExpectedScalarType(const Node* n) { return st; } -static c10::optional LowPrecisionCastForStandardOps( +static std::optional LowPrecisionCastForStandardOps( const Node* n, const c10::ScalarType& scalar_type) { // Some of standardOps do not support uint8\int8\int16 type for ONNX diff --git a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp index 65d0f3d5651e8..eefa9621ba1fb 100644 --- a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp +++ b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp @@ -93,7 +93,7 @@ c10::ShapeSymbol ONNXDimToShapeSymbol( if (dim.has_dim_value()) { return c10::ShapeSymbol::fromStaticSize(dim.dim_value()); } - c10::optional sym = c10::nullopt; + std::optional sym = 
c10::nullopt; if (dim.has_dim_param()) { // If this param is already known, assign the same Symbol. GRAPH_UPDATE("Got dim_param:", dim.dim_param()); @@ -116,7 +116,7 @@ c10::ShapeSymbol ONNXDimToShapeSymbol( TensorTypePtr TorchTensorTypeFromONNX( const onnx::TypeProto_Tensor& onnx_tensor_type, SymbolDimMap& symbol_dim_map) { - c10::optional scalar_type; + std::optional scalar_type; if (onnx_tensor_type.has_elem_type()) { scalar_type = ONNXTypeToATenType(onnx_tensor_type.elem_type()); } @@ -260,7 +260,7 @@ Value* CloneValueFromListConstruct( // is preserved. If the elemtype is Int, insert a onnx::Concat node into // the graph. TypePtr elem = v->type()->castRaw()->getElementType(); - c10::optional scalar_type = c10::nullopt; + std::optional scalar_type = c10::nullopt; if (elem->cast()) { scalar_type = at::kLong; if (isValidToTransformToONNXConcatNode(v->node())) { @@ -325,7 +325,7 @@ Node* CloneNodeToGraph( // Try to lookup input value and insert it into the graph. // If the input value is unknown, set it to graph input in the new // graph, and copy over metadata, such as datatype and shape. - ::c10::optional val = ::c10::nullopt; + ::std::optional val = ::c10::nullopt; auto v0 = params_dict.find(v->debugName()); if (v0 != params_dict.end()) { val = v0->second.toTensor(); @@ -407,7 +407,7 @@ void ConvertGraphToONNXProto( } } -c10::optional ComputeConstantFolding(Node* n, int opset_version) { +std::optional ComputeConstantFolding(Node* n, int opset_version) { if (n->inputs().empty()) { return c10::nullopt; } @@ -437,7 +437,7 @@ c10::optional ComputeConstantFolding(Node* n, int opset_version) { } // Similar to the function above, but for symbolic shapes. -c10::optional<::c10::SymbolicShape> ComputeShapeFromReshape( +std::optional<::c10::SymbolicShape> ComputeShapeFromReshape( Node* n, const c10::SymbolicShape& input_shape, const c10::SymbolicShape& shape, @@ -549,7 +549,7 @@ c10::optional<::c10::SymbolicShape> ComputeShapeFromReshape( return final_shape_0; } -c10::optional<::c10::SymbolicShape> ComputeShapeFromExpand( +std::optional<::c10::SymbolicShape> ComputeShapeFromExpand( const std::vector<::c10::ShapeSymbol>& input_shape, const std::vector& reshape) { for (const auto& it : reshape) { @@ -588,7 +588,7 @@ c10::optional<::c10::SymbolicShape> ComputeShapeFromExpand( return shape; } -c10::optional<::c10::SymbolicShape> ComputeShapeFromTile( +std::optional<::c10::SymbolicShape> ComputeShapeFromTile( const std::vector<::c10::ShapeSymbol>& input_shape, const std::vector& reshape) { TORCH_INTERNAL_ASSERT( @@ -616,7 +616,7 @@ c10::optional<::c10::SymbolicShape> ComputeShapeFromTile( void UpdateRank(Value* value, size_t rank) { ConstantValueMap::SetRank(value->debugName(), rank); if (TensorTypePtr value_type = value->type()->cast()) { - c10::optional rank_opt = rank; + std::optional rank_opt = rank; auto shape = ::c10::SymbolicShape(rank_opt); value->setType(value_type->withSymbolicShapes(shape)); } @@ -662,7 +662,7 @@ void UpdateShapeConstantValueMap( } } -c10::optional> GetValueFromListConstructNode( +std::optional> GetValueFromListConstructNode( Node* lc_node) { std::vector shape_size; for (const auto& input : lc_node->inputs()) { @@ -676,7 +676,7 @@ c10::optional> GetValueFromListConstructNode( } } return lc_node->inputs().size() == shape_size.size() - ? c10::optional>(shape_size) + ? 
std::optional>(shape_size) : c10::nullopt; } diff --git a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp index 9270028b98808..7390bea56e77b 100644 --- a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp +++ b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp @@ -30,7 +30,7 @@ using namespace ::c10::onnx; // we traverse up the graph to get the scale from its input until we hit a node // where scale is explicitly specified. double getScaleFromInput(Node* input_node) { - c10::optional scale; + std::optional scale; std::string input_name = input_node->kind().toQualString(); std::unordered_set noscale_ops = { "quantized::max_pool2d", @@ -332,7 +332,7 @@ void unpackQuantizedWeightsHelper( "getValues: Quantized weight value not found amongst constant parameters."); } at::Tensor unpacked_weight; - c10::optional bias; + std::optional bias; constexpr int64_t stride_idx = 2; constexpr int64_t padding_idx = 3; int64_t output_padding_idx; @@ -346,10 +346,10 @@ void unpackQuantizedWeightsHelper( dilation_idx = 4; groups_idx = 5; } - c10::optional> stride, padding, dilation, + std::optional> stride, padding, dilation, output_padding; - c10::optional groups; - c10::optional transpose; + std::optional groups; + std::optional transpose; torch::List stride_int, padding_int, dilation_int, output_padding_int; @@ -371,9 +371,9 @@ void unpackQuantizedWeightsHelper( TORCH_INTERNAL_ASSERT(elements.size() == 3, "Wrong tuple size."); auto config_vals = elements[1].to>(); - auto tensors = elements[2].to>>(); + auto tensors = elements[2].to>>(); - c10::optional weight = tensors[1]; + std::optional weight = tensors[1]; TORCH_INTERNAL_ASSERT( weight, "Weight should always be present in serialized qconv."); unpacked_weight = *weight; @@ -534,7 +534,7 @@ void unpackQuantizedWeightsHelper( at::Tensor packed_weight = itr->second.toTensor(); auto op = Dispatcher::singleton() .findSchemaOrThrow(unpack_fn.c_str(), "") - .typed>( + .typed>( at::Tensor)>(); std::tie(unpacked_weight, bias) = op.call(packed_weight); } @@ -598,7 +598,7 @@ void unpackQuantizedWeightsHelper( if (stride.has_value() && padding.has_value() && dilation.has_value() && groups.has_value() && (!expect_output_padding || output_padding.has_value())) { - std::vector>> conv_ints_args; + std::vector>> conv_ints_args; conv_ints_args.push_back(stride); conv_ints_args.push_back(padding); if (expect_output_padding) { diff --git a/torch/csrc/jit/passes/peephole.cpp b/torch/csrc/jit/passes/peephole.cpp index b1e38697ef59d..aa4e2176f1905 100644 --- a/torch/csrc/jit/passes/peephole.cpp +++ b/torch/csrc/jit/passes/peephole.cpp @@ -19,7 +19,7 @@ namespace jit { // Conservatively compare two optionals. 
If both are undefined, assume // they aren't equal template -static bool mustBeEqual(const c10::optional& a, const c10::optional& b) { +static bool mustBeEqual(const std::optional& a, const c10::optional& b) { return a == b && a.has_value(); } diff --git a/torch/csrc/jit/passes/peephole_dict_idioms.cpp b/torch/csrc/jit/passes/peephole_dict_idioms.cpp index 4e2a56a9d06bd..d3a5cfa36261b 100644 --- a/torch/csrc/jit/passes/peephole_dict_idioms.cpp +++ b/torch/csrc/jit/passes/peephole_dict_idioms.cpp @@ -125,7 +125,7 @@ class DictNode { return 0; } - c10::optional getOrNullopt(const IValue& key) const { + std::optional getOrNullopt(const IValue& key) const { if (impl_ && impl_->contains(key)) { return impl_->get(key); } @@ -181,7 +181,7 @@ class PeepholeOptimizeDictIdiomsImpl { return cached->second; } - c10::optional getValueFromDict(Node* dict_creation_node, Value* key) { + std::optional getValueFromDict(Node* dict_creation_node, Value* key) { const DictNode& dict_node = getDictNode(dict_creation_node); auto key_opt = toIValue(key); // Key is not constant if we cannot convert to IValue @@ -195,7 +195,7 @@ class PeepholeOptimizeDictIdiomsImpl { return c10::nullopt; } - c10::optional computeLen(Node* dict_creation_node) { + std::optional computeLen(Node* dict_creation_node) { const DictNode& dict_node = getDictNode(dict_creation_node); if (dict_node.canOptimize()) { return static_cast(dict_node.size()); diff --git a/torch/csrc/jit/passes/peephole_list_idioms.cpp b/torch/csrc/jit/passes/peephole_list_idioms.cpp index 15f4c807335fd..9c106e13edf1f 100644 --- a/torch/csrc/jit/passes/peephole_list_idioms.cpp +++ b/torch/csrc/jit/passes/peephole_list_idioms.cpp @@ -14,7 +14,7 @@ namespace torch { namespace jit { -static c10::optional normalizeIndex(int64_t index, size_t len) { +static std::optional normalizeIndex(int64_t index, size_t len) { if (index < 0) { index = index + len; } @@ -129,7 +129,7 @@ struct ListLenRefiner { return block_refinements; }; - c10::optional tryFindRefinement(Value* v) { + std::optional tryFindRefinement(Value* v) { for (const auto& ref : active_refinements_) { auto maybe_refinement = ref->find(v); if (maybe_refinement != ref->end()) { diff --git a/torch/csrc/jit/passes/peephole_non_tensor.cpp b/torch/csrc/jit/passes/peephole_non_tensor.cpp index 5cd2b6c2ee65d..5fa9c89b1fb0e 100644 --- a/torch/csrc/jit/passes/peephole_non_tensor.cpp +++ b/torch/csrc/jit/passes/peephole_non_tensor.cpp @@ -19,7 +19,7 @@ namespace { * @post if there's one constant in two operands, then the second operand is * constant. 
*/ -c10::optional checkArithNode(Node& node) { +std::optional checkArithNode(Node& node) { if (node.inputs().size() != 2 || node.input(0)->type() != IntType::get() || node.input(1)->type() != IntType::get()) { return {}; diff --git a/torch/csrc/jit/passes/quantization/helper.cpp b/torch/csrc/jit/passes/quantization/helper.cpp index a4ac1f6fe4be9..8a74ec01086a5 100644 --- a/torch/csrc/jit/passes/quantization/helper.cpp +++ b/torch/csrc/jit/passes/quantization/helper.cpp @@ -235,7 +235,7 @@ std::vector _propagate_quant_binary_ops = { bool matchAtenFuncToUse( const Use& use, const std::string& func_name, - c10::optional n) { + std::optional n) { Node* node = use.user; return node->kind() == Symbol::aten(func_name) && (!n.has_value() || static_cast(n.value()) == use.offset); @@ -244,7 +244,7 @@ bool matchAtenFuncToUse( bool matchCallFuncToUse( const Use& use, const std::string& func_name, - c10::optional n) { + std::optional n) { Node* node = use.user; return node->kind() == prim::CallFunction && getFuncName(node->inputs()[0]) == func_name && @@ -316,7 +316,7 @@ bool isEmbeddingBagNonInput(Value* v) { return result; } -c10::optional getClampScalarInputUse(Value* v) { +std::optional getClampScalarInputUse(Value* v) { for (const auto& use : v->uses()) { for (const auto& aten_func : _clamp_funcs) { if (matchAtenFuncToUse(use, aten_func, 1) || @@ -493,7 +493,7 @@ bool isBinaryOpWithScalarInput(Node* n) { return isPropagateQuantBinaryOp(n) && isScalar(n->input(1)); } -c10::optional> getFixedQParams(Node* n) { +std::optional> getFixedQParams(Node* n) { static std::vector fixed_qparam_funcs; std::transform( _fixed_qparams_map.begin(), @@ -642,7 +642,7 @@ Module getInvokedModule(Module& module, Node* n, Value* self) { return findChildModule(module, path); } -c10::optional getInvokedModuleOpt( +std::optional getInvokedModuleOpt( const Module& module, Node* n, Value* self) { @@ -686,7 +686,7 @@ std::string removeTorchMangle(const std::string& orig_name) { return qualified_name; } -c10::optional getModuleName(Value* value) { +std::optional getModuleName(Value* value) { auto type = value->type()->cast(); if (type && type->name()) { return removeTorchMangle(type->name()->qualifiedName()); diff --git a/torch/csrc/jit/passes/quantization/helper.h b/torch/csrc/jit/passes/quantization/helper.h index b5a5adf40b65c..680e3c7ca43d5 100644 --- a/torch/csrc/jit/passes/quantization/helper.h +++ b/torch/csrc/jit/passes/quantization/helper.h @@ -32,7 +32,7 @@ TORCH_API bool isBiasOfConvOrLinear(Value* v); TORCH_API bool isEmbeddingBagNonInput(Value* v); // Get the use as scalar input of clamp ops for the input value -c10::optional getClampScalarInputUse(Value* v); +std::optional getClampScalarInputUse(Value* v); // For a given value `v`, get the list of values that we need to check // if they are observed/quantized or not, if so, we can say the @@ -59,7 +59,7 @@ TORCH_API bool hitGraphInput(Value* value); TORCH_API std::string removeTorchMangle(const std::string& orig_name); // Return the module name that corresponds to the value. 
-TORCH_API c10::optional getModuleName(Value* value); +TORCH_API std::optional getModuleName(Value* value); // =========== helper functions for Node ========= TORCH_API bool isSingleInputGeneralShapeAtenFunction(Node* n); @@ -91,7 +91,7 @@ TORCH_API bool isPropagateQuantOp(Node* n); // quantized::{op}_scalar TORCH_API bool isBinaryOpWithScalarInput(Node* n); -TORCH_API c10::optional> getFixedQParams( +TORCH_API std::optional> getFixedQParams( Node* n); // We don't want to analyze the graph for some `builtin` CallFunctions @@ -121,14 +121,14 @@ TORCH_API std::shared_ptr getCallFunctionGraph(Node* n); bool matchCallFuncToUse( const Use& use, const std::string& func_name, - c10::optional nth_arg); + std::optional nth_arg); // Check if `use` is a AtenFunction of name `func_name` and if value // `v` is the nth argument (if provided) of the function bool matchAtenFuncToUse( const Use& use, const std::string& func_name, - c10::optional nth_arg); + std::optional nth_arg); // =========== helper functions for Block ========= // checks if a block will always raise an Exception @@ -151,7 +151,7 @@ TORCH_API Module getInvokedModule(Module& module, Node* n, Value* self); // Given an CallMethod node, get the module instance corresponding // to the instance Value if the instance is a module, otherwise return // c10::nullopt -c10::optional getInvokedModuleOpt( +std::optional getInvokedModuleOpt( const Module& module, Node* n, Value* self); diff --git a/torch/csrc/jit/passes/quantization/insert_observers.cpp b/torch/csrc/jit/passes/quantization/insert_observers.cpp index f514fbc193ddd..e5df64f1929c7 100644 --- a/torch/csrc/jit/passes/quantization/insert_observers.cpp +++ b/torch/csrc/jit/passes/quantization/insert_observers.cpp @@ -20,12 +20,12 @@ namespace torch { namespace jit { -using ModuleQConfigMap = std::unordered_map>; +using ModuleQConfigMap = std::unordered_map>; namespace { struct OptionalQConfigHash { - inline size_t operator()(const c10::optional& qconfig_opt) const { + inline size_t operator()(const std::optional& qconfig_opt) const { if (qconfig_opt.has_value()) { const auto& m1 = std::get<0>(*qconfig_opt); const auto& m2 = std::get<1>(*qconfig_opt); @@ -36,9 +36,9 @@ struct OptionalQConfigHash { } }; using QConfigTypePtrMap = - std::unordered_map, TypePtr, OptionalQConfigHash>; + std::unordered_map, TypePtr, OptionalQConfigHash>; using NameModuleVector = std::vector>; -using OptionalModuleVector = std::vector>; +using OptionalModuleVector = std::vector>; using ModuleMethodVector = std::vector>; using graph_rewrite_helper::PatternInfo; using graph_rewrite_helper::replaceConvolutionWithAtenConv; @@ -49,8 +49,8 @@ void fillQConfigMap( const QConfigDict& qconfig_dict, ModuleQConfigMap& map, const std::string& key = "", - const c10::optional& parent_qconfig = c10::nullopt) { - c10::optional qconfig; + const std::optional& parent_qconfig = c10::nullopt) { + std::optional qconfig; if (qconfig_dict.find(key) != qconfig_dict.end()) { GRAPH_DEBUG("Got module config for key:", key); qconfig = qconfig_dict.at(key); @@ -187,7 +187,7 @@ class ModuleCloneHelper { const Module& source, Module& target, const ModuleQConfigMap& module_qconfig_map, - const std::function)>& + const std::function)>& type_remap_fn) { // remap of %self will be done outside of the function // and we don't support the case when people pass in @@ -239,7 +239,7 @@ class ModuleCloneHelper { const Module& source, Module& target, const ModuleQConfigMap& module_qconfig_map, - const std::function)>& + const std::function)>& type_remap_fn) { 
remapTypes( graph->block(), @@ -257,7 +257,7 @@ class ModuleCloneHelper { const ModuleQConfigMap& module_qconfig_map, const std::unordered_map& type_remap) { auto type_remap_fn = [&](TypePtr type_ptr, - const c10::optional& qconfig) { + const std::optional& qconfig) { if (type_remap.find(type_ptr) != type_remap.end()) { const auto& qconfig_map = type_remap.at(type_ptr); if (qconfig_map.find(qconfig) != qconfig_map.end()) { @@ -401,7 +401,7 @@ class InsertObserversHelper { // Uses the state created by fillBoundaryValueMap and fillValueObserverMap // to return an observer configured for a value, if it is needed. - c10::optional getObserverFor(Value* v); + std::optional getObserverFor(Value* v); // Uses the state created by fillPassThroughValueMap to propagage observed // property which should pass through from inputs to outputs. @@ -1312,13 +1312,13 @@ void InsertObserversHelper::fillValueObserverMap( } } -c10::optional InsertObserversHelper::getObserverFor(Value* v) { +std::optional InsertObserversHelper::getObserverFor(Value* v) { if (observer_for_value_.count(v)) { auto observer = observer_for_value_.at(v); GRAPH_DEBUG("Got observer module config for:", v->debugName()); return observer; } - c10::optional result; + std::optional result; if (boundary_value_map_.count(v)) { for (Value* next : boundary_value_map_.at(v)) { GRAPH_DEBUG( @@ -1384,9 +1384,9 @@ InsertObserversHelper::insertObserversFor( // the graph itself can be shared std::unordered_set inputs_outputs; // list of observer modules for input values - std::vector> block_input_observers; + std::vector> block_input_observers; // list of observer modules for output values - std::vector> block_output_observers; + std::vector> block_output_observers; // if the current block is the block for entry point graph(the forward graph // of the top level module), we can insert observers in the block directly diff --git a/torch/csrc/jit/passes/quantization/insert_observers.h b/torch/csrc/jit/passes/quantization/insert_observers.h index 6fa7fe0449112..e8857318261c8 100644 --- a/torch/csrc/jit/passes/quantization/insert_observers.h +++ b/torch/csrc/jit/passes/quantization/insert_observers.h @@ -18,7 +18,7 @@ namespace torch { namespace jit { using QConfig = std::tuple; -using QConfigDict = std::unordered_map>; +using QConfigDict = std::unordered_map>; /** \brief Insert observer module and observer function call for * the Tensors that needs to be observed. diff --git a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp index 93683a308dc86..02f4f10969760 100644 --- a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp +++ b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp @@ -59,7 +59,7 @@ bool isWeight(Module& module, Value* v) { if (isWeight(v)) { return true; } - c10::optional result; + std::optional result; auto* self = v->owningGraph()->inputs()[0]; for (const Use& u : v->uses()) { Node* n = u.user; @@ -221,7 +221,7 @@ Node* insertFP16CastOps(Graph* graph, Value* observer_out) { } // find the observer for Value `v` and return the name of the observer -c10::optional findObserverName(Value* v) { +std::optional findObserverName(Value* v) { // Note that here we just check for the name of observer, but the ideally // we should be comparing the type of observer, this is a temporary // work around until data only clone of module.clone is supported. 
@@ -258,7 +258,7 @@ at::ScalarType getObserverDtype(Module& module, Value* v) { return at::ScalarType::Undefined; } -c10::optional getEmbeddingBagObsName( +std::optional getEmbeddingBagObsName( script::Module& module, Node* n) { Value* v = n->output(); @@ -273,7 +273,7 @@ c10::optional getEmbeddingBagObsName( bool isEmbeddingBagOp( Node* observer, - c10::optional embedding_bag_name) { + std::optional embedding_bag_name) { return embedding_bag_name && embedding_bag_name.value().find("embedding_bag_") != std::string::npos; } @@ -791,7 +791,7 @@ class InsertQuantDeQuantHelper { Value* original_output, const std::vector& inputs, bool is_scalar = false, - const c10::optional>& qparams_opt = + const std::optional>& qparams_opt = c10::nullopt); bool isQuantized(Value* v) { @@ -1125,7 +1125,7 @@ ModuleMethodVector InsertQuantDeQuantHelper::getInvokedMethods( if (n->kind() == prim::CallMethod) { auto module_instance = n->inputs()[0]; auto module_method_name = n->s(attr::name); - c10::optional m; + std::optional m; // calling method on self if (module_instance == graph->inputs()[0]) { m = module; @@ -1152,7 +1152,7 @@ void InsertQuantDeQuantHelper::propagateQParams( Value* original_output, const std::vector& inputs, bool is_scalar, - const c10::optional>& qparams_opt) { + const std::optional>& qparams_opt) { Node* n = original_output->node(); Graph* graph = n->owningGraph(); if (is_scalar) { @@ -1248,7 +1248,7 @@ void removeDequantizeFromInputs(const std::unordered_set& inputs) { // Check if we need to propagate the quantization ops from input to // output -c10::optional> getDequantizedInputs(Value* output) { +std::optional> getDequantizedInputs(Value* output) { auto inputs = getPassThroughInputs(output); if (!inputs.empty()) { // note that we don't need to recursively check for prim::If diff --git a/torch/csrc/jit/passes/remove_mutation.cpp b/torch/csrc/jit/passes/remove_mutation.cpp index 183c7894f0867..84b990f628336 100644 --- a/torch/csrc/jit/passes/remove_mutation.cpp +++ b/torch/csrc/jit/passes/remove_mutation.cpp @@ -360,7 +360,7 @@ bool RemoveListMutation(const std::shared_ptr& graph) { bool RemoveTensorMutation( const std::shared_ptr& graph, - c10::optional> mutation_filter) { + std::optional> mutation_filter) { MutationRemover mr(graph, std::move(mutation_filter)); return mr.removeTensorMutation(); } diff --git a/torch/csrc/jit/passes/remove_mutation.h b/torch/csrc/jit/passes/remove_mutation.h index eb8cf195ee4ca..be8fc12b11f3d 100644 --- a/torch/csrc/jit/passes/remove_mutation.h +++ b/torch/csrc/jit/passes/remove_mutation.h @@ -11,7 +11,7 @@ namespace jit { struct TORCH_API MutationRemover { MutationRemover( std::shared_ptr graph, - c10::optional> mutation_filter = c10::nullopt) + std::optional> mutation_filter = c10::nullopt) : mutation_filter_(mutation_filter), aliasDb_(nullptr), graph_(std::move(graph)) {} @@ -55,7 +55,7 @@ struct TORCH_API MutationRemover { return aliasDb_.get(); } - c10::optional> mutation_filter_; + std::optional> mutation_filter_; std::unique_ptr aliasDb_ = nullptr; std::shared_ptr graph_; }; @@ -71,7 +71,7 @@ TORCH_API bool RemoveListMutation(const std::shared_ptr& graph); // return true if graph is modified TORCH_API bool RemoveTensorMutation( const std::shared_ptr& graph, - c10::optional> mutation_filter = c10::nullopt); + std::optional> mutation_filter = c10::nullopt); // Replaces in-place aten activation ops with their functional equivalence TORCH_API bool InplaceToFunctionalActivation( diff --git a/torch/csrc/jit/passes/replacement_of_old_operators.cpp 
b/torch/csrc/jit/passes/replacement_of_old_operators.cpp index 430cd4f743fdc..38255ad141877 100644 --- a/torch/csrc/jit/passes/replacement_of_old_operators.cpp +++ b/torch/csrc/jit/passes/replacement_of_old_operators.cpp @@ -30,7 +30,7 @@ struct OldOpsReplacerWithUpgraders { Node* node = graph_it.next(); while (node) { // load the schema name for this op - c10::optional schema_name = c10::nullopt; + std::optional schema_name = c10::nullopt; if (auto op_schema = node->maybeSchema()) { schema_name = getFullSchemaName(*op_schema); } else { diff --git a/torch/csrc/jit/passes/shape_analysis.cpp b/torch/csrc/jit/passes/shape_analysis.cpp index 706a17bf13e02..abc7bb6411dba 100644 --- a/torch/csrc/jit/passes/shape_analysis.cpp +++ b/torch/csrc/jit/passes/shape_analysis.cpp @@ -153,7 +153,7 @@ bool containsTensorType(const TypePtr& t) { // for each node in the schema with type Tensor, extract the T type // returns c10::nullopt if any Tensor in the schema does not have a known // shape ignores non-tensor in the list of inputs -c10::optional> gatherTensorTypes( +std::optional> gatherTensorTypes( Node* node, bool complete = false) { std::vector tensor_types; @@ -209,7 +209,7 @@ c10::ScalarType unionScalarTypes( // new type promotion logic. See tensor_attributes.rst for details. // This doesn't handle the case of arithmetic ops with Scalar arguments (when // `Tensor.getUnsafeTensorImpl()->is_wrapped_number()` would return true) -c10::optional getPromotedTypeForArithmeticOp(Node* node) { +std::optional getPromotedTypeForArithmeticOp(Node* node) { c10::ScalarType dimmed = c10::ScalarType::Undefined; c10::ScalarType zerodim = c10::ScalarType::Undefined; // binary arithmetic ops, more than 2 args is alpha. @@ -741,7 +741,7 @@ class ShapePropagator : public PropertyPropBase { return setUnshapedType(node); } - static c10::optional determineListSize(Value* list) { + static std::optional determineListSize(Value* list) { AT_ASSERT(list->type()->cast()); if (auto shape = constant_as>(list)) { return shape->size(); @@ -769,7 +769,7 @@ class ShapePropagator : public PropertyPropBase { bool PropagateTensorShapeOnNode(Node* node, bool insert_expands) { static const auto broadcast = [](std::vector& tensor_types, - c10::optional t) -> TensorTypePtr { + std::optional t) -> TensorTypePtr { if (tensor_types.size() == 1) { return tensor_types[0]->dimensionedOnly()->withScalarType(t); } @@ -1244,7 +1244,7 @@ class ShapePropagator : public PropertyPropBase { static const auto reduce_op_handler = [](Node* node, int64_t num_reduced_dim = 0, bool upcast_integer = false, - c10::optional opt_dtype = + std::optional opt_dtype = c10::nullopt) -> type_vec_t { if (auto type = node->input(0)->type()->cast()) { if (!type->scalarType() || !type->dim()) { diff --git a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp index 96aa425b291a1..1765e65d02a6e 100644 --- a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp @@ -65,7 +65,7 @@ namespace jit { struct ShapeArg : public std:: - pair, c10::optional> { + pair, c10::optional> { using pair::pair; static ShapeArg unknownInteger() { @@ -87,11 +87,11 @@ struct ShapeArg } } - c10::optional asConstantInt() const { + std::optional asConstantInt() const { return this->second; } - c10::optional asShapeSymbol() const { + std::optional asShapeSymbol() const { return this->first; } @@ -208,7 +208,7 @@ bool isListOfTensors(const TypePtr& type) { type->cast()->getElementType()->cast(); } 
-c10::optional normIndex(int64_t index, size_t len) { +std::optional normIndex(int64_t index, size_t len) { if (index < 0) { index = index + len; } @@ -255,7 +255,7 @@ c10::SymbolicShape extractListShape( return c10::SymbolicShape(); } Node* list_construct = list->node(); - std::vector> output_shape; + std::vector> output_shape; for (Value* input : list_construct->inputs()) { if (symbolic_shape_values.count(input)) { output_shape.emplace_back(symbolic_shape_values[input]); @@ -605,7 +605,7 @@ struct SymbolicShapeOpAnalyzer { shape_compute_graph_ = graph->copy(); } - c10::optional> run( + std::optional> run( std::vector& inputs) { if (!shape_compute_graph_) { return c10::nullopt; @@ -813,7 +813,7 @@ struct SymbolicShapeGraphAnalyzer { beg_->owningBlock() == end_->owningBlock() && end_->isAfter(beg_)); } - c10::optional run() { + std::optional run() { AliasDb db(graph_); std::unordered_map> partial_evaluated_graphs = propagateShapesAndGatherPartialEvalShapeGraphs(db); @@ -1120,7 +1120,7 @@ void PropagateShapesOnGraph(std::shared_ptr& graph) { PropagateShapesOnBlock(graph->block(), db); } -c10::optional +std::optional PropagateShapesAndBuildLargeShapeComputeGraph( std::shared_ptr& graph, Node* beg, @@ -1128,7 +1128,7 @@ PropagateShapesAndBuildLargeShapeComputeGraph( return SymbolicShapeGraphAnalyzer(graph, beg, end).run(); } -TORCH_API c10::optional> +TORCH_API std::optional> calculateSymbolicShapesOnOp( const FunctionSchema* schema, const std::vector& inputs) { diff --git a/torch/csrc/jit/passes/symbolic_shape_analysis.h b/torch/csrc/jit/passes/symbolic_shape_analysis.h index 824740792aaf0..f5a17f2c5e550 100644 --- a/torch/csrc/jit/passes/symbolic_shape_analysis.h +++ b/torch/csrc/jit/passes/symbolic_shape_analysis.h @@ -36,7 +36,7 @@ struct ShapeComputeGraphMapping { std::unordered_map graph_output_to_symbolic_shape_dim_; }; -TORCH_API c10::optional +TORCH_API std::optional PropagateShapesAndBuildLargeShapeComputeGraph( std::shared_ptr& graph, Node* beg, @@ -50,7 +50,7 @@ TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value); TORCH_API bool symbolicShapeAnalysisTestModeEnabled(); using SSAInput = std::variant; -TORCH_API c10::optional> +TORCH_API std::optional> calculateSymbolicShapesOnOp( const FunctionSchema* schema, const std::vector& inputs); diff --git a/torch/csrc/jit/passes/symbolic_shape_cache.cpp b/torch/csrc/jit/passes/symbolic_shape_cache.cpp index be8179f18786d..4a742b3f5f635 100644 --- a/torch/csrc/jit/passes/symbolic_shape_cache.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_cache.cpp @@ -109,7 +109,7 @@ TORCH_API void cache_shape_function( shapeCache.Add(std::move(cache_key), std::move(can_ret_vec)); } -TORCH_API c10::optional> +TORCH_API std::optional> get_cached_shape_function( const FunctionSchema* schema, const std::vector& arg_vec) { diff --git a/torch/csrc/jit/passes/symbolic_shape_cache.h b/torch/csrc/jit/passes/symbolic_shape_cache.h index 02e00acac08d2..b842c731c0ce4 100644 --- a/torch/csrc/jit/passes/symbolic_shape_cache.h +++ b/torch/csrc/jit/passes/symbolic_shape_cache.h @@ -31,7 +31,7 @@ struct TORCH_API CanonicalizedSymbolicShape { const CanonicalizedSymbolicShape& b); private: - c10::optional> values_; + std::optional> values_; void init( const c10::SymbolicShape& orig_shape, @@ -39,7 +39,7 @@ struct TORCH_API CanonicalizedSymbolicShape { }; // SHAPE CACHE API -TORCH_API c10::optional> +TORCH_API std::optional> get_cached_shape_function( const FunctionSchema* schema, const std::vector& arg_vec); diff --git 
a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp index b4902a1d5a0d4..9c213f2480d51 100644 --- a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp @@ -178,7 +178,7 @@ static StrideInput summarizeOutputStrides(const TensorType& tt) { // Also summarize input striding behavior. The Size information is stored on the // type, The striding is returned. See StrideInput for description of stride // specializations -static c10::optional>> +static std::optional>> TryGeneralizeInputDimensionsToSymbolicShapes( std::shared_ptr tensorexpr_graph) { std::map shape_to_sym_shape; diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp index cd95af3424dc2..c9b9b974600dc 100644 --- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp +++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp @@ -780,7 +780,7 @@ class TensorExprFuser { } } - c10::optional tryMerge(Node* fusion_group, Node* to_merge) { + std::optional tryMerge(Node* fusion_group, Node* to_merge) { if (!canMerge(fusion_group, to_merge)) { return c10::nullopt; } diff --git a/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp b/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp index b926939910c3a..15cefadd8cc76 100644 --- a/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp +++ b/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp @@ -8,7 +8,7 @@ namespace jit { static void UpdateDifferentiableGraphRequiresGrad( Block* block, - c10::optional new_requires_grad) { + std::optional new_requires_grad) { for (Node* n : block->nodes()) { for (Value* v : n->inputs()) { auto ty = v->type()->cast(); @@ -31,7 +31,7 @@ static void UpdateDifferentiableGraphRequiresGrad( void UpdateDifferentiableGraphRequiresGrad( std::shared_ptr& diff_forward_graph, - c10::optional new_requires_grad) { + std::optional new_requires_grad) { UpdateDifferentiableGraphRequiresGrad( diff_forward_graph->block(), new_requires_grad); } diff --git a/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h b/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h index eb51ba00c4c9f..0ba8696088934 100644 --- a/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h +++ b/torch/csrc/jit/passes/update_differentiable_graph_requires_grad.h @@ -14,7 +14,7 @@ namespace jit { // the types of prim::profiles TORCH_API void UpdateDifferentiableGraphRequiresGrad( std::shared_ptr& diff_forward_graph, - c10::optional new_requires_grad); + std::optional new_requires_grad); } // namespace jit } // namespace torch diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp index d538e33a21359..4c081200715a7 100644 --- a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp +++ b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp @@ -188,7 +188,7 @@ const Node* findNodeForOp( // Handle a few special cases where we need to propagate constants // manually // TODO(suo): we should be able to move this stuff to constant prop -c10::optional toIValueProp(const Value* v) { +std::optional toIValueProp(const Value* v) { if (v->node()->kind() == prim::ListConstruct) { std::vector genericList; for (auto input : v->node()->inputs()) { diff --git a/torch/csrc/jit/passes/utils/memory_dag.h b/torch/csrc/jit/passes/utils/memory_dag.h index f3068588dae85..da5584f9d4bd3 100644 --- 
a/torch/csrc/jit/passes/utils/memory_dag.h +++ b/torch/csrc/jit/passes/utils/memory_dag.h @@ -62,9 +62,9 @@ struct Element { // We memoize the results of `getMemoryLocations` to speed up queries. // A nullopt means that this cache is not yet populated. Since `MemoryDAG` is // immutable, this cache should never need to be invalidated. - mutable c10::optional cachedMemoryLocations_; + mutable std::optional cachedMemoryLocations_; - mutable c10::optional cachedAllContainedMemoryLocations_; + mutable std::optional cachedAllContainedMemoryLocations_; }; // class MemoryDAG diff --git a/torch/csrc/jit/passes/utils/subgraph_utils.cpp b/torch/csrc/jit/passes/utils/subgraph_utils.cpp index 36515e9e849e3..1bb82432e218f 100644 --- a/torch/csrc/jit/passes/utils/subgraph_utils.cpp +++ b/torch/csrc/jit/passes/utils/subgraph_utils.cpp @@ -18,9 +18,9 @@ bool hasSubgraph(Node* n) { return n->hasAttribute(attr::Subgraph); } -std::vector> gatherLastUses( +std::vector> gatherLastUses( at::ArrayRef values) { - return fmap(values, [&](Value* v) -> c10::optional { + return fmap(values, [&](Value* v) -> std::optional { return firstOrLastUse(v, /*find_first*/ false); }); } @@ -38,7 +38,7 @@ struct ValueMapper { ValueMapper( Node* to_merge, AliasDb& db, - c10::optional existing_subgraph) { + std::optional existing_subgraph) { last_uses_ = gatherLastUses(to_merge->outputs()); if (existing_subgraph) { existing_last_uses_ = gatherLastUses((*existing_subgraph)->outputs()); @@ -91,14 +91,14 @@ struct ValueMapper { placeholder_node_->destroy(); } - std::vector> last_uses_; - std::vector> existing_last_uses_; + std::vector> last_uses_; + std::vector> existing_last_uses_; Node* placeholder_node_; }; Node* executeSubgraphMergeAndUpdateAliasing( Node* to_merge, - c10::optional existing, + std::optional existing, AliasDb& db, const std::function& merge_fn) { // When we merge a node into a subgraph, the new subgraph outputs diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp index a5e3c6059bc84..a4abe02f866be 100644 --- a/torch/csrc/jit/python/init.cpp +++ b/torch/csrc/jit/python/init.cpp @@ -151,7 +151,7 @@ static bool opAllowsNumbersAsTensors(c10::Symbol symbol) { torch::should_allow_numbers_as_tensors(symbol.toUnqualString())); } -c10::optional toTypeInferredIValueOptional(py::handle input) { +std::optional toTypeInferredIValueOptional(py::handle input) { // Errors need to be caught here because toTypeInferredIValue errors out // on various object types, but we want it to work with all types. 
try { @@ -217,7 +217,7 @@ void initJITBindings(PyObject* module) { []() { return c10::ShapeSymbol::newSymbol().value(); }) .def( "_jit_shape_compute_graph_for_node", - [](Node* n) -> c10::optional> { + [](Node* n) -> std::optional> { if (!n->maybeSchema()) { return c10::nullopt; } @@ -225,7 +225,7 @@ void initJITBindings(PyObject* module) { }) .def( "_jit_decomposition_graph_for_node", - [](Node* n) -> c10::optional> { + [](Node* n) -> std::optional> { if (!n->maybeSchema()) { return c10::nullopt; } @@ -320,7 +320,7 @@ void initJITBindings(PyObject* module) { int quant_type_int) { auto dict = py::cast>>>(qconfig_dict); + std::optional>>>(qconfig_dict); auto quant_type = static_cast(quant_type_int); return InsertObservers( module, method_name, dict, inplace, quant_type); @@ -339,7 +339,7 @@ void initJITBindings(PyObject* module) { int quant_type_int) { auto dict = py::cast>>>(qconfig_dict); + std::optional>>>(qconfig_dict); auto quant_type = static_cast(quant_type_int); return InsertObserversForOnDevicePTQ( module, method_name, dict, inplace, quant_type); @@ -1652,7 +1652,7 @@ void initJITBindings(PyObject* module) { auto func_dk = py::cpp_function( [op, symbol, allow_numbers_as_tensors]( c10::DispatchKey dk_, py::args args, py::kwargs kwargs) { - c10::optional dk = + std::optional dk = c10::make_optional(dk_); ToIValueAllowNumbersAsTensors g(allow_numbers_as_tensors); return _get_operation_for_overload_or_packet( @@ -1821,7 +1821,7 @@ void initJITBindings(PyObject* module) { [](SchemaInfo& self, const std::string& name, const py::object& value) { - c10::optional i_value = toTypeInferredIValueOptional(value); + std::optional i_value = toTypeInferredIValueOptional(value); if (i_value) { // For normalization purposes there is an inconsistency within // torch.fx that turns all arguments named "self" into "input". @@ -1841,7 +1841,7 @@ void initJITBindings(PyObject* module) { TORCH_INTERNAL_ASSERT( key.isString(), "Add argument value keys types should be strings."); - c10::optional value = + std::optional value = toTypeInferredIValueOptional(key_pair.second); if (value) { // For normalization purposes there is an inconsistency within @@ -2077,8 +2077,8 @@ void initJITBindings(PyObject* module) { py::call_guard()); m.def("_is_alias_of", [](const py::object& self, const py::object& other) { - c10::optional self_value = toTypeInferredIValueOptional(self); - c10::optional other_value = toTypeInferredIValueOptional(other); + std::optional self_value = toTypeInferredIValueOptional(self); + std::optional other_value = toTypeInferredIValueOptional(other); // Only return true if we are certain that self and other are aliasing. if (!self_value || !other_value) { @@ -2087,8 +2087,8 @@ void initJITBindings(PyObject* module) { return self_value->isAliasOf(*other_value); }); m.def("_overlaps", [](const py::object& self, const py::object& other) { - c10::optional self_value = toTypeInferredIValueOptional(self); - c10::optional other_value = toTypeInferredIValueOptional(other); + std::optional self_value = toTypeInferredIValueOptional(self); + std::optional other_value = toTypeInferredIValueOptional(other); // Only return true if we are certain that self and other are overlapping. 
if (!self_value || !other_value) { diff --git a/torch/csrc/jit/python/module_python.h b/torch/csrc/jit/python/module_python.h index 3ab34f5cd8e77..5c7fbbb42d6cf 100644 --- a/torch/csrc/jit/python/module_python.h +++ b/torch/csrc/jit/python/module_python.h @@ -8,7 +8,7 @@ namespace py = pybind11; namespace torch::jit { -inline c10::optional as_module(py::handle obj) { +inline std::optional as_module(py::handle obj) { static py::handle ScriptModule = py::module::import("torch.jit").attr("ScriptModule"); if (py::isinstance(obj, ScriptModule)) { @@ -17,7 +17,7 @@ inline c10::optional as_module(py::handle obj) { return c10::nullopt; } -inline c10::optional as_object(py::handle obj) { +inline std::optional as_object(py::handle obj) { static py::handle ScriptObject = py::module::import("torch").attr("ScriptObject"); if (py::isinstance(obj, ScriptObject)) { diff --git a/torch/csrc/jit/python/pybind_utils.cpp b/torch/csrc/jit/python/pybind_utils.cpp index 23107d91d99ac..4cfe3309a766b 100644 --- a/torch/csrc/jit/python/pybind_utils.cpp +++ b/torch/csrc/jit/python/pybind_utils.cpp @@ -55,7 +55,7 @@ IValue listToIValue(py::handle obj) { return c10::impl::toList(rs); } -IValue toIValue(py::handle obj, const TypePtr& type, c10::optional N) { +IValue toIValue(py::handle obj, const TypePtr& type, std::optional N) { switch (type->kind()) { case TypeKind::TensorType: { if (obj.ptr() == Py_None) { @@ -802,7 +802,7 @@ py::object invokeOperatorFromPython( const std::vector>& operations, py::args args, const py::kwargs& kwargs, - c10::optional dk) { + std::optional dk) { auto [found_op, stack] = getOpWithStack(operations, args, kwargs); { pybind11::gil_scoped_release no_gil_guard; @@ -881,7 +881,7 @@ py::object _get_operation_for_overload_or_packet( py::args args, const py::kwargs& kwargs, bool is_overload, - c10::optional dk) { + std::optional dk) { std::string ns = symbol.ns().toUnqualString(); std::string method_name = symbol.toUnqualString(); std::string overload_name = operations[0]->schema().overload_name(); diff --git a/torch/csrc/jit/python/pybind_utils.h b/torch/csrc/jit/python/pybind_utils.h index a78c3e0c0be34..242da11af7c04 100644 --- a/torch/csrc/jit/python/pybind_utils.h +++ b/torch/csrc/jit/python/pybind_utils.h @@ -62,7 +62,7 @@ void clear_registered_instances(void* ptr); TORCH_PYTHON_API IValue toIValue( py::handle obj, const TypePtr& type, - c10::optional N = c10::nullopt); + std::optional N = c10::nullopt); TORCH_PYTHON_API py::object toPyObject(IValue ivalue); @@ -111,7 +111,7 @@ struct VISIBILITY_HIDDEN PythonFutureWrapper explicit PythonFutureWrapper( c10::intrusive_ptr fut, - c10::optional unwrap_func = c10::nullopt) + std::optional unwrap_func = c10::nullopt) : fut(std::move(fut)), unwrap_func(std::move(unwrap_func)) {} explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete; @@ -232,7 +232,7 @@ struct VISIBILITY_HIDDEN PythonFutureWrapper c10::intrusive_ptr fut; // unwrap_func works like a callback for the value returned by // PythonFutureWrapper::wait(). 
- c10::optional unwrap_func; + std::optional unwrap_func; private: std::shared_ptr getPtr() { @@ -348,7 +348,7 @@ inline TypedIValue toDictKeyIValue(py::handle key) { } } -inline c10::optional unifyOrInitializeType( +inline std::optional unifyOrInitializeType( const TypePtr& accum, const TypePtr& unify) { if (!accum) { @@ -987,7 +987,7 @@ inline Stack createStackForSchema( const FunctionSchema& schema, const tuple_slice& args, const py::kwargs& kwargs, - c10::optional self) { + std::optional self) { size_t all_arguments = (self ? 1 : 0) + args.size() + kwargs.size(); if (all_arguments > schema.arguments().size()) { throw schema_match_error(c10::str( @@ -1102,7 +1102,7 @@ inline py::object runAndInsertCall( Function& callee, const tuple_slice& args, const py::kwargs& kwargs, - c10::optional self, + std::optional self, // Lambda that tells this function how to insert `callee` into the graph if // we're tracing. const std::function& @@ -1158,7 +1158,7 @@ inline py::object runAndInsertCall( return toPyObject(std::move(stack.back())); } -inline c10::optional maybeTorchFunctionDispatch( +inline std::optional maybeTorchFunctionDispatch( const py::object& callee, const tuple_slice& args_no_self, const py::kwargs& kwargs, @@ -1255,7 +1255,7 @@ TORCH_PYTHON_API py::object invokeOperatorFromPython( const std::vector>& operations, py::args args, const py::kwargs& kwargs, - c10::optional dk = c10::nullopt); + std::optional dk = c10::nullopt); TORCH_PYTHON_API py::tuple _maybe_handle_torch_function( const std::string& ns, @@ -1276,6 +1276,6 @@ TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet( py::args args, const py::kwargs& kwargs, bool is_overload, - c10::optional dk = c10::nullopt); + std::optional dk = c10::nullopt); } // namespace torch::jit diff --git a/torch/csrc/jit/python/python_ir.cpp b/torch/csrc/jit/python/python_ir.cpp index 7c6c5089b6d38..2442ef0573545 100644 --- a/torch/csrc/jit/python/python_ir.cpp +++ b/torch/csrc/jit/python/python_ir.cpp @@ -131,7 +131,7 @@ void ConcretePythonOp::cloneFrom(Node* other_) { // recover the autograd.Function instance, if this PythonOp's function // was originally SomeFunction.apply // used in ONNX for discovering symbolics -c10::optional ConcretePythonOp::autogradFunction() const { +std::optional ConcretePythonOp::autogradFunction() const { pybind11::gil_scoped_acquire gil; // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) py::handle obj = const_cast(pyobj.get()); @@ -865,7 +865,7 @@ void initPythonIRBindings(PyObject* module_) { }) .def( "with_sizes", - [](Type& t, c10::optional>> sizes) + [](Type& t, std::optional>> sizes) -> py::object { auto ptt = t.expect(); if (!ptt) { diff --git a/torch/csrc/jit/python/python_ir.h b/torch/csrc/jit/python/python_ir.h index 296fc3f0b1f2e..26adf8c0e4941 100644 --- a/torch/csrc/jit/python/python_ir.h +++ b/torch/csrc/jit/python/python_ir.h @@ -42,7 +42,7 @@ struct ConcretePythonOp : public PythonOp { // recover the autograd.Function instance, if this PythonOp's function // was originally SomeFunction.apply // used in ONNX for discovering symbolics - c10::optional autogradFunction() const override; + std::optional autogradFunction() const override; void writeScalars(std::ostream& out) const override; void lint_python() const override; }; diff --git a/torch/csrc/jit/python/python_ivalue.h b/torch/csrc/jit/python/python_ivalue.h index f33ceca30f2d0..4cdc8e430b9a8 100644 --- a/torch/csrc/jit/python/python_ivalue.h +++ b/torch/csrc/jit/python/python_ivalue.h @@ -31,7 +31,7 @@ struct C10_EXPORT 
ConcretePyObjectHolder final : PyObjectHolder { return torch::jit::tryToInferType(py_obj_); } - IValue toIValue(const TypePtr& type, c10::optional N = c10::nullopt) + IValue toIValue(const TypePtr& type, std::optional N = c10::nullopt) override { pybind11::gil_scoped_acquire ag; return torch::jit::toIValue(py_obj_, type, N); diff --git a/torch/csrc/jit/python/python_list.h b/torch/csrc/jit/python/python_list.h index d70e653043c93..b5bb88b3aeb20 100644 --- a/torch/csrc/jit/python/python_list.h +++ b/torch/csrc/jit/python/python_list.h @@ -175,7 +175,7 @@ class ScriptList final { // Remove and return the element at the specified index from the list. If no // index is passed, the last element is removed and returned. - IValue pop(c10::optional idx = c10::nullopt) { + IValue pop(std::optional idx = c10::nullopt) { IValue ret; if (idx) { diff --git a/torch/csrc/jit/python/python_sugared_value.cpp b/torch/csrc/jit/python/python_sugared_value.cpp index 4b854c884d026..d6f014759c05e 100644 --- a/torch/csrc/jit/python/python_sugared_value.cpp +++ b/torch/csrc/jit/python/python_sugared_value.cpp @@ -24,7 +24,7 @@ std::string typeString(py::handle h) { return py::str(h.get_type().attr("__name__")); } -c10::optional as_function(const py::object& obj) { +std::optional as_function(const py::object& obj) { if (py::isinstance(obj)) { return py::cast(obj); } @@ -169,7 +169,7 @@ std::string PythonValue::kind() const { std::vector> PythonValue::asTuple( const SourceRange& loc, GraphFunction& m, - const c10::optional& size_hint) { + const std::optional& size_hint) { const std::string type_str = typeString(self); std::stringstream ss; ss << kind() << " cannot be used as a tuple"; @@ -927,7 +927,7 @@ std::shared_ptr BooleanDispatchValue::call( at::ArrayRef args, at::ArrayRef kwargs, size_t n_binders) { - c10::optional result; + std::optional result; Graph& graph = *(caller.graph()); auto index = py::cast(dispatched_fn_["index"]); diff --git a/torch/csrc/jit/python/python_sugared_value.h b/torch/csrc/jit/python/python_sugared_value.h index 35298e30b08a6..cb397796c9f55 100644 --- a/torch/csrc/jit/python/python_sugared_value.h +++ b/torch/csrc/jit/python/python_sugared_value.h @@ -27,12 +27,12 @@ std::shared_ptr toSugaredValue( const SourceRange& loc, bool is_constant = false); -c10::optional as_function(const py::object& obj); +std::optional as_function(const py::object& obj); struct VISIBILITY_HIDDEN PythonValue : public SugaredValue { PythonValue( py::object the_self, - c10::optional rcb = c10::nullopt, + std::optional rcb = c10::nullopt, Value* module_self = nullptr) : self(std::move(the_self)), rcb(std::move(rcb)), @@ -56,7 +56,7 @@ struct VISIBILITY_HIDDEN PythonValue : public SugaredValue { std::vector> asTuple( const SourceRange& loc, GraphFunction& m, - const c10::optional& size_hint = {}) override; + const std::optional& size_hint = {}) override; std::shared_ptr attr( const SourceRange& loc, @@ -79,7 +79,7 @@ struct VISIBILITY_HIDDEN PythonValue : public SugaredValue { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) py::object self; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) - c10::optional rcb; + std::optional rcb; // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) Value* moduleSelf_ = nullptr; }; diff --git a/torch/csrc/jit/python/python_tracer.cpp b/torch/csrc/jit/python/python_tracer.cpp index bdc62d33568de..92e6e2d3ace23 100644 --- a/torch/csrc/jit/python/python_tracer.cpp +++ 
b/torch/csrc/jit/python/python_tracer.cpp @@ -45,7 +45,7 @@ std::vector _pythonCallstack() { SourceRange getPythonInterpreterSourceRange() { auto cs = pythonCallstack(); - c10::optional source_filename; + std::optional source_filename; size_t source_line = 0; std::stringstream stack_trace; for (const auto& entry : cs) { diff --git a/torch/csrc/jit/python/python_tree_views.cpp b/torch/csrc/jit/python/python_tree_views.cpp index a171314099c3e..50d18b908107e 100644 --- a/torch/csrc/jit/python/python_tree_views.cpp +++ b/torch/csrc/jit/python/python_tree_views.cpp @@ -12,7 +12,7 @@ namespace py = pybind11; namespace torch::jit { -c10::optional maybeConvertToString(const py::object& obj) { +std::optional maybeConvertToString(const py::object& obj) { if (obj.is_none()) { return c10::nullopt; } @@ -177,10 +177,10 @@ void initTreeViewBindings(PyObject* module) { [](const Property& property) { return property.getter().name(); }) .def("setter_name", [](const Property& property) { if (property.setter().present()) { - return c10::optional(property.setter().get().name()); + return std::optional(property.setter().get().name()); } - return c10::optional(c10::nullopt); + return std::optional(c10::nullopt); }); py::class_(m, "ClassDef") diff --git a/torch/csrc/jit/python/script_init.cpp b/torch/csrc/jit/python/script_init.cpp index 22809069f8809..971b6c76ca47e 100644 --- a/torch/csrc/jit/python/script_init.cpp +++ b/torch/csrc/jit/python/script_init.cpp @@ -207,7 +207,7 @@ void checkOverloadDecl(const Decl& new_decl, const Decl& old_decl) { } } -c10::optional tryCalculateDefaultParam( +std::optional tryCalculateDefaultParam( const Argument& arg, const py::object& def_value) { auto n = arg.N(); @@ -287,7 +287,7 @@ FunctionSchema getSchemaWithNameAndDefaults( auto it = default_args.find(arg.name()); if (it != default_args.end()) { checkMutableFunctionDefault(range, arg, it->second); - c10::optional value = tryCalculateDefaultParam(arg, it->second); + std::optional value = tryCalculateDefaultParam(arg, it->second); if (!value) { ErrorReport error(range); error << "Expected a default value of type " << arg.type()->repr_str() @@ -1369,10 +1369,10 @@ void initJitScriptBindings(PyObject* module) { [](std::shared_ptr self, const std::string& name) { auto fn = self->find_function(QualifiedName(name)); if (fn) { - return c10::optional( + return std::optional( StrongFunctionPtr(std::move(self), fn)); } else { - return c10::optional(c10::nullopt); + return std::optional(c10::nullopt); } }) .def( @@ -1852,7 +1852,7 @@ void initJitScriptBindings(PyObject* module) { py::object map_location, const py::dict& extra_files, bool restore_shapes = false) { - c10::optional optional_device; + std::optional optional_device; if (!map_location.is_none()) { AT_ASSERT(THPDevice_Check(map_location.ptr())); optional_device = @@ -1877,7 +1877,7 @@ void initJitScriptBindings(PyObject* module) { storage_context, py::object map_location, std::string ts_id) { - c10::optional optional_device; + std::optional optional_device; if (!map_location.is_none()) { AT_ASSERT(THPDevice_Check(map_location.ptr())); optional_device = @@ -1898,7 +1898,7 @@ void initJitScriptBindings(PyObject* module) { const py::dict& extra_files, bool restore_shapes = false) { std::istringstream in(buffer); - c10::optional optional_device; + std::optional optional_device; if (!map_location.is_none()) { AT_ASSERT(THPDevice_Check(map_location.ptr())); optional_device = @@ -1918,7 +1918,7 @@ void initJitScriptBindings(PyObject* module) { m.def( "_load_for_lite_interpreter", 
[](const std::string& filename, py::object map_location) { - c10::optional optional_device; + std::optional optional_device; if (!map_location.is_none()) { AT_ASSERT(THPDevice_Check(map_location.ptr())); optional_device = @@ -1930,7 +1930,7 @@ void initJitScriptBindings(PyObject* module) { "_load_for_lite_interpreter_from_buffer", [](const std::string& buffer, py::object map_location) { std::istringstream in(buffer); - c10::optional optional_device; + std::optional optional_device; if (!map_location.is_none()) { AT_ASSERT(THPDevice_Check(map_location.ptr())); optional_device = @@ -1975,7 +1975,7 @@ void initJitScriptBindings(PyObject* module) { m.def( "_get_model_extra_files", [](const std::string& filename, const py::dict& py_extra_files) { - c10::optional optional_device; + std::optional optional_device; ExtraFilesMap cpp_extra_files = ExtraFilesMap(); _load_for_mobile(filename, optional_device, cpp_extra_files); extra_files_to_python(cpp_extra_files, py_extra_files); @@ -1990,7 +1990,7 @@ void initJitScriptBindings(PyObject* module) { m.def( "_get_model_extra_files_from_buffer", [](const std::string& buffer, const py::dict& py_extra_files) { - c10::optional optional_device; + std::optional optional_device; ExtraFilesMap cpp_extra_files = ExtraFilesMap(); std::istringstream in(buffer); _load_for_mobile(in, optional_device, cpp_extra_files); @@ -2124,7 +2124,7 @@ void initJitScriptBindings(PyObject* module) { m.def( "_get_graph_executor_optimize", - [](c10::optional new_setting = c10::nullopt) { + [](std::optional new_setting = c10::nullopt) { bool old_value = getGraphExecutorOptimize(); if (new_setting) { setGraphExecutorOptimize(*new_setting); diff --git a/torch/csrc/jit/runtime/argument_spec.h b/torch/csrc/jit/runtime/argument_spec.h index 06c77edca718c..7a815e815d8e9 100644 --- a/torch/csrc/jit/runtime/argument_spec.h +++ b/torch/csrc/jit/runtime/argument_spec.h @@ -47,7 +47,7 @@ struct ArgumentInfo { return TensorType::get(); return TensorType::create( - type(), device(), c10::optional(dim()), requires_grad()); + type(), device(), std::optional(dim()), requires_grad()); } operator TypePtr() const { return toType(); @@ -460,10 +460,10 @@ inline CompleteArgumentInfo CompleteArgumentSpec::at(size_t i) const { return CompleteArgumentInfo(*this, i); } -inline c10::optional convertOptional( - c10::optional const& from) { - return (from) ? c10::optional(static_cast(*from)) - : c10::optional{}; +inline std::optional convertOptional( + std::optional const& from) { + return (from) ? std::optional(static_cast(*from)) + : std::optional{}; } } // namespace torch::jit @@ -475,7 +475,7 @@ struct hash> { size_t operator()(const c10::VaryingShape& vs) const { return c10::get_hash( vs.size(), - vs.size() ? vs.sizes().value() : std::vector>()); + vs.size() ? vs.sizes().value() : std::vector>()); } }; @@ -483,10 +483,10 @@ template <> struct hash { size_t operator()(const c10::TensorType& ptt) const { return c10::get_hash< - c10::optional, + std::optional, c10::VaryingShape, c10::VaryingShape, - c10::optional>( + std::optional>( torch::jit::convertOptional(ptt.scalarType()), ptt.sizes(), ptt.strides(), diff --git a/torch/csrc/jit/runtime/autodiff.cpp b/torch/csrc/jit/runtime/autodiff.cpp index 0d33abb217ee9..3987521f658f9 100644 --- a/torch/csrc/jit/runtime/autodiff.cpp +++ b/torch/csrc/jit/runtime/autodiff.cpp @@ -128,7 +128,7 @@ bool isDifferentiable(Graph& g) { // will be cleaned up later using EliminateDeadCode(block). 
TupleUnPack node in // backward graph will be removed in eliminateDeadcode(ReverseDetails) defined // in this file. -static c10::optional> build_script_grad( +static std::optional> build_script_grad( Node* node, const ArrayRef& grads) { auto graph = node->owningGraph(); @@ -352,7 +352,7 @@ bool outputRequiresGrad(Value* output) { if (output->type()->castRaw() == nullptr) { return output->requires_grad(); } - c10::optional requiresGrad = + std::optional requiresGrad = output->type()->expectRef().requiresGrad(); if (requiresGrad.has_value()) { return *requiresGrad; diff --git a/torch/csrc/jit/runtime/custom_operator.h b/torch/csrc/jit/runtime/custom_operator.h index 64d514374f58e..faa8c90754a0e 100644 --- a/torch/csrc/jit/runtime/custom_operator.h +++ b/torch/csrc/jit/runtime/custom_operator.h @@ -18,8 +18,8 @@ struct TORCH_API RegisterOperators { /// Registers a vector of already created `Operator`s. /// The operator element is now optional to filter null ops. It's backward /// compatible and works for selective operator registration. - explicit RegisterOperators(std::vector> operators) { - for (c10::optional& o : operators) { + explicit RegisterOperators(std::vector> operators) { + for (std::optional& o : operators) { if (o) { registerOperator(std::move(o.value())); } diff --git a/torch/csrc/jit/runtime/decomposition_registry.cpp b/torch/csrc/jit/runtime/decomposition_registry.cpp index 0c5f5f0876c1b..900ee32746906 100644 --- a/torch/csrc/jit/runtime/decomposition_registry.cpp +++ b/torch/csrc/jit/runtime/decomposition_registry.cpp @@ -107,7 +107,7 @@ void RunDecompositions(std::shared_ptr g) { } } -c10::optional> GetDecomposition( +std::optional> GetDecomposition( const FunctionSchema& schema) { loadDecompositionFunctions(); GRAPH_DEBUG("Trying to find schema: ", schema); @@ -120,7 +120,7 @@ c10::optional> GetDecomposition( return c10::nullopt; } -c10::optional GetDecompositionFunction( +std::optional GetDecompositionFunction( const FunctionSchema& schema) { loadDecompositionFunctions(); auto cache_it = schema_to_function.find(&schema); diff --git a/torch/csrc/jit/runtime/decomposition_registry.h b/torch/csrc/jit/runtime/decomposition_registry.h index 8633609bcf2a8..59f5aa796f76c 100644 --- a/torch/csrc/jit/runtime/decomposition_registry.h +++ b/torch/csrc/jit/runtime/decomposition_registry.h @@ -7,7 +7,7 @@ namespace torch::jit { -TORCH_API c10::optional> GetDecomposition( +TORCH_API std::optional> GetDecomposition( const FunctionSchema& schema); TORCH_API void RegisterDecomposition( @@ -16,7 +16,7 @@ TORCH_API void RegisterDecomposition( TORCH_API void RunDecompositions(std::shared_ptr g); -TORCH_API c10::optional GetDecompositionFunction( +TORCH_API std::optional GetDecompositionFunction( const FunctionSchema& schema); // For invocation in C++, recommended is to assign to static local variable diff --git a/torch/csrc/jit/runtime/graph_executor.cpp b/torch/csrc/jit/runtime/graph_executor.cpp index b1888f6344f18..d46e9028bf0af 100644 --- a/torch/csrc/jit/runtime/graph_executor.cpp +++ b/torch/csrc/jit/runtime/graph_executor.cpp @@ -636,7 +636,7 @@ struct GraphExecutorImpl : public GraphExecutorImplBase { const ExecutionPlan& getPlanFor( Stack& stack, - c10::optional remaining_bailout_depth) override { + std::optional remaining_bailout_depth) override { return getGraphExecutorOptimize() ? 
getOrCompile(stack) : getOrCompileFallback(); } @@ -838,7 +838,7 @@ c10::intrusive_ptr GraphExecutor::runAsync( const ExecutionPlan& GraphExecutor::getPlanFor( Stack& inputs, - c10::optional remaining_bailout_depth) { + std::optional remaining_bailout_depth) { return pImpl->getPlanFor(inputs, remaining_bailout_depth); } diff --git a/torch/csrc/jit/runtime/graph_executor.h b/torch/csrc/jit/runtime/graph_executor.h index d82d69ad5dce5..fce8d4a02e66c 100644 --- a/torch/csrc/jit/runtime/graph_executor.h +++ b/torch/csrc/jit/runtime/graph_executor.h @@ -87,7 +87,7 @@ struct TORCH_API GraphExecutor { // current global fusion strategy settings. const ExecutionPlan& getPlanFor( Stack& inputs, - c10::optional remaining_bailout_depth = c10::nullopt); + std::optional remaining_bailout_depth = c10::nullopt); GraphExecutorState getDebugState(); void debugFlushCompilationCache(); diff --git a/torch/csrc/jit/runtime/graph_executor_impl.h b/torch/csrc/jit/runtime/graph_executor_impl.h index 3aae2eb852796..22a563f00be28 100644 --- a/torch/csrc/jit/runtime/graph_executor_impl.h +++ b/torch/csrc/jit/runtime/graph_executor_impl.h @@ -78,7 +78,7 @@ struct GraphExecutorImplBase { virtual const ExecutionPlan& getPlanFor( Stack& stack, - c10::optional remaining_bailout_depth = c10::nullopt) = 0; + std::optional remaining_bailout_depth = c10::nullopt) = 0; virtual GraphExecutorState getDebugState() = 0; virtual ~GraphExecutorImplBase() = default; diff --git a/torch/csrc/jit/runtime/interpreter.cpp b/torch/csrc/jit/runtime/interpreter.cpp index e5f0f69a45498..18231173dd70e 100644 --- a/torch/csrc/jit/runtime/interpreter.cpp +++ b/torch/csrc/jit/runtime/interpreter.cpp @@ -181,7 +181,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { void callFunction( Function& f, Stack& stack, - c10::optional bailOut = c10::nullopt, + std::optional bailOut = c10::nullopt, bool next = true) { bool newFrame = f.call(stack, bailOut, [&](const Code& code) { enterFrame(code, stack.size() - code.num_inputs()); @@ -882,7 +882,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { // Janky af. 
See https://github.com/pytorch/pytorch/issues/54612 auto* not_implemented_error = dynamic_cast(&e); - c10::optional python_class_name; + std::optional python_class_name; if (jit_exception) { python_class_name = jit_exception->getPythonClassName(); } @@ -913,7 +913,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { const std::exception& e, bool is_jit_exception, c10::NotImplementedError* not_implemented_error, - c10::optional python_class_name) { + std::optional python_class_name) { ExceptionMessage msg(e); std::ostringstream ss; std::string class_name = diff --git a/torch/csrc/jit/runtime/interpreter.h b/torch/csrc/jit/runtime/interpreter.h index e47a581fd5def..a28b1eb93526b 100644 --- a/torch/csrc/jit/runtime/interpreter.h +++ b/torch/csrc/jit/runtime/interpreter.h @@ -124,7 +124,7 @@ struct InterpreterContinuation { InterpreterState state_, Stack stack_, int64_t dist_autograd_context_id = 0, - c10::optional tls_state = c10::nullopt) + std::optional tls_state = c10::nullopt) : state(std::move(state_)), stack(std::move(stack_)), tls_state_(std::move(tls_state)) @@ -140,7 +140,7 @@ struct InterpreterContinuation { private: InterpreterState state; Stack stack; - c10::optional tls_state_ = c10::nullopt; + std::optional tls_state_ = c10::nullopt; #ifdef USE_DISTRIBUTED int64_t dist_autograd_context_id_; #endif diff --git a/torch/csrc/jit/runtime/interpreter/code_impl.h b/torch/csrc/jit/runtime/interpreter/code_impl.h index 98701aa23b365..60948da5a86d6 100644 --- a/torch/csrc/jit/runtime/interpreter/code_impl.h +++ b/torch/csrc/jit/runtime/interpreter/code_impl.h @@ -111,8 +111,8 @@ struct CodeImpl { // It is also very useful for debugging interpreter problems to // keep this around. std::shared_ptr graph_; - c10::optional> grad_executors_; - c10::optional> forward_executors_; + std::optional> grad_executors_; + std::optional> forward_executors_; PreprocessGraph preprocess_; // map from unique of nodes to register in register table diff --git a/torch/csrc/jit/runtime/interpreter/frame.h b/torch/csrc/jit/runtime/interpreter/frame.h index e3de0a02ff7fa..c6873605d0deb 100644 --- a/torch/csrc/jit/runtime/interpreter/frame.h +++ b/torch/csrc/jit/runtime/interpreter/frame.h @@ -26,7 +26,7 @@ struct Frame { size_t base_pointer; // unique to every frame with prim::profile across all threads - c10::optional id; + std::optional id; // RecordFunction object associated with this frame std::unique_ptr record_function; diff --git a/torch/csrc/jit/runtime/jit_exception.cpp b/torch/csrc/jit/runtime/jit_exception.cpp index 809b1b2f5e599..2586f904c9871 100644 --- a/torch/csrc/jit/runtime/jit_exception.cpp +++ b/torch/csrc/jit/runtime/jit_exception.cpp @@ -7,8 +7,8 @@ static thread_local std::string caughtPythonClassName = ""; JITException::JITException( const std::string& msg, - c10::optional python_class_name, - c10::optional original_msg) + std::optional python_class_name, + std::optional original_msg) : std::runtime_error(msg), python_class_name_(std::move(python_class_name)), original_msg_(std::move(original_msg)) {} diff --git a/torch/csrc/jit/runtime/jit_exception.h b/torch/csrc/jit/runtime/jit_exception.h index 728675ed78418..34c3ebd6fca84 100644 --- a/torch/csrc/jit/runtime/jit_exception.h +++ b/torch/csrc/jit/runtime/jit_exception.h @@ -11,17 +11,17 @@ namespace torch::jit { struct TORCH_API JITException : public std::runtime_error { explicit JITException( const std::string& msg, - c10::optional python_class_name = c10::nullopt, - c10::optional original_msg = c10::nullopt); + std::optional 
python_class_name = c10::nullopt, + std::optional original_msg = c10::nullopt); - c10::optional getPythonClassName() const { + std::optional getPythonClassName() const { return python_class_name_; } // the original msg if this is from a python exception. The interpretor has // changed the original message by adding "The following operation failed in // the TorchScript interpreter." in front of it in the handleError function. - c10::optional getOriginalMsg() const { + std::optional getOriginalMsg() const { return original_msg_; } @@ -31,8 +31,8 @@ struct TORCH_API JITException : public std::runtime_error { static void setCaughtPythonClassName(const std::string& pythonClassName); private: - c10::optional python_class_name_; - c10::optional original_msg_; + std::optional python_class_name_; + std::optional original_msg_; }; } // namespace torch::jit diff --git a/torch/csrc/jit/runtime/operator.h b/torch/csrc/jit/runtime/operator.h index bcab476441e29..dbc2638457c05 100644 --- a/torch/csrc/jit/runtime/operator.h +++ b/torch/csrc/jit/runtime/operator.h @@ -67,7 +67,7 @@ struct TORCH_API Operator { }; struct UnparsedFunctionSchema final { std::string schema_string_; - mutable c10::optional alias_analysis_; + mutable std::optional alias_analysis_; }; // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) struct JitOnlyOperator final { @@ -298,16 +298,16 @@ TORCH_API bool aliasAnalysisHasSpecialCaseFor(c10::Symbol sym); // compile-time function for the selective op registration based on schema // string. template -c10::optional OperatorGenerator( +std::optional OperatorGenerator( const char* schema_str, Func&& op, AliasAnalysisKind alias_analysis) { - return c10::optional(Operator( + return std::optional(Operator( std::string(schema_str), std::forward(op), alias_analysis)); } template -c10::optional OperatorGenerator( +std::optional OperatorGenerator( torch::detail::SelectiveStr schema_str, Func&& op, AliasAnalysisKind alias_analysis) { @@ -318,7 +318,7 @@ c10::optional OperatorGenerator( } template -c10::optional OperatorGenerator( +std::optional OperatorGenerator( torch::detail::SelectiveStr schema_str, Func&& op, AliasAnalysisKind alias_analysis) { @@ -326,14 +326,14 @@ c10::optional OperatorGenerator( } template -c10::optional OperatorGenerator( +std::optional OperatorGenerator( const std::string name, const std::string overload_name, const std::vector arguments, const std::vector returns, Func&& op, AliasAnalysisKind alias_analysis) { - return c10::optional(Operator( + return std::optional(Operator( name, overload_name, arguments, diff --git a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp index 58d80c48f9c87..48c7a1959ab22 100644 --- a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp +++ b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp @@ -118,7 +118,7 @@ static FusionStrategy getInitialStrategy() { } // defer initial value so that we can load in gflags -static c10::optional fusion_strategy = c10::nullopt; +static std::optional fusion_strategy = c10::nullopt; FusionStrategy getFusionStrategy() { std::lock_guard guard(fusion_strategy_lock); @@ -613,7 +613,7 @@ size_t ProfilingGraphExecutorImpl::getInstantiatedBailoutDepth() { const ExecutionPlan& ProfilingGraphExecutorImpl::getOptimizedPlanFor( Stack& stack, - c10::optional remaining_bailout_depth) { + std::optional remaining_bailout_depth) { GRAPH_DEBUG("Running ProfilingGraphExecutorImpl ", this); // TODO: instantiate simple executor when 
getProfilingMode() is false @@ -700,7 +700,7 @@ const ExecutionPlan& ProfilingGraphExecutorImpl::getOptimizedPlanFor( const ExecutionPlan& ProfilingGraphExecutorImpl::getPlanFor( Stack& stack, - c10::optional remaining_bailout_depth) { + std::optional remaining_bailout_depth) { std::lock_guard lock(compile_mutex); // IMPORTANT: This is a hot path of calling a torchscript function. Try not to diff --git a/torch/csrc/jit/runtime/profiling_graph_executor_impl.h b/torch/csrc/jit/runtime/profiling_graph_executor_impl.h index 45da1f030e962..a49ef18e2fa42 100644 --- a/torch/csrc/jit/runtime/profiling_graph_executor_impl.h +++ b/torch/csrc/jit/runtime/profiling_graph_executor_impl.h @@ -18,7 +18,7 @@ struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase { const ExecutionPlan& getPlanFor( Stack& stack, - c10::optional remaining_bailout_depth) override; + std::optional remaining_bailout_depth) override; GraphExecutorState getDebugState() override; ~ProfilingGraphExecutorImpl() override = default; @@ -31,7 +31,7 @@ struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase { private: const ExecutionPlan& getOptimizedPlanFor( Stack& stack, - c10::optional remaining_bailout_depth); + std::optional remaining_bailout_depth); void runProfilingInsensitiveOptimizations(std::shared_ptr& graph); void runProfilingOptimizations( std::shared_ptr& graph, @@ -47,13 +47,13 @@ struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase { void clearTheGraphCompilationIntermediateGraphs(); std::unique_ptr pr_; - c10::optional + std::optional profiling_plan_; // plan to run in order to profiling the code - c10::optional optimized_plan_; + std::optional optimized_plan_; FusionStrategy fusion_strategy_; // this plan is used if getGraphExecutorOptimize is unset - c10::optional fallback_plan_; + std::optional fallback_plan_; // fallback functions are inserted for tensorexpr fusion groups // and by specialize_autogradzero. Whenever, at runtime, input // tensor don't match profiled properties, fallback functions are called @@ -63,7 +63,7 @@ struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase { // They only exist in the optimized graph which is a private property // of the GraphExecutor and only shared with InterpreterState std::vector> fallback_functions_; - c10::optional remaining_bailout_depth_; + std::optional remaining_bailout_depth_; // The time the optimized_plan_ is created. int32_t time_optimized_plan_created_ = 0; // Has the extra memory used by the graph for profiling is released? 
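
Note (not part of the patch): the executor hunks above switch parameter types such as `remaining_bailout_depth` to `std::optional` while keeping `c10::nullopt` defaults. The sketch below is a minimal standalone illustration of the same optional-default-argument pattern using only the C++17 standard library; the function name and values are hypothetical and do not come from the patch.

```cpp
// Standalone sketch: an optional parameter with a std::nullopt default,
// mirroring the getPlanFor-style signatures in the hunks above.
// effectiveDepth() and the constant 20 are illustrative only.
#include <cstddef>
#include <iostream>
#include <optional>

// Callers may omit the bailout depth entirely.
static size_t effectiveDepth(
    std::optional<size_t> remaining_bailout_depth = std::nullopt) {
  // value_or() supplies a fallback when the caller passed nothing.
  return remaining_bailout_depth.value_or(/*default depth=*/20);
}

int main() {
  std::cout << effectiveDepth() << '\n';   // prints 20 (no value passed)
  std::cout << effectiveDepth(3) << '\n';  // prints 3  (explicit value)
  std::optional<size_t> none;              // default-constructed: empty
  std::cout << none.has_value() << '\n';   // prints 0
  return 0;
}
```
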
diff --git a/torch/csrc/jit/runtime/register_ops_utils.cpp b/torch/csrc/jit/runtime/register_ops_utils.cpp index b926c59e75dee..7335f132dfbf5 100644 --- a/torch/csrc/jit/runtime/register_ops_utils.cpp +++ b/torch/csrc/jit/runtime/register_ops_utils.cpp @@ -403,7 +403,7 @@ void listSetItem(Stack& stack) { at::Generator make_generator_for_device( c10::Device device, - c10::optional seed) { + std::optional seed) { if (device.is_cpu()) { if (seed.has_value()) { return at::detail::createCPUGenerator(seed.value()); diff --git a/torch/csrc/jit/runtime/register_ops_utils.h b/torch/csrc/jit/runtime/register_ops_utils.h index de70cea3a1d50..15e59acb9fe6e 100644 --- a/torch/csrc/jit/runtime/register_ops_utils.h +++ b/torch/csrc/jit/runtime/register_ops_utils.h @@ -879,6 +879,6 @@ struct OperatorGeneratorArgs { TORCH_API at::Generator make_generator_for_device( c10::Device device, - c10::optional seed = c10::nullopt); + std::optional seed = c10::nullopt); } // namespace torch::jit diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp index ee1c0c9e29ef8..bb9c08465c0ae 100644 --- a/torch/csrc/jit/runtime/register_prim_ops.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops.cpp @@ -34,8 +34,8 @@ namespace { std::string stringSlice( std::string string, - c10::optional start, - c10::optional end, + std::optional start, + std::optional end, int64_t step) { int64_t start_val = start.has_value() ? start.value() : INT64_MAX; int64_t end_val = end.has_value() ? end.value() : INT64_MAX; @@ -1167,7 +1167,7 @@ static const std::vector opGenArgs{ "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"), [](Stack& stack) { auto indices = pop(stack).to>(); - c10::List> opt_list_indices; + c10::List> opt_list_indices; opt_list_indices.reserve(indices.size()); for (const auto& ten : indices) { opt_list_indices.push_back(ten); @@ -1182,7 +1182,7 @@ static const std::vector opGenArgs{ "aten::_unsafe_index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"), [](Stack& stack) { auto indices = pop(stack).to>(); - c10::List> opt_list_indices; + c10::List> opt_list_indices; opt_list_indices.reserve(indices.size()); for (const auto& ten : indices) { opt_list_indices.push_back(ten); @@ -1200,7 +1200,7 @@ static const std::vector opGenArgs{ auto accumulate = pop(stack).toBool(); auto values = pop(stack).toTensor(); auto indices = pop(stack).to>(); - c10::List> opt_list_indices; + c10::List> opt_list_indices; opt_list_indices.reserve(indices.size()); for (const auto& ten : indices) { opt_list_indices.push_back(ten); @@ -1218,7 +1218,7 @@ static const std::vector opGenArgs{ auto accumulate = pop(stack).toBool(); auto values = pop(stack).toTensor(); auto indices = pop(stack).to>(); - c10::List> opt_list_indices; + c10::List> opt_list_indices; opt_list_indices.reserve(indices.size()); for (const auto& ten : indices) { opt_list_indices.push_back(ten); @@ -1236,7 +1236,7 @@ static const std::vector opGenArgs{ auto accumulate = pop(stack).toBool(); auto values = pop(stack).toTensor(); auto indices = pop(stack).to>(); - c10::List> opt_list_indices; + c10::List> opt_list_indices; opt_list_indices.reserve(indices.size()); for (const auto& ten : indices) { opt_list_indices.push_back(ten); @@ -1254,7 +1254,7 @@ static const std::vector opGenArgs{ auto accumulate = pop(stack).toBool(); auto values = pop(stack).toTensor(); auto indices = pop(stack).to>(); - c10::List> opt_list_indices; + c10::List> opt_list_indices; opt_list_indices.reserve(indices.size()); for 
(const auto& ten : indices) { opt_list_indices.push_back(ten); @@ -1275,9 +1275,9 @@ static const std::vector opGenArgs{ // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool copy; pop(stack, non_blocking, copy); - c10::optional scalarType = + std::optional scalarType = pop(stack).toOptional(); - c10::optional device = + std::optional device = pop(stack).toOptional(); at::Tensor self = pop(stack).toTensor(); push( @@ -1404,9 +1404,9 @@ static const std::vector opGenArgs{ } }))}; -static std::vector> createOperators( +static std::vector> createOperators( const std::vector& args) { - std::vector> result; + std::vector> result; result.reserve(args.size()); for (const auto& arg : args) { if (arg.schema_str) { @@ -1769,8 +1769,8 @@ static const std::vector stringOpGenArgs{ "aten::slice.str(str string, int? start=None, int? end=None, int step=1) -> str"), [](Stack& stack) { int64_t step = pop(stack).toInt(); - c10::optional end = pop(stack).toOptional(); - c10::optional start = pop(stack).toOptional(); + std::optional end = pop(stack).toOptional(); + std::optional start = pop(stack).toOptional(); std::string string = pop(stack).toStringRef(); push(stack, stringSlice(string, start, end, step)); }, @@ -2397,7 +2397,7 @@ static const std::vector stringOpGenArgs{ for (const auto& v : ivalues) { values.emplace_back(v.toStringRef()); } - c10::optional opt_string = + std::optional opt_string = pop(stack).toOptional(); const std::string& string = opt_string.value_or(""); std::stringstream ss; @@ -2463,8 +2463,8 @@ static const std::vector opGenArgs1{ // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool copy; pop(stack, self, non_blocking, copy); - c10::optional device = c10::nullopt; - c10::optional scalarType = c10::nullopt; + std::optional device = c10::nullopt; + std::optional scalarType = c10::nullopt; push( stack, to_dispatch(self, device, scalarType, non_blocking, copy)); }, diff --git a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp index d48a981666c83..4359b852b6a38 100644 --- a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp @@ -427,8 +427,8 @@ at::Tensor interpolate( const IValue& size, const IValue& scale_factors, const std::string& mode, - c10::optional align_corners, - c10::optional recompute_scale_factor) { + std::optional align_corners, + std::optional recompute_scale_factor) { if ((mode == "nearest" || mode == "area")) { if (align_corners != c10::nullopt) { throw std::runtime_error( diff --git a/torch/csrc/jit/runtime/register_special_ops.cpp b/torch/csrc/jit/runtime/register_special_ops.cpp index 5e33d8cf27d39..5b8c70c404ae9 100644 --- a/torch/csrc/jit/runtime/register_special_ops.cpp +++ b/torch/csrc/jit/runtime/register_special_ops.cpp @@ -406,7 +406,7 @@ RegisterOperators reg({ double a; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) double b; - c10::optional generator = + std::optional generator = pop(stack).toOptional(); pop(stack, tensor, a, b); @@ -425,7 +425,7 @@ RegisterOperators reg({ double mean; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) double std; - c10::optional generator = + std::optional generator = pop(stack).toOptional(); pop(stack, tensor, mean, std); diff --git a/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp b/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp index 742915995469e..c1dbbddc6d337 100644 --- a/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp +++ 
b/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp @@ -13,7 +13,7 @@ SimpleGraphExecutorImpl::SimpleGraphExecutorImpl( const ExecutionPlan& SimpleGraphExecutorImpl::getPlanFor( Stack& stack, - c10::optional remaining_bailout_depth) { + std::optional remaining_bailout_depth) { std::lock_guard lock(compile_mutex); // IMPORTANT: This is a hot path of calling a torchscript function. Try not to diff --git a/torch/csrc/jit/runtime/simple_graph_executor_impl.h b/torch/csrc/jit/runtime/simple_graph_executor_impl.h index 34272000f0d1a..e1ebed46ede80 100644 --- a/torch/csrc/jit/runtime/simple_graph_executor_impl.h +++ b/torch/csrc/jit/runtime/simple_graph_executor_impl.h @@ -12,12 +12,12 @@ struct TORCH_API SimpleGraphExecutorImpl : public GraphExecutorImplBase { const ExecutionPlan& getPlanFor( Stack& stack, - c10::optional remaining_bailout_depth) override; + std::optional remaining_bailout_depth) override; GraphExecutorState getDebugState() override; ~SimpleGraphExecutorImpl() override = default; private: - c10::optional execution_plan_; + std::optional execution_plan_; }; } // namespace torch::jit diff --git a/torch/csrc/jit/runtime/static/fusion.cpp b/torch/csrc/jit/runtime/static/fusion.cpp index 5ba3b1a0268f2..ffac37efc9b76 100644 --- a/torch/csrc/jit/runtime/static/fusion.cpp +++ b/torch/csrc/jit/runtime/static/fusion.cpp @@ -168,7 +168,7 @@ static void debugDumpFusionGroup(const std::string& msg, Node* n) { } } -static c10::optional tryMerge( +static std::optional tryMerge( Node* fusion_group, Node* to_merge, AliasDb* aliasDb) { diff --git a/torch/csrc/jit/runtime/static/impl.cpp b/torch/csrc/jit/runtime/static/impl.cpp index 9f62d631bce88..193675672f6b8 100644 --- a/torch/csrc/jit/runtime/static/impl.cpp +++ b/torch/csrc/jit/runtime/static/impl.cpp @@ -286,7 +286,7 @@ void PrepareGraphForStaticModule( ForceNonEmptyOutputs(*graph); } -std::pair, c10::optional> PrepareForStaticModule( +std::pair, std::optional> PrepareForStaticModule( const torch::jit::Module& m, bool is_frozen, const StaticModuleOptions& opts, @@ -316,7 +316,7 @@ std::pair, c10::optional> PrepareForStaticModule( return std::make_pair(graph, module); } -std::pair, c10::optional> PrepareForStaticModule( +std::pair, std::optional> PrepareForStaticModule( std::shared_ptr graph, const StaticModuleOptions& opts, std::vector sample_inputs) { @@ -544,7 +544,7 @@ StaticModule::StaticModule( opts) {} StaticModule::StaticModule( - std::pair, c10::optional> + std::pair, std::optional> graph_and_module, const StaticModuleOptions& opts) : opts_(opts), diff --git a/torch/csrc/jit/runtime/static/impl.h b/torch/csrc/jit/runtime/static/impl.h index 48af8ef02afbf..2e840e582a0a1 100644 --- a/torch/csrc/jit/runtime/static/impl.h +++ b/torch/csrc/jit/runtime/static/impl.h @@ -417,7 +417,7 @@ class TORCH_API StaticModule { private: explicit StaticModule( - std::pair, c10::optional> + std::pair, std::optional> graph_and_module, const StaticModuleOptions& opts); @@ -490,7 +490,7 @@ class TORCH_API StaticModule { C10_NODISCARD Node* findNodeWithKindForTesting(const std::string& kind) const; - const c10::optional& schema() const { + const std::optional& schema() const { return schema_; } @@ -539,8 +539,8 @@ class TORCH_API StaticModule { // metadata that is stored in IR nodes as attribute at::intrusive_ptr sr_metadata_; std::shared_ptr graph_; - c10::optional module_; - c10::optional schema_; + std::optional module_; + std::optional schema_; std::unique_ptr cached_runtime_; // Bookkeeping for creating new StaticRuntime instances diff --git 
a/torch/csrc/jit/runtime/static/ops.cpp b/torch/csrc/jit/runtime/static/ops.cpp index b4f4c38c2aaf5..b1b8a081c4ce6 100644 --- a/torch/csrc/jit/runtime/static/ops.cpp +++ b/torch/csrc/jit/runtime/static/ops.cpp @@ -209,7 +209,7 @@ at::Tensor& to_copy_out( const Tensor& self, bool non_blocking, bool copy_strides, - c10::optional memory_format) { + std::optional memory_format) { if (copy_strides) { at::native::resize_impl_cpu_( out.unsafeGetTensorImpl(), self.sizes(), self.strides()); @@ -259,7 +259,7 @@ static Tensor& linear_out( Tensor& output, const Tensor& input, const Tensor& weight, - const c10::optional& bias_opt) { + const std::optional& bias_opt) { TORCH_CHECK(!input.is_mkldnn()); auto bias = bias_opt.has_value() @@ -1048,7 +1048,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::logit, aten_logit, [](Node* n) -> SROperator { LogAndDumpSchema(n); return nullptr; } - c10::optional clamp = c10::nullopt; + std::optional clamp = c10::nullopt; if (n->inputs()[1]->node()->kind() == prim::Constant) { auto clamp_d = toIValue(n->inputs()[1])->toOptional(); clamp = clamp_d @@ -1353,10 +1353,10 @@ namespace { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) struct ToArgs { - c10::optional dtype; + std::optional dtype; c10::Layout layout; bool know_to_will_alias = false; - c10::optional memory_format; + std::optional memory_format; }; template @@ -1440,8 +1440,8 @@ C10_ALWAYS_INLINE void to_copy_functor_impl( // handle memory format bool copy_strides = false; - c10::optional memory_format = c10::MemoryFormat::Preserve; - c10::optional my_args; + std::optional memory_format = c10::MemoryFormat::Preserve; + std::optional my_args; if (!args) { my_args = extract_to_args< has_constant_non_tensor_dtype_and_flags, @@ -1905,7 +1905,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::div, aten_div, [](Node* n) -> SROperator { return [te = createDiv()](ProcessedNode* p_node) { const auto& in0_t = p_node->Input(0).toTensor(); - c10::optional rounding_mode = c10::nullopt; + std::optional rounding_mode = c10::nullopt; if (p_node->num_inputs() > 2) { rounding_mode = p_node->Input(2).toOptional(); } @@ -2396,8 +2396,8 @@ REGISTER_OPERATOR_FUNCTOR( // device & pin_memory matter only when CUDA is enabled. 
static bool hasTensorWithOptions( const IValue& ivalue, - c10::optional dtype, - c10::optional layout) { + std::optional dtype, + std::optional layout) { if (!ivalue.isTensor()) { return false; } @@ -2412,9 +2412,9 @@ static bool hasTensorWithOptions( static bool hasTensorWithOptions( const IValue& ivalue, - c10::optional dtype, - c10::optional layout, - c10::optional memory_format) { + std::optional dtype, + std::optional layout, + std::optional memory_format) { return hasTensorWithOptions(ivalue, dtype, layout) && (memory_format == ivalue.toTensor().options().memory_format_opt()); } diff --git a/torch/csrc/jit/runtime/static/ops.h b/torch/csrc/jit/runtime/static/ops.h index 53aa0dc787d1b..362837e7ce78f 100644 --- a/torch/csrc/jit/runtime/static/ops.h +++ b/torch/csrc/jit/runtime/static/ops.h @@ -15,7 +15,7 @@ at::Tensor& to_copy_out( const Tensor& self, bool non_blocking, bool copy_strides, - c10::optional memory_format); + std::optional memory_format); } // namespace at::native namespace torch::jit { diff --git a/torch/csrc/jit/runtime/symbolic_script.cpp b/torch/csrc/jit/runtime/symbolic_script.cpp index ff8513f016daf..6aa65c528a42b 100644 --- a/torch/csrc/jit/runtime/symbolic_script.cpp +++ b/torch/csrc/jit/runtime/symbolic_script.cpp @@ -1614,7 +1614,7 @@ static void loadFunctions() { loadModule(compilation_unit); } -c10::optional gradientInfoForSchema( +std::optional gradientInfoForSchema( const FunctionSchema& schema) { std::lock_guard guard(lock); if (schema_to_graphs.empty()) { diff --git a/torch/csrc/jit/runtime/symbolic_script.h b/torch/csrc/jit/runtime/symbolic_script.h index 64e0d6661baeb..271bf66916f3d 100644 --- a/torch/csrc/jit/runtime/symbolic_script.h +++ b/torch/csrc/jit/runtime/symbolic_script.h @@ -12,7 +12,7 @@ struct GradientPair { std::shared_ptr backward; }; -TORCH_API c10::optional gradientInfoForSchema( +TORCH_API std::optional gradientInfoForSchema( const FunctionSchema& schema); TORCH_API bool hasGradientInfoForSchema(const FunctionSchema& schema); } // namespace torch::jit diff --git a/torch/csrc/jit/runtime/symbolic_shape_registry.cpp b/torch/csrc/jit/runtime/symbolic_shape_registry.cpp index 5e380c1f437a7..ddea031aba73c 100644 --- a/torch/csrc/jit/runtime/symbolic_shape_registry.cpp +++ b/torch/csrc/jit/runtime/symbolic_shape_registry.cpp @@ -377,7 +377,7 @@ void loadFunctions() { } } // anonymous namespace -c10::optional> shapeComputeGraphForSchema( +std::optional> shapeComputeGraphForSchema( const FunctionSchema& schema) { std::lock_guard guard(lock); if (cached_schema_to_graph.empty()) { @@ -394,7 +394,7 @@ c10::optional> shapeComputeGraphForSchema( return c10::nullopt; } -TORCH_API c10::optional boundedGraphsForSchema( +TORCH_API std::optional boundedGraphsForSchema( const FunctionSchema& schema) { std::lock_guard guard(lock); if (cached_bounded_schema_to_graph.empty()) { diff --git a/torch/csrc/jit/runtime/symbolic_shape_registry.h b/torch/csrc/jit/runtime/symbolic_shape_registry.h index 2d09eb27876b7..a14d327aab429 100644 --- a/torch/csrc/jit/runtime/symbolic_shape_registry.h +++ b/torch/csrc/jit/runtime/symbolic_shape_registry.h @@ -54,10 +54,10 @@ TORCH_API void RegisterShapeComputeGraphForSchema( const FunctionSchema& schema, std::shared_ptr g); -TORCH_API c10::optional> shapeComputeGraphForSchema( +TORCH_API std::optional> shapeComputeGraphForSchema( const FunctionSchema& schema); -TORCH_API c10::optional boundedGraphsForSchema( +TORCH_API std::optional boundedGraphsForSchema( const FunctionSchema& schema); TORCH_API std::vector 
RegisteredShapeComputeSchemas(); diff --git a/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp b/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp index 7674c5324ce9f..4a326285b2974 100644 --- a/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp +++ b/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp @@ -59,7 +59,7 @@ c10::IValue InlinedCallStackSerializer::serialize( } c10::IValue InlinedCallStackSerializer::serialize_module_instance_info( - const c10::optional& m) { + const std::optional& m) { if (!m) { return c10::IValue(); } @@ -168,7 +168,7 @@ InlinedCallStackPtr InlinedCallStackDeserializer::deserialize( return cs_ptr; } -c10::optional InlinedCallStackDeserializer:: +std::optional InlinedCallStackDeserializer:: deserialize_module_instance_info( const c10::IValue& iv, const std::shared_ptr& cu) { diff --git a/torch/csrc/jit/serialization/callstack_debug_info_serialization.h b/torch/csrc/jit/serialization/callstack_debug_info_serialization.h index ac1bdf8d3b1d8..46fd2850d20bd 100644 --- a/torch/csrc/jit/serialization/callstack_debug_info_serialization.h +++ b/torch/csrc/jit/serialization/callstack_debug_info_serialization.h @@ -32,7 +32,7 @@ class InlinedCallStackSerializer { private: // module_info = [ClassType.qualifiedName, instance_name] c10::IValue serialize_module_instance_info( - const c10::optional& m); + const std::optional& m); // This caches serialized inlined callstack ptr, since many // InlinedCallStackPtr can refer to the same one. @@ -64,7 +64,7 @@ class InlinedCallStackDeserializer { const std::shared_ptr& cu); private: - c10::optional deserialize_module_instance_info( + std::optional deserialize_module_instance_info( const c10::IValue& iv, const std::shared_ptr& cu); diff --git a/torch/csrc/jit/serialization/export.cpp b/torch/csrc/jit/serialization/export.cpp index 2df70800a8ad8..6ef9bdbf4abfa 100644 --- a/torch/csrc/jit/serialization/export.cpp +++ b/torch/csrc/jit/serialization/export.cpp @@ -189,7 +189,7 @@ std::string GetFileRootPath(const std::string& rootPath) { } std::string GetExternalFileName( - const c10::optional& external_ref) { + const std::optional& external_ref) { auto tensorName = external_ref.value(); const std::string illegalChars = "\\/:?\"<>|"; for (char& i : tensorName) { @@ -343,7 +343,7 @@ class GraphEncoder { void EncodeTensor( onnx::TensorProto* tensor_proto, const at::Tensor& tensor, - const c10::optional external_ref = {}, + const std::optional external_ref = {}, const bool use_external_data_format = false, const std::string& onnx_file_path = std::string()); @@ -1280,7 +1280,7 @@ void GraphEncoder::EncodeTypeProto( void GraphEncoder::EncodeTensor( onnx::TensorProto* tensor_proto, const at::Tensor& tensor, - const c10::optional external_ref, + const std::optional external_ref, const bool use_external_data_format, const std::string& onnx_file_path) { for (auto d : tensor.sizes()) { diff --git a/torch/csrc/jit/serialization/export_bytecode.cpp b/torch/csrc/jit/serialization/export_bytecode.cpp index 9ec2dbcaa2da3..9f194cd0ad31b 100644 --- a/torch/csrc/jit/serialization/export_bytecode.cpp +++ b/torch/csrc/jit/serialization/export_bytecode.cpp @@ -166,7 +166,7 @@ mobile::Code compileGraphToMobileCode( // and is not allowed. For an operator with num_args = -1, it means the // number of arguments is not available for this operator, we don't do any // backward compatibility adaptation at runtime. 
- c10::optional num_args = c10::nullopt; + std::optional num_args = c10::nullopt; auto it = op_to_specified_args.find(unique_name); if (it != op_to_specified_args.end()) { num_args = it->second; diff --git a/torch/csrc/jit/serialization/export_module.cpp b/torch/csrc/jit/serialization/export_module.cpp index cdb878d4062c8..5bd7714c4e8d2 100644 --- a/torch/csrc/jit/serialization/export_module.cpp +++ b/torch/csrc/jit/serialization/export_module.cpp @@ -254,7 +254,7 @@ std::pair getFunctionTuple( // schema const auto& schema = func.getSchema(); - auto type_printer = [&](const c10::Type& t) -> c10::optional { + auto type_printer = [&](const c10::Type& t) -> std::optional { auto namedType = t.cast(); if (namedType && namedType->name()) { return type_name_uniquer_.getUniqueName(namedType).qualifiedName(); @@ -313,7 +313,7 @@ std::pair getFunctionTuple( } auto bytecode_vals = to_tuple({qn, codeTable, schemaTable}); - c10::optional debug_info_vals; + std::optional debug_info_vals; // module debug info // This is just a set of debug handles. // We always save debug handles. @@ -754,7 +754,7 @@ void ScriptModuleSerializer::writeByteCode( namespace { -c10::optional type_printer( +std::optional type_printer( const c10::Type& type, torch::jit::TypeNameUniquer& type_name_uniquer) { if (auto dyn = type.castRaw()) { diff --git a/torch/csrc/jit/serialization/flatbuffer_serializer.cpp b/torch/csrc/jit/serialization/flatbuffer_serializer.cpp index a3dada3c715f0..5a47fe900f3fd 100644 --- a/torch/csrc/jit/serialization/flatbuffer_serializer.cpp +++ b/torch/csrc/jit/serialization/flatbuffer_serializer.cpp @@ -61,7 +61,7 @@ static TypePtr realType(TypePtr type) { } } -auto print_type(const c10::Type& t) -> c10::optional { +auto print_type(const c10::Type& t) -> std::optional { auto namedType = t.cast(); if (namedType && namedType->name()) { return namedType->name().value().qualifiedName(); @@ -298,7 +298,7 @@ flatbuffers::Offset FlatbufferSerializer:: auto register_size = static_cast(code.register_size_); // schema - auto type_printer = [&](const c10::Type& t) -> c10::optional { + auto type_printer = [&](const c10::Type& t) -> std::optional { auto namedType = t.cast(); if (namedType && namedType->name()) { return namedType->name().value().qualifiedName(); diff --git a/torch/csrc/jit/serialization/import.cpp b/torch/csrc/jit/serialization/import.cpp index e724853e70c1c..40d155e61c758 100644 --- a/torch/csrc/jit/serialization/import.cpp +++ b/torch/csrc/jit/serialization/import.cpp @@ -152,7 +152,7 @@ class ScriptModuleDeserializer final { reader_->version()) {} Module deserialize( - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool restore_shapes = false); @@ -162,7 +162,7 @@ class ScriptModuleDeserializer final { std::shared_ptr compilation_unit_; std::shared_ptr reader_; std::shared_ptr storage_context_; - c10::optional device_; + std::optional device_; std::vector constants_table_; std::string code_prefix_; std::string pickle_dir_prefix_; @@ -248,7 +248,7 @@ graph(%x, %packed_params, %stride, %padding, %dilation, %groups, %r_scale, %r_ze } Module ScriptModuleDeserializer::deserialize( - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool restore_shapes) { // we populate the upgraders map before any load starts @@ -311,7 +311,7 @@ Module ScriptModuleDeserializer::deserialize( Module import_ir_module( std::shared_ptr cu, std::istream& in, - c10::optional device, + std::optional device, bool load_debug_files) { ExtraFilesMap extra_files; return 
import_ir_module( @@ -322,7 +322,7 @@ static Module _load_jit_module_from_bytes( std::shared_ptr data, size_t size, std::shared_ptr cu, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool restore_shapes); @@ -330,7 +330,7 @@ Module parse_and_initialize_jit_module( std::shared_ptr data, size_t size, ExtraFilesMap& extra_files, - c10::optional device) { + std::optional device) { populate_upgraders_graph_map(); ExtraFilesMap jit_files; std::vector jit_constants; @@ -349,7 +349,7 @@ Module parse_and_initialize_jit_module( Module load_jit_module_from_file( const std::string& filename, ExtraFilesMap& extra_files, - c10::optional device) { + std::optional device) { auto data = get_file_content(filename.c_str()); return parse_and_initialize_jit_module( std::move(std::get<0>(data)), std::get<1>(data), extra_files, device); @@ -358,7 +358,7 @@ Module load_jit_module_from_file( Module load_jit_module_from_stream( std::istream& in, ExtraFilesMap& extra_files, - c10::optional device) { + std::optional device) { auto data = get_stream_content(in); return parse_and_initialize_jit_module( std::move(std::get<0>(data)), std::get<1>(data), extra_files, device); @@ -367,7 +367,7 @@ Module load_jit_module_from_stream( Module import_ir_module( std::shared_ptr cu, std::istream& in, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files, bool restore_shapes) { @@ -390,7 +390,7 @@ Module import_ir_module( std::shared_ptr cu, std::shared_ptr reader, std::shared_ptr storage_context, - c10::optional device, + std::optional device, std::string ts_id) { ScriptModuleDeserializer deserializer( std::move(cu), @@ -405,7 +405,7 @@ Module import_ir_module( Module import_ir_module( std::shared_ptr cu, const std::string& filename, - c10::optional device, + std::optional device, bool load_debug_files) { ExtraFilesMap extra_files; return import_ir_module( @@ -415,7 +415,7 @@ Module import_ir_module( Module import_ir_module( std::shared_ptr cu, const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files, bool restore_shapes) { @@ -435,7 +435,7 @@ Module import_ir_module( Module import_ir_module( std::shared_ptr cu, std::unique_ptr rai, - c10::optional device, + std::optional device, bool load_debug_files) { ExtraFilesMap extra_files; return import_ir_module( @@ -445,7 +445,7 @@ Module import_ir_module( Module import_ir_module( std::shared_ptr cu, std::unique_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files) { std::shared_ptr rai_shared = std::move(rai); @@ -456,7 +456,7 @@ Module import_ir_module( Module import_ir_module( std::shared_ptr cu, std::shared_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files) { auto reader = std::make_shared(std::move(rai)); @@ -467,7 +467,7 @@ Module import_ir_module( Module load( std::istream& in, - c10::optional device, + std::optional device, bool load_debug_files) { auto cu = std::make_shared(); return import_ir_module(std::move(cu), in, device, load_debug_files); @@ -475,7 +475,7 @@ Module load( Module load( std::istream& in, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files) { auto cu = std::make_shared(); @@ -485,7 +485,7 @@ Module load( Module load( const std::string& filename, - c10::optional device, + std::optional device, bool load_debug_files) { auto cu = std::make_shared(); return 
import_ir_module(std::move(cu), filename, device, load_debug_files); @@ -493,7 +493,7 @@ Module load( Module load( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files) { auto cu = std::make_shared(); @@ -503,7 +503,7 @@ Module load( Module load( std::shared_ptr rai, - c10::optional device, + std::optional device, bool load_debug_files) { auto cu = std::make_shared(); ExtraFilesMap extra_files; @@ -513,7 +513,7 @@ Module load( Module load( std::shared_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files) { auto cu = std::make_shared(); @@ -525,7 +525,7 @@ Module _load_jit_module_from_bytes( std::shared_ptr data, size_t size, std::shared_ptr cu, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool restore_shapes) { TORCH_CHECK(size >= kFileFormatHeaderSize, "Unrecognized data format"); diff --git a/torch/csrc/jit/serialization/import.h b/torch/csrc/jit/serialization/import.h index c8379f38810f7..b090a1c80a3cd 100644 --- a/torch/csrc/jit/serialization/import.h +++ b/torch/csrc/jit/serialization/import.h @@ -21,25 +21,25 @@ class DeserializationStorageContext; TORCH_API Module import_ir_module( std::shared_ptr cu, const std::string& filename, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, bool load_debug_files = true); TORCH_API Module import_ir_module( std::shared_ptr cu, std::istream& in, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, bool load_debug_files = true); TORCH_API Module import_ir_module( std::shared_ptr cu, std::unique_ptr rai, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, bool load_debug_files = true); TORCH_API Module import_ir_module( std::shared_ptr cu, const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files = true, bool restore_shapes = false); @@ -49,13 +49,13 @@ TORCH_API Module import_ir_module( std::shared_ptr cu, std::shared_ptr reader, std::shared_ptr storage_context, - c10::optional device, + std::optional device, std::string ts_id /* torchscript identifier inside package */); TORCH_API Module import_ir_module( std::shared_ptr cu, std::istream& in, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files = true, bool restore_shapes = false); @@ -63,14 +63,14 @@ TORCH_API Module import_ir_module( TORCH_API Module import_ir_module( std::shared_ptr cu, std::unique_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files = true); TORCH_API Module import_ir_module( std::shared_ptr cu, std::shared_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files = true); @@ -80,12 +80,12 @@ TORCH_API Module import_ir_module( /// `torch::jit::ExportModule` in C++. TORCH_API Module load( std::istream& in, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, bool load_debug_files = true); TORCH_API Module load( std::istream& in, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files = true); @@ -96,12 +96,12 @@ TORCH_API Module load( /// Python or `torch::jit::ExportModule` in C++. 
TORCH_API Module load( const std::string& filename, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, bool load_debug_files = true); TORCH_API Module load( const std::string& filename, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files = true); @@ -112,12 +112,12 @@ TORCH_API Module load( /// Python or `torch::jit::ExportModule` in C++. TORCH_API Module load( std::shared_ptr rai, - c10::optional device = c10::nullopt, + std::optional device = c10::nullopt, bool load_debug_files = true); TORCH_API Module load( std::shared_ptr rai, - c10::optional device, + std::optional device, ExtraFilesMap& extra_files, bool load_debug_files = true); @@ -131,23 +131,23 @@ TORCH_API Module parse_and_initialize_jit_module( std::shared_ptr data, size_t size, ExtraFilesMap& extra_files, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); TORCH_API Module load_jit_module_from_file( const std::string& filename, ExtraFilesMap& extra_files, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); TORCH_API Module load_jit_module_from_stream( std::istream& in, ExtraFilesMap& extra_files, - c10::optional device = c10::nullopt); + std::optional device = c10::nullopt); TORCH_API Module parse_and_initialize_jit_module( std::shared_ptr data, size_t size, ExtraFilesMap& extra_files, - c10::optional device); + std::optional device); TORCH_API c10::intrusive_ptr ObjLoaderFunc( const at::StrongTypePtr& type, diff --git a/torch/csrc/jit/serialization/import_legacy.cpp b/torch/csrc/jit/serialization/import_legacy.cpp index 85ec2675a9c23..d7c592d18c72f 100644 --- a/torch/csrc/jit/serialization/import_legacy.cpp +++ b/torch/csrc/jit/serialization/import_legacy.cpp @@ -41,7 +41,7 @@ class ScriptModuleDeserializer final { ScriptModuleDeserializer( std::shared_ptr cu, std::shared_ptr reader, - const c10::optional& device) + const std::optional& device) : compilation_unit_(std::move(cu)), reader_(std::move(reader)), device_(device), @@ -77,7 +77,7 @@ class ScriptModuleDeserializer final { std::shared_ptr compilation_unit_; std::shared_ptr reader_; - c10::optional device_; + std::optional device_; // Legacy only tensor can be a constant. 
std::vector constant_table_; std::vector tensor_table_; @@ -377,7 +377,7 @@ Module ScriptModuleDeserializer::LEGACY_convertModule( Module LEGACY_deserialize( std::shared_ptr cu, std::shared_ptr reader, - const c10::optional& device) { + const std::optional& device) { ScriptModuleDeserializer deserializer( std::move(cu), std::move(reader), device); return deserializer.LEGACY_deserialize(); diff --git a/torch/csrc/jit/serialization/import_legacy.h b/torch/csrc/jit/serialization/import_legacy.h index a261828109596..2e206eae09bcf 100644 --- a/torch/csrc/jit/serialization/import_legacy.h +++ b/torch/csrc/jit/serialization/import_legacy.h @@ -17,7 +17,7 @@ struct CompilationUnit; Module LEGACY_deserialize( std::shared_ptr cu, std::shared_ptr reader, - const c10::optional& device); + const std::optional& device); } // namespace jit } // namespace torch diff --git a/torch/csrc/jit/serialization/import_read.cpp b/torch/csrc/jit/serialization/import_read.cpp index 533fed491773f..eeaa79c856627 100644 --- a/torch/csrc/jit/serialization/import_read.cpp +++ b/torch/csrc/jit/serialization/import_read.cpp @@ -7,9 +7,9 @@ IValue readArchiveAndTensors( const std::string& archive_name, const std::string& pickle_prefix, const std::string& tensor_prefix, - c10::optional type_resolver, - c10::optional obj_loader, - c10::optional device, + std::optional type_resolver, + std::optional obj_loader, + std::optional device, caffe2::serialize::PyTorchStreamReader& stream_reader, c10::TypePtr (*type_parser)(const std::string&), std::shared_ptr storage_context) { diff --git a/torch/csrc/jit/serialization/import_read.h b/torch/csrc/jit/serialization/import_read.h index ab89f93880c34..ae78f1979f10a 100644 --- a/torch/csrc/jit/serialization/import_read.h +++ b/torch/csrc/jit/serialization/import_read.h @@ -16,9 +16,9 @@ TORCH_API IValue readArchiveAndTensors( const std::string& archive_name, const std::string& pickle_prefix, const std::string& tensor_prefix, - c10::optional type_resolver, - c10::optional obj_loader, - c10::optional device, + std::optional type_resolver, + std::optional obj_loader, + std::optional device, caffe2::serialize::PyTorchStreamReader& stream_reader, c10::TypePtr (*type_parser)(const std::string&) = Unpickler::defaultTypeParser, diff --git a/torch/csrc/jit/serialization/import_source.cpp b/torch/csrc/jit/serialization/import_source.cpp index 53d0d9fd47359..f67c2a22e9eb1 100644 --- a/torch/csrc/jit/serialization/import_source.cpp +++ b/torch/csrc/jit/serialization/import_source.cpp @@ -304,7 +304,7 @@ void SourceImporterImpl::importNamedType( } } -c10::optional SourceImporterImpl:: +std::optional SourceImporterImpl:: attributeAssignmentSpecialHandlingHack( const QualifiedName& qualified_classname, const Assign& assign) { @@ -703,7 +703,7 @@ void SourceImporterImpl::importNamedTuple( const auto assign = Assign(statement); auto name = Var(Assign(statement).lhs()).name().name(); - c10::optional default_val; + std::optional default_val; if (assign.rhs().present()) { std::vector parsed = type_parser.evaluateDefaults( assign.rhs().range(), {assign.rhs().get()}, {assign.type().get()}); diff --git a/torch/csrc/jit/serialization/import_source.h b/torch/csrc/jit/serialization/import_source.h index 9a720a81bcbb2..9b364f379b409 100644 --- a/torch/csrc/jit/serialization/import_source.h +++ b/torch/csrc/jit/serialization/import_source.h @@ -45,7 +45,7 @@ struct SourceImporterImpl : public Resolver, private: void importFunction(const std::string& qualifier, const Def& def); void importNamedType(const std::string& 
qualifier, const ClassDef& class_def); - c10::optional attributeAssignmentSpecialHandlingHack( + std::optional attributeAssignmentSpecialHandlingHack( const QualifiedName& qualified_classname, const Assign& assign); void importClass( @@ -66,7 +66,7 @@ struct SourceImporterImpl : public Resolver, std::shared_ptr cu_; std::unordered_map> env_; SourceLoader source_loader_; - c10::optional version_ = c10::nullopt; + std::optional version_ = c10::nullopt; std::unordered_set loaded_sources_; // named types and functions loaded from a file but not yet defined because // their type has not been requested yet. diff --git a/torch/csrc/jit/serialization/pickler.cpp b/torch/csrc/jit/serialization/pickler.cpp index 6e1b399e40fd4..173ab5c13e5da 100644 --- a/torch/csrc/jit/serialization/pickler.cpp +++ b/torch/csrc/jit/serialization/pickler.cpp @@ -601,7 +601,7 @@ void Pickler::startTypeTag() { } } namespace { -c10::optional type_printer(const c10::Type& type) { +std::optional type_printer(const c10::Type& type) { if (auto dyn = type.castRaw()) { return dyn->fallback()->annotation_str(type_printer); } diff --git a/torch/csrc/jit/serialization/pickler.h b/torch/csrc/jit/serialization/pickler.h index 4f553b6f7ca8a..39726d00b0998 100644 --- a/torch/csrc/jit/serialization/pickler.h +++ b/torch/csrc/jit/serialization/pickler.h @@ -311,14 +311,14 @@ inline std::unordered_set& GetBackendMetaAllowlist() { // Dynamically obtain serialization function pairs // that require the corresponding backend. inline std::array< - c10::optional>, + std::optional>, at::COMPILE_TIME_MAX_DEVICE_TYPES>& GetBackendMetaSerialization() { // The array to save function pointer for BackendMeta serialization. // key is the DeviceType, value is std::pair obj. // value.first represent get function and value.seconde represent set function static std::array< - c10::optional>, + std::optional>, at::COMPILE_TIME_MAX_DEVICE_TYPES> BackendMetaSerialization; return BackendMetaSerialization; @@ -348,7 +348,7 @@ TORCH_API inline void TensorBackendMetaRegistry( t, " has been registered."); BackendMetaSerialization[device_type] = - c10::optional>( + std::optional>( std::make_pair(get_fptr, set_fptr)); } diff --git a/torch/csrc/jit/serialization/python_print.cpp b/torch/csrc/jit/serialization/python_print.cpp index cac31c6ce5868..f1b0865032c39 100644 --- a/torch/csrc/jit/serialization/python_print.cpp +++ b/torch/csrc/jit/serialization/python_print.cpp @@ -1714,7 +1714,7 @@ static std::vector traverseIValueAndGetObjects(IValue ivalue) { return result; } -static c10::optional printType( +static std::optional printType( const c10::Type& type, torch::jit::TypeNameUniquer& type_name_uniquer) { if (auto dyn = type.castRaw()) { diff --git a/torch/csrc/jit/serialization/source_range_serialization.cpp b/torch/csrc/jit/serialization/source_range_serialization.cpp index d3c4eaf7bf491..118becd20dc7c 100644 --- a/torch/csrc/jit/serialization/source_range_serialization.cpp +++ b/torch/csrc/jit/serialization/source_range_serialization.cpp @@ -68,7 +68,7 @@ std::shared_ptr SourceRangeDeserializer::deserialize_source( const auto& textIndex = tup_elems[0].toIntList(); int64_t fnameIndex = tup_elems[1].toInt(); int64_t starting_line_no_ = tup_elems[2].toInt(); - c10::optional filename = c10::nullopt; + std::optional filename = c10::nullopt; TORCH_CHECK( (uint64_t)fnameIndex < text_table_.size(), @@ -88,7 +88,7 @@ std::shared_ptr SourceRangeDeserializer::deserialize_source( source = std::make_shared(str_cord, filename, starting_line_no_); } else { std::string text_ = 
tup_elems[0].toStringRef(); - c10::optional filename_ = + std::optional filename_ = tup_elems[1].toOptional(); int64_t starting_line_no_ = tup_elems[2].toInt(); source = std::make_shared( @@ -229,7 +229,7 @@ void ConcreteSourceRangeUnpickler::unpickle() { } } -c10::optional ConcreteSourceRangeUnpickler:: +std::optional ConcreteSourceRangeUnpickler:: findSourceRangeThatGenerated(const SourceRange& range) { unpickle(); diff --git a/torch/csrc/jit/serialization/source_range_serialization.h b/torch/csrc/jit/serialization/source_range_serialization.h index bbfd533cd1789..044e9655a9ea1 100644 --- a/torch/csrc/jit/serialization/source_range_serialization.h +++ b/torch/csrc/jit/serialization/source_range_serialization.h @@ -55,7 +55,7 @@ class SourceRangeDeserializer { class SourceRangeUnpickler { public: - virtual c10::optional findSourceRangeThatGenerated( + virtual std::optional findSourceRangeThatGenerated( const SourceRange& range) = 0; virtual ~SourceRangeUnpickler() = default; diff --git a/torch/csrc/jit/serialization/source_range_serialization_impl.h b/torch/csrc/jit/serialization/source_range_serialization_impl.h index 2b7cd5a14ba92..9b00956ccd048 100644 --- a/torch/csrc/jit/serialization/source_range_serialization_impl.h +++ b/torch/csrc/jit/serialization/source_range_serialization_impl.h @@ -12,7 +12,7 @@ class ConcreteSourceRangeUnpickler : public SourceRangeUnpickler { public: ConcreteSourceRangeUnpickler(at::DataPtr&& data, size_t size); - c10::optional findSourceRangeThatGenerated( + std::optional findSourceRangeThatGenerated( const SourceRange& range) override; private: diff --git a/torch/csrc/jit/serialization/unpickler.cpp b/torch/csrc/jit/serialization/unpickler.cpp index 26fa21575368d..ee5793b14856a 100644 --- a/torch/csrc/jit/serialization/unpickler.cpp +++ b/torch/csrc/jit/serialization/unpickler.cpp @@ -822,7 +822,7 @@ void Unpickler::readGlobal( // like the other branches here because no REDUCE or BUILD will // be called on this value. Instead, we just put it on the stack // and return early - c10::optional scalar_type; + std::optional scalar_type; #define CHECK_SCALAR(_, name) \ if (class_name == #name "Storage") { \ scalar_type = c10::k##name; \ @@ -834,7 +834,7 @@ void Unpickler::readGlobal( return; } - c10::optional qscheme; + std::optional qscheme; for (int i = 0; i < at::COMPILE_TIME_NUM_QSCHEMES; ++i) { if (class_name == toString(static_cast(i))) { qscheme = static_cast(i); diff --git a/torch/csrc/jit/serialization/unpickler.h b/torch/csrc/jit/serialization/unpickler.h index bc980bf90522b..eed216455f3e2 100644 --- a/torch/csrc/jit/serialization/unpickler.h +++ b/torch/csrc/jit/serialization/unpickler.h @@ -68,7 +68,7 @@ class TORCH_API Unpickler { TypeResolver type_resolver, ObjLoader obj_loader, std::function read_record, - c10::optional device, + std::optional device, bool use_storage_device = false, TypeParserT type_parser = defaultTypeParser, std::shared_ptr storage_context = nullptr) @@ -178,7 +178,7 @@ class TORCH_API Unpickler { IValue empty_tuple_; std::function read_record_; - c10::optional device_; + std::optional device_; // When set to true, Unpickler will ignore the pickled device and use the // device of the DataPtr returned by the read_record_ function. The default // value of this flag is false. 
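
Note on the serialization hunks above: they only respell the type, so call sites are untouched. Fields and parameters that are now `std::optional` keep their `c10::nullopt` defaults (e.g. `version_`, `filename`, `device_`) and are still read through `has_value()` / `value()` / `value_or()`, exactly as the surrounding context lines show. Below is a minimal standalone sketch of that usage pattern; the names (`describe_source`, `filename`, `line_no`) are hypothetical and are not code from this patch.

```
// Standalone illustration only -- not part of this patch. A hypothetical
// describe_source() with the "std::optional parameter defaulted to nullopt"
// shape used throughout the changed signatures.
#include <iostream>
#include <optional>
#include <string>

std::string describe_source(
    int line_no,
    std::optional<std::string> filename = std::nullopt) {
  // value_or() supplies a fallback when the optional is empty.
  return filename.value_or("<unknown>") + ":" + std::to_string(line_no);
}

int main() {
  std::optional<std::string> name;                 // empty optional
  std::cout << describe_source(12, name) << "\n";  // prints <unknown>:12

  name = "model.py";
  if (name.has_value()) {                          // explicit presence check
    std::cout << describe_source(34, name.value()) << "\n";  // prints model.py:34
  }
  return 0;
}
```
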
diff --git a/torch/csrc/jit/tensorexpr/codegen.cpp b/torch/csrc/jit/tensorexpr/codegen.cpp index 53754aab7c0d6..e1464d0efc3ec 100644 --- a/torch/csrc/jit/tensorexpr/codegen.cpp +++ b/torch/csrc/jit/tensorexpr/codegen.cpp @@ -95,7 +95,7 @@ void CodeGen::call_with_numel(void** args, int64_t numel) { false, "This codegen backend does not implement call_with_numel"); } -static c10::optional bufSize(BufPtr buf) { +static std::optional bufSize(BufPtr buf) { size_t size = elementSize(buf->dtype().scalar_type()) * buf->dtype().lanes(); for (auto& d : buf->dims()) { if (!d->isConstant()) { diff --git a/torch/csrc/jit/tensorexpr/codegen.h b/torch/csrc/jit/tensorexpr/codegen.h index fdcf3425e3abc..42db25c26ea49 100644 --- a/torch/csrc/jit/tensorexpr/codegen.h +++ b/torch/csrc/jit/tensorexpr/codegen.h @@ -85,10 +85,10 @@ class TORCH_API CodeGen { virtual at::Tensor empty_strided( c10::IntArrayRef size, c10::IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { return at::empty_strided( size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt); } diff --git a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp index 07626232399e4..602bc49302c53 100644 --- a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp @@ -1275,10 +1275,10 @@ void CudaCodeGen::call(const std::vector& args) { at::Tensor CudaCodeGen::empty_strided( c10::IntArrayRef size, c10::IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { c10::DeviceGuard device_guard(device_opt.value()); return at::native::empty_strided_cuda( size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt); diff --git a/torch/csrc/jit/tensorexpr/cuda_codegen.h b/torch/csrc/jit/tensorexpr/cuda_codegen.h index 22de1ce32d00f..74f3d4ec7835b 100644 --- a/torch/csrc/jit/tensorexpr/cuda_codegen.h +++ b/torch/csrc/jit/tensorexpr/cuda_codegen.h @@ -235,10 +235,10 @@ class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen { at::Tensor empty_strided( c10::IntArrayRef size, c10::IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) override; + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) override; const std::vector& gpu_block_extents() const { return cuda_analysis_->gpu_block_extents(); diff --git a/torch/csrc/jit/tensorexpr/eval.cpp b/torch/csrc/jit/tensorexpr/eval.cpp index e5a59ae33ef26..be1057e21c3c7 100644 --- a/torch/csrc/jit/tensorexpr/eval.cpp +++ b/torch/csrc/jit/tensorexpr/eval.cpp @@ -1300,7 +1300,7 @@ InterpValue SimpleIREvaluator::value() const { return impl_->value(); } -c10::optional evalInt(ExprPtr e) { +std::optional evalInt(ExprPtr e) { try { return ExprEval(cast(ExprHandle(e))) .value(); diff --git a/torch/csrc/jit/tensorexpr/eval.h b/torch/csrc/jit/tensorexpr/eval.h index 64ac1edf8f188..9bbea1bd28a43 100644 --- a/torch/csrc/jit/tensorexpr/eval.h +++ b/torch/csrc/jit/tensorexpr/eval.h @@ -307,7 +307,7 @@ class ExprEval { // Evaluates the given expression and returns an int64_t value if the result of // the given expression is 
int64_t. -c10::optional evalInt(ExprPtr e); +std::optional evalInt(ExprPtr e); // Substitutes the given vars with their corresponding expressions in the input // expression. diff --git a/torch/csrc/jit/tensorexpr/expr.cpp b/torch/csrc/jit/tensorexpr/expr.cpp index cffc5e45dbf46..bf3cc13ccb39f 100644 --- a/torch/csrc/jit/tensorexpr/expr.cpp +++ b/torch/csrc/jit/tensorexpr/expr.cpp @@ -415,7 +415,7 @@ Buf::Buf( std::vector dims, Dtype dtype, ExprPtr initializer, - c10::optional> strides, + std::optional> strides, ExprPtr qscale, ExprPtr qzero) : ExprNodeBase(dtype, kPrimitive), @@ -452,11 +452,11 @@ BufHandle Buf::make( const std::string& name_hint, const std::vector& dims, Dtype dtype, - c10::optional initializer, - c10::optional> strides, - c10::optional qscale, - c10::optional qzero) { - c10::optional> opt_strides; + std::optional initializer, + std::optional> strides, + std::optional qscale, + std::optional qzero) { + std::optional> opt_strides; if (strides) { opt_strides = ExprHandleVectorToExprVector(*strides); } diff --git a/torch/csrc/jit/tensorexpr/expr.h b/torch/csrc/jit/tensorexpr/expr.h index 1a0cc57875d19..8c8de89975750 100644 --- a/torch/csrc/jit/tensorexpr/expr.h +++ b/torch/csrc/jit/tensorexpr/expr.h @@ -207,10 +207,10 @@ class TORCH_API Buf : public ExprNode { const std::string& name_hint, const std::vector& dims, Dtype dtype, - c10::optional initializer = c10::nullopt, - c10::optional> strides = c10::nullopt, - c10::optional qscale = c10::nullopt, - c10::optional qzero = c10::nullopt); + std::optional initializer = c10::nullopt, + std::optional> strides = c10::nullopt, + std::optional qscale = c10::nullopt, + std::optional qzero = c10::nullopt); // TODO: unique_name VarPtr base_handle() const { @@ -232,7 +232,7 @@ class TORCH_API Buf : public ExprNode { const std::vector& dims, Dtype dtype, ExprPtr initializer = nullptr, - c10::optional> strides = c10::nullopt, + std::optional> strides = c10::nullopt, ExprPtr qscale = nullptr, ExprPtr qzero = nullptr) : Buf(alloc(name_hint, kHandle), @@ -248,7 +248,7 @@ class TORCH_API Buf : public ExprNode { std::vector dims, Dtype dtype, ExprPtr initializer = nullptr, - c10::optional> strides = c10::nullopt, + std::optional> strides = c10::nullopt, ExprPtr qscale = nullptr, ExprPtr qzero = nullptr); diff --git a/torch/csrc/jit/tensorexpr/external_functions.cpp b/torch/csrc/jit/tensorexpr/external_functions.cpp index c593ab80e811c..a3146ccfaff55 100644 --- a/torch/csrc/jit/tensorexpr/external_functions.cpp +++ b/torch/csrc/jit/tensorexpr/external_functions.cpp @@ -80,7 +80,7 @@ std::vector constructTensors( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - c10::optional>> qdataArg) { + std::optional>> qdataArg) { std::vector buf_data_vec; std::vector> buf_dims_vec; std::vector> buf_strides_vec; @@ -123,7 +123,7 @@ std::vector constructTensors( } } else { // handle quantized - std::vector> qdata(bufs_num, c10::nullopt); + std::vector> qdata(bufs_num, c10::nullopt); for (const auto& qd : *qdataArg) { qdata[qd.first] = qd.second; } @@ -172,7 +172,7 @@ static std::vector constructTensors( int64_t* buf_strides, int8_t* buf_dtypes, std::vector> qdata) { - c10::optional>> opt = std::move(qdata); + std::optional>> opt = std::move(qdata); return constructTensors( bufs_num, buf_data, buf_ranks, buf_dims, buf_strides, buf_dtypes, opt); } @@ -184,7 +184,7 @@ std::vector constructTensors2( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - c10::optional>> qdataArg, + std::optional>> qdataArg, size_t bufs_out_num) { std::vector 
buf_data_vec; std::vector> buf_dims_vec; @@ -233,7 +233,7 @@ std::vector constructTensors2( } } else { // handle quantized - std::vector> qdata(bufs_in_num, c10::nullopt); + std::vector> qdata(bufs_in_num, c10::nullopt); for (const auto& qd : *qdataArg) { qdata[qd.first - bufs_out_num] = qd.second; } @@ -283,7 +283,7 @@ static std::vector constructTensors2( int8_t* buf_dtypes, std::vector> qdata, size_t bufs_out_num = 0u) { - c10::optional>> opt = std::move(qdata); + std::optional>> opt = std::move(qdata); return constructTensors2( bufs_in_num, buf_data, @@ -331,15 +331,15 @@ static at::Tensor quantized_mul_scalar(const at::Tensor& x, double scalar) { static at::Tensor quantized_cat( const c10::List& qxs, int64_t dim, - c10::optional scale, - c10::optional zero) { + std::optional scale, + std::optional zero) { const auto op = c10::Dispatcher::singleton() .findSchemaOrThrow("quantized::cat", "") .typed const&, int64_t, - c10::optional, - c10::optional)>(); + std::optional, + std::optional)>(); return op.redispatch( c10::DispatchKeySet({c10::DispatchKey::QuantizedCPU}), qxs, @@ -972,7 +972,7 @@ void nnc_aten_upsample_nearest2d( const int64_t x_qzero = extra_args[1]; const int64_t x_qdtype = extra_args[2]; const auto is_quantized = x_qdtype != -1; - c10::optional>> qdata; + std::optional>> qdata; if (is_quantized) { qdata = { {1u, @@ -992,9 +992,9 @@ void nnc_aten_upsample_nearest2d( auto r = at::upsample_nearest2d( x, (output_size_h != -1) - ? c10::optional({output_size_h, output_size_w}) + ? std::optional({output_size_h, output_size_w}) : c10::nullopt, - (scale_factor_h != -1.f) ? c10::optional>( + (scale_factor_h != -1.f) ? std::optional>( {scale_factor_h, scale_factor_w}) : c10::nullopt); memcpy(buf_data[0], r.const_data_ptr(), r.element_size() * r.numel()); @@ -1015,7 +1015,7 @@ void nnc_aten_upsample_nearest2d_out( const int64_t x_qzero = extra_args[1]; const int64_t x_qdtype = extra_args[2]; const auto is_quantized = x_qdtype != -1; - c10::optional>> qdata; + std::optional>> qdata; if (is_quantized) { qdata = { {1u, @@ -1042,9 +1042,9 @@ void nnc_aten_upsample_nearest2d_out( auto r = at::upsample_nearest2d( x, (output_size_h != -1) - ? c10::optional({output_size_h, output_size_w}) + ? std::optional({output_size_h, output_size_w}) : c10::nullopt, - (scale_factor_h != -1.f) ? c10::optional>( + (scale_factor_h != -1.f) ? 
std::optional>( {scale_factor_h, scale_factor_w}) : c10::nullopt); buf_data[0] = r.data_ptr(); diff --git a/torch/csrc/jit/tensorexpr/external_functions.h b/torch/csrc/jit/tensorexpr/external_functions.h index 627d67c934d59..1fd90a3f056b8 100644 --- a/torch/csrc/jit/tensorexpr/external_functions.h +++ b/torch/csrc/jit/tensorexpr/external_functions.h @@ -74,7 +74,7 @@ std::vector constructTensors( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - c10::optional>> qdataArg = + std::optional>> qdataArg = c10::nullopt); std::vector constructTensors2( @@ -84,7 +84,7 @@ std::vector constructTensors2( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - c10::optional>> qdataArg = + std::optional>> qdataArg = c10::nullopt, size_t bufs_out_num = 0); diff --git a/torch/csrc/jit/tensorexpr/graph_opt.cpp b/torch/csrc/jit/tensorexpr/graph_opt.cpp index c8f06fea063fd..01511b2b4d8c5 100644 --- a/torch/csrc/jit/tensorexpr/graph_opt.cpp +++ b/torch/csrc/jit/tensorexpr/graph_opt.cpp @@ -184,7 +184,7 @@ bool OptimizeCat(const std::shared_ptr& graph) { void annotateInputShapes( const std::shared_ptr& graph, - const std::vector>& example_inputs) { + const std::vector>& example_inputs) { TORCH_INTERNAL_ASSERT( graph->inputs().size() == example_inputs.size(), buildErrorMessage("Given inputs do not match the fuser graph inputs.")); @@ -304,8 +304,8 @@ bool isGraphCompilable(const std::shared_ptr& graph) { static void fixupTypeInfoForValue( Value* v, - c10::optional scalar_type, - c10::optional device) { + std::optional scalar_type, + std::optional device) { Node* n = v->node(); auto const& t = v->type(); if (t->kind() != TypeKind::TensorType) { @@ -339,8 +339,8 @@ static void fixupTypeInfoForValue( v->setType(new_tt); } -static c10::optional inferScalarType(Node* n) { - c10::optional scalar_type; +static std::optional inferScalarType(Node* n) { + std::optional scalar_type; for (auto v : n->inputs()) { auto const& t = v->type(); if (t->kind() == TypeKind::TensorType) { @@ -358,8 +358,8 @@ static c10::optional inferScalarType(Node* n) { return scalar_type; } -static c10::optional inferDevice(Node* n) { - c10::optional device; +static std::optional inferDevice(Node* n) { + std::optional device; for (auto v : n->inputs()) { auto const& t = v->type(); if (t->kind() == TypeKind::TensorType) { @@ -394,8 +394,8 @@ void fixupMissingShapeInfo(const std::shared_ptr& graph) { } for (auto n : graph->nodes()) { - c10::optional scalar_type = inferScalarType(n); - c10::optional device = inferDevice(n); + std::optional scalar_type = inferScalarType(n); + std::optional device = inferDevice(n); for (auto v : n->outputs()) { fixupTypeInfoForValue(v, scalar_type, device); diff --git a/torch/csrc/jit/tensorexpr/graph_opt.h b/torch/csrc/jit/tensorexpr/graph_opt.h index 1180d0ac438b9..5bd2ec8600931 100644 --- a/torch/csrc/jit/tensorexpr/graph_opt.h +++ b/torch/csrc/jit/tensorexpr/graph_opt.h @@ -60,7 +60,7 @@ bool OptimizeCat(const std::shared_ptr& graph); TORCH_API void annotateInputShapes( const std::shared_ptr& graph, - const std::vector>& example_inputs); + const std::vector>& example_inputs); TORCH_API std::shared_ptr removeUnusedSelfArgument( const std::shared_ptr& graph); TORCH_API std::shared_ptr removeGraphOutput( diff --git a/torch/csrc/jit/tensorexpr/ir.h b/torch/csrc/jit/tensorexpr/ir.h index 1ab21c83ef183..f35bafb332eaf 100644 --- a/torch/csrc/jit/tensorexpr/ir.h +++ b/torch/csrc/jit/tensorexpr/ir.h @@ -361,7 +361,7 @@ ExprPtr immLike(const ExprHandle& e, T v) { return immLike(e.node(), v); } -inline 
c10::optional intValue(const ExprPtr& e) { +inline std::optional intValue(const ExprPtr& e) { #define TYPE_CASE(Type, Name) \ if (auto v = to(e)) { \ return v->value(); \ @@ -371,7 +371,7 @@ inline c10::optional intValue(const ExprPtr& e) { return c10::nullopt; } -inline c10::optional intValue(const ExprHandle& e) { +inline std::optional intValue(const ExprHandle& e) { return intValue(e.node()); } diff --git a/torch/csrc/jit/tensorexpr/ir_simplifier.cpp b/torch/csrc/jit/tensorexpr/ir_simplifier.cpp index 4ce640bb8a739..afb7aefdda652 100644 --- a/torch/csrc/jit/tensorexpr/ir_simplifier.cpp +++ b/torch/csrc/jit/tensorexpr/ir_simplifier.cpp @@ -1867,7 +1867,7 @@ class ModRound { ExprPtr mod_divisor; }; -static c10::optional isModRound(TermPtr e) { +static std::optional isModRound(TermPtr e) { DivPtr div{nullptr}; ModPtr mod{nullptr}; ExprPtr denom{nullptr}; diff --git a/torch/csrc/jit/tensorexpr/kernel.cpp b/torch/csrc/jit/tensorexpr/kernel.cpp index a360762f5bf9c..50578a0414572 100644 --- a/torch/csrc/jit/tensorexpr/kernel.cpp +++ b/torch/csrc/jit/tensorexpr/kernel.cpp @@ -128,9 +128,9 @@ bool& getOptConditionals() { return opt_conditionals; } -c10::optional pickDeviceType( +std::optional pickDeviceType( const at::ArrayRef& inputs) { - c10::optional device = c10::nullopt; + std::optional device = c10::nullopt; for (auto const& input : inputs) { auto tt = input->type()->cast(); if (tt && tt->device()) { @@ -143,9 +143,9 @@ c10::optional pickDeviceType( return device; } -static c10::optional pickDeviceType( +static std::optional pickDeviceType( const std::shared_ptr& graph) { - c10::optional device = c10::nullopt; + std::optional device = c10::nullopt; for (auto const& node : graph->nodes()) { for (auto const& input : node->inputs()) { if (auto tt = input->type()->cast()) { @@ -179,7 +179,7 @@ static c10::optional pickDeviceType( // If v is a Tensor with concretely-known sizes and dtype, return them, else // nullopt. -static c10::optional getTensorInfoJit(torch::jit::Value* v) { +static std::optional getTensorInfoJit(torch::jit::Value* v) { auto const& it = v->type()->cast(); c10::ScalarType dtype = c10::ScalarType::Float; @@ -527,7 +527,7 @@ std::vector TensorExprKernel::sizesForValue( throw malformed_input(msg); } -static c10::optional findDtypeForValue(const torch::jit::Value* v) { +static std::optional findDtypeForValue(const torch::jit::Value* v) { if (v->type()->kind() == TypeKind::TensorType) { auto tt = v->type()->cast(); if (tt->scalarType()) { @@ -707,7 +707,7 @@ static void fuseAllLoops(StmtPtr st) { } // Compute the trip count of a loop if it is a constant. 
-static c10::optional tripCount(ForPtr loop) { +static std::optional tripCount(ForPtr loop) { auto tc = IRSimplifier::simplify( cast(ExprHandle(loop->stop()) - ExprHandle(loop->start()))); if (auto val = to(tc.node())) { @@ -958,7 +958,7 @@ std::string TensorExprKernel::getCodeGenName(BackendType backendType) { } template -static bool isValidPrimProperty(const c10::optional& a, T b) { +static bool isValidPrimProperty(const std::optional& a, T b) { return !a.has_value() || *a == b; } diff --git a/torch/csrc/jit/tensorexpr/kernel.h b/torch/csrc/jit/tensorexpr/kernel.h index 45658beb750e9..d7c737d8f8f2c 100644 --- a/torch/csrc/jit/tensorexpr/kernel.h +++ b/torch/csrc/jit/tensorexpr/kernel.h @@ -274,10 +274,10 @@ class TORCH_API TensorExprKernel { const std::vector& interm_bufs); struct UnpackedTensorOptions { - c10::optional dtype; - c10::optional layout; - c10::optional device; - c10::optional pinned_memory; + std::optional dtype; + std::optional layout; + std::optional device; + std::optional pinned_memory; UnpackedTensorOptions(const c10::TensorOptions& opts) : dtype(c10::optTypeMetaToScalarType(opts.dtype_opt())), @@ -370,7 +370,7 @@ TORCH_API bool setFallbackAllowed(bool value); TORCH_API bool& getCatWoConditionals(); TORCH_API bool& getOptConditionals(); -TORCH_API c10::optional pickDeviceType( +TORCH_API std::optional pickDeviceType( const at::ArrayRef& inputs); bool isContiguous( diff --git a/torch/csrc/jit/tensorexpr/llvm_codegen.cpp b/torch/csrc/jit/tensorexpr/llvm_codegen.cpp index fd7f0818996c9..dec03637847e2 100644 --- a/torch/csrc/jit/tensorexpr/llvm_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/llvm_codegen.cpp @@ -84,16 +84,16 @@ C10_DEFINE_bool( namespace torch::jit::tensorexpr { -c10::optional& LLVMTargetTriple() { - static c10::optional triple = c10::nullopt; +std::optional& LLVMTargetTriple() { + static std::optional triple = c10::nullopt; return triple; } -c10::optional& LLVMTargetCPU() { - static c10::optional cpu = c10::nullopt; +std::optional& LLVMTargetCPU() { + static std::optional cpu = c10::nullopt; return cpu; } -c10::optional& LLVMTargetAttrs() { - static c10::optional attrs = c10::nullopt; +std::optional& LLVMTargetAttrs() { + static std::optional attrs = c10::nullopt; return attrs; } bool& LLVMAOTWorkflow() { @@ -306,9 +306,9 @@ class LLVMCodeGenImpl : public IRVisitor { at::Device device, Dtype dtype, std::string kernel_func_name, - c10::optional triple, - c10::optional cpu, - c10::optional attrs); + std::optional triple, + std::optional cpu, + std::optional attrs); ~LLVMCodeGenImpl() = default; llvm::JITTargetAddress getKernelAddress() const; @@ -397,9 +397,9 @@ LLVMCodeGen::LLVMCodeGen( at::Device device, const std::string& kernel_func_name, Dtype dtype, - c10::optional triple, - c10::optional cpu, - c10::optional attrs) + std::optional triple, + std::optional cpu, + std::optional attrs) : CodeGen(stmt, args, device, kernel_func_name) { impl_ = std::make_unique( this->stmt(), @@ -446,10 +446,10 @@ void LLVMCodeGen::call(const std::vector& args) { at::Tensor LLVMCodeGen::empty_strided( c10::IntArrayRef size, c10::IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) { + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) { return at::native::empty_strided_cpu( size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt); } @@ -489,9 +489,9 @@ LLVMCodeGenImpl::LLVMCodeGenImpl( at::Device device, Dtype dtype, 
std::string kernel_func_name, - c10::optional triple, - c10::optional cpu, - c10::optional attrs) + std::optional triple, + std::optional cpu, + std::optional attrs) : context_(std::make_unique()), irb_(getContext()), kernel_func_name_(std::move(kernel_func_name)), diff --git a/torch/csrc/jit/tensorexpr/llvm_codegen.h b/torch/csrc/jit/tensorexpr/llvm_codegen.h index 7ab506fa8fe1e..74271fa879f3d 100644 --- a/torch/csrc/jit/tensorexpr/llvm_codegen.h +++ b/torch/csrc/jit/tensorexpr/llvm_codegen.h @@ -27,9 +27,9 @@ class TORCH_API LLVMCodeGen : public CodeGen { at::Device device = at::kCPU, const std::string& kernel_func_name = "func", Dtype dtype = kInt, - c10::optional triple = c10::nullopt, - c10::optional cpu = c10::nullopt, - c10::optional attrs = c10::nullopt); + std::optional triple = c10::nullopt, + std::optional cpu = c10::nullopt, + std::optional attrs = c10::nullopt); explicit LLVMCodeGen(StmtPtr stmt); LLVMCodeGen() = delete; @@ -48,10 +48,10 @@ class TORCH_API LLVMCodeGen : public CodeGen { at::Tensor empty_strided( c10::IntArrayRef size, c10::IntArrayRef stride, - c10::optional dtype_opt, - c10::optional layout_opt, - c10::optional device_opt, - c10::optional pin_memory_opt) override; + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt) override; template T value() { @@ -126,14 +126,14 @@ struct TORCH_API LLVMCodeGenBuilder { at::Device device_ = at::kCPU; std::string kernelFuncName_ = "func"; Dtype dtype_ = kInt; - c10::optional triple_ = c10::nullopt; - c10::optional cpu_ = c10::nullopt; - c10::optional attrs_ = c10::nullopt; + std::optional triple_ = c10::nullopt; + std::optional cpu_ = c10::nullopt; + std::optional attrs_ = c10::nullopt; }; -TORCH_API c10::optional& LLVMTargetTriple(); -TORCH_API c10::optional& LLVMTargetCPU(); -TORCH_API c10::optional& LLVMTargetAttrs(); +TORCH_API std::optional& LLVMTargetTriple(); +TORCH_API std::optional& LLVMTargetCPU(); +TORCH_API std::optional& LLVMTargetAttrs(); TORCH_API bool& LLVMAOTWorkflow(); } // namespace tensorexpr diff --git a/torch/csrc/jit/tensorexpr/llvm_jit.cpp b/torch/csrc/jit/tensorexpr/llvm_jit.cpp index 71f4fed3db3e7..37a4b8db6bb27 100644 --- a/torch/csrc/jit/tensorexpr/llvm_jit.cpp +++ b/torch/csrc/jit/tensorexpr/llvm_jit.cpp @@ -67,8 +67,8 @@ static llvm::SubtargetFeatures getHostSubtargetFeatures() { // Create a JTMB using the host's triple. CPU and attrs default to the host // unless they are supplied. static llvm::orc::JITTargetMachineBuilder makeJTMBFromHost( - c10::optional cpu, - c10::optional attrs) { + std::optional cpu, + std::optional attrs) { llvm::orc::JITTargetMachineBuilder JTMB( (llvm::Triple(llvm::sys::getProcessTriple()))); JTMB.setCPU(cpu.value_or(llvm::sys::getHostCPUName().str())); @@ -85,8 +85,8 @@ static llvm::orc::JITTargetMachineBuilder makeJTMBFromHost( // Create a JTMB using a given triple. Do not set cpu or attrs if not supplied. static llvm::orc::JITTargetMachineBuilder makeJTMBFromTriple( const std::string& triple, - c10::optional cpu, - c10::optional attrs) { + std::optional cpu, + std::optional attrs) { llvm::orc::JITTargetMachineBuilder JTMB((llvm::Triple(triple))); if (cpu) { JTMB.setCPU(*cpu); @@ -100,9 +100,9 @@ static llvm::orc::JITTargetMachineBuilder makeJTMBFromTriple( } static llvm::orc::JITTargetMachineBuilder makeTargetMachineBuilder( - c10::optional triple, - c10::optional cpu, - c10::optional attrs) { + std::optional triple, + std::optional cpu, + std::optional attrs) { auto JTMB = triple ? 
makeJTMBFromTriple(*triple, cpu, attrs) : makeJTMBFromHost(cpu, attrs); #if LLVM_VERSION_MAJOR >= 18 @@ -160,9 +160,9 @@ class TORCH_API PytorchLLVMJITImpl { public: PytorchLLVMJITImpl( - c10::optional triple, - c10::optional cpu, - c10::optional attrs) + std::optional triple, + std::optional cpu, + std::optional attrs) : TM(assertSuccess(makeTargetMachineBuilder(triple, cpu, attrs) .createTargetMachine())), LLJ(assertSuccess( @@ -241,9 +241,9 @@ class TORCH_API PytorchLLVMJITImpl { public: PytorchLLVMJITImpl( - c10::optional triple, - c10::optional cpu, - c10::optional attrs) + std::optional triple, + std::optional cpu, + std::optional attrs) : Resolver(createLegacyLookupResolver( ES, [this](const std::string& Name) -> JITSymbol { @@ -320,9 +320,9 @@ class TORCH_API PytorchLLVMJITImpl { #endif PytorchLLVMJIT::PytorchLLVMJIT( - c10::optional triple, - c10::optional cpu, - c10::optional attrs) + std::optional triple, + std::optional cpu, + std::optional attrs) : impl_(std::make_unique(triple, cpu, attrs)) {} PytorchLLVMJIT::~PytorchLLVMJIT() = default; diff --git a/torch/csrc/jit/tensorexpr/llvm_jit.h b/torch/csrc/jit/tensorexpr/llvm_jit.h index 4aca55a9abf47..98238e0043885 100644 --- a/torch/csrc/jit/tensorexpr/llvm_jit.h +++ b/torch/csrc/jit/tensorexpr/llvm_jit.h @@ -51,9 +51,9 @@ class PytorchLLVMJITImpl; class TORCH_API PytorchLLVMJIT { public: PytorchLLVMJIT( - c10::optional triple, - c10::optional cpu, - c10::optional attrs); + std::optional triple, + std::optional cpu, + std::optional attrs); ~PytorchLLVMJIT(); void addModule(std::unique_ptr M, std::unique_ptr C); diff --git a/torch/csrc/jit/tensorexpr/lowerings.cpp b/torch/csrc/jit/tensorexpr/lowerings.cpp index 79f0c59e59b39..1518e06376c14 100644 --- a/torch/csrc/jit/tensorexpr/lowerings.cpp +++ b/torch/csrc/jit/tensorexpr/lowerings.cpp @@ -55,7 +55,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto sub_lambda = [](const ExprHandle& lhs, const ExprHandle& rhs) { // NB: sub isn't supported on boolean, no need to promote to integer. 
@@ -86,7 +86,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_mul", @@ -108,7 +108,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, \ const std::vector& outputShape, \ const std::vector& outputStrides, \ - const c10::optional& outputType, \ + const std::optional& outputType, \ at::Device device) { \ return computeScalar( \ "aten_#op_name", \ @@ -131,7 +131,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeScalar( "aten_div", @@ -155,7 +155,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, \ const std::vector& outputShape, \ const std::vector& outputStrides, \ - const c10::optional& outputType, \ + const std::optional& outputType, \ at::Device device) { \ return computeScalar( \ "aten_#op_name", \ @@ -179,7 +179,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, \ const std::vector& outputShape, \ const std::vector& outputStrides, \ - const c10::optional& outputType, \ + const std::optional& outputType, \ at::Device device) { \ return computeScalar( \ "aten_#op_name", \ @@ -204,7 +204,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, \ const std::vector& outputShape, \ const std::vector& outputStrides, \ - const c10::optional& outputType, \ + const std::optional& outputType, \ at::Device device) { \ return computeScalar( \ "aten_#op_name", \ @@ -225,7 +225,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_div", @@ -245,7 +245,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_and", @@ -264,7 +264,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_or", @@ -283,7 +283,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_xor", @@ -302,7 +302,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_lshift", @@ -321,7 +321,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_rshift", @@ -340,7 +340,7 @@ int nnc_lowerings_lazy_registration() { 
[](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_eq", @@ -359,7 +359,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_ne", @@ -378,7 +378,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_ge", @@ -397,7 +397,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_gt", @@ -416,7 +416,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_le", @@ -435,7 +435,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_lt", @@ -453,7 +453,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_min", @@ -471,7 +471,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_max", @@ -490,7 +490,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeThreeOperand( "aten_masked_fill", @@ -513,7 +513,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { bool noMin = false; bool noMax = false; @@ -561,7 +561,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeFourOperand( "aten_addcmul", @@ -580,7 +580,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { // check if the activation is quantized const BufHandle& x = std::get(inputs[0]); @@ -604,7 +604,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const 
std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_silu", @@ -620,7 +620,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_reciprocal", @@ -636,7 +636,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_neg", @@ -652,7 +652,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_isnan", @@ -673,7 +673,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto A = std::get(inputs[0]); if (A.node()->qscale()) { @@ -697,7 +697,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_leaky_relu", @@ -719,7 +719,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_relu6", @@ -739,7 +739,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { const auto& kApproximate = std::get(inputs[1]); std::vector operands = {inputs.front()}; @@ -787,7 +787,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_log", @@ -805,7 +805,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_log10", @@ -823,7 +823,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_log1p", @@ -841,7 +841,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_log2", @@ -859,7 +859,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const 
c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_exp", @@ -877,7 +877,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_expm1", @@ -895,7 +895,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_erf", @@ -913,7 +913,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_erfc", @@ -931,7 +931,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_cos", @@ -949,7 +949,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_sin", @@ -967,7 +967,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_tan", @@ -985,7 +985,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { const BufHandle& rhs = std::get(inputs[1]); auto dtype = rhs.dtype(); @@ -1005,7 +1005,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_pow", @@ -1050,7 +1050,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_fmod", @@ -1069,7 +1069,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeThreeOperand( "aten_lerp", @@ -1089,7 +1089,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto imodImpl = [](const ExprHandle& lhs, const ExprHandle& rhs) { return Mod::make(lhs, rhs); @@ -1137,7 +1137,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + 
const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_acos", @@ -1155,7 +1155,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_asin", @@ -1173,7 +1173,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_cosh", @@ -1191,7 +1191,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_sinh", @@ -1209,7 +1209,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_atan", @@ -1227,7 +1227,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_atan2", @@ -1247,7 +1247,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_tanh", @@ -1265,7 +1265,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeThreeOperand( "aten_hardtanh", @@ -1286,7 +1286,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeThreeOperand( "aten_softplus", @@ -1314,7 +1314,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_mish", @@ -1333,7 +1333,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeFourOperand( "aten_elu", @@ -1366,7 +1366,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_hardsigmoid", @@ -1387,7 +1387,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return 
computeOneOperand( "aten_hardswish", @@ -1410,7 +1410,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTwoOperand( "aten_hardshrink", @@ -1433,7 +1433,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_sqrt", @@ -1451,7 +1451,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_rsqrt", @@ -1469,7 +1469,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_abs", @@ -1488,7 +1488,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeSign(inputs, outputShape); }); RegisterNNCLoweringsFunction aten_ceil( @@ -1496,7 +1496,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_ceil", @@ -1512,7 +1512,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_floor", @@ -1528,7 +1528,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_round", @@ -1544,7 +1544,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_trunc", @@ -1560,7 +1560,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_cast_float", @@ -1582,7 +1582,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { // see handling of aten::to in tensorexpr_fuser.cpp for why we only // need to handle the first input @@ -1604,7 +1604,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& 
outputType, at::Device device) { return computeThreeOperand( "aten_threshold", @@ -1628,7 +1628,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeConditionWithTwoOperand( "aten_where", @@ -1646,7 +1646,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_frac", @@ -1666,7 +1666,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "aten_lgamma", @@ -1684,7 +1684,7 @@ int nnc_lowerings_lazy_registration() { // {"aten::rand_like"}, // [](const std::vector& inputs, // const std::vector& outputShape, - // const c10::optional& outputType, + // const std::optional& outputType, // at::Device device) { // return computeOneOperand( // "aten_rand_like", @@ -1701,7 +1701,7 @@ int nnc_lowerings_lazy_registration() { // {"aten::slice"}, // [](const std::vector& inputs, // const std::vector& outputShape, - // const c10::optional& outputType, + // const std::optional& outputType, // at::Device device) { // return Compute( // "aten_slice", @@ -1723,7 +1723,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return Compute( "aten_unsqueeze", @@ -1757,7 +1757,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeTranspose( {inputs[0], (int64_t)1, (int64_t)0}, @@ -1774,7 +1774,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto A = std::get(inputs[0]); // Trivial case of 0-dim tensors: just a copy of the input @@ -1848,7 +1848,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeSoftmax(inputs, outputShape, outputStrides, false); }); @@ -1858,7 +1858,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeSoftmax(inputs, outputShape, outputStrides, true); }); @@ -1892,7 +1892,7 @@ int nnc_lowerings_lazy_registration() { [](const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto add_lambda = [](const ExprHandle& lhs, const ExprHandle& rhs) { return boolToInteger(lhs) + boolToInteger(rhs); diff --git a/torch/csrc/jit/tensorexpr/lowerings.h 
b/torch/csrc/jit/tensorexpr/lowerings.h index 6d8b2c433ae37..da22899ba28ce 100644 --- a/torch/csrc/jit/tensorexpr/lowerings.h +++ b/torch/csrc/jit/tensorexpr/lowerings.h @@ -32,7 +32,7 @@ using NNCLoweringFunction = std::function&, const std::vector&, const std::vector&, - const c10::optional&, + const std::optional&, at::Device)>; TORCH_API FunctionSchemaMap& getNNCLoweringRegistry(); diff --git a/torch/csrc/jit/tensorexpr/operators/conv2d.cpp b/torch/csrc/jit/tensorexpr/operators/conv2d.cpp index 3f29dad4c13f3..bdf313f0ad051 100644 --- a/torch/csrc/jit/tensorexpr/operators/conv2d.cpp +++ b/torch/csrc/jit/tensorexpr/operators/conv2d.cpp @@ -353,7 +353,7 @@ Tensor computeConv2d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { @@ -401,7 +401,7 @@ Tensor computeConv1d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { @@ -435,7 +435,7 @@ Tensor computePrepackedConv2dClampRun( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { @@ -454,7 +454,7 @@ Tensor computePrepackedLinearClampRun( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { @@ -473,7 +473,7 @@ Tensor computeMkldnnPrepackedConvRun( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { diff --git a/torch/csrc/jit/tensorexpr/operators/conv2d.h b/torch/csrc/jit/tensorexpr/operators/conv2d.h index 65902960192ab..f842a1350a551 100644 --- a/torch/csrc/jit/tensorexpr/operators/conv2d.h +++ b/torch/csrc/jit/tensorexpr/operators/conv2d.h @@ -74,31 +74,31 @@ Tensor computeConv2d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeConv1d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computePrepackedConv2dClampRun( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computePrepackedLinearClampRun( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeMkldnnPrepackedConvRun( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); } // namespace tensorexpr } // namespace jit diff --git a/torch/csrc/jit/tensorexpr/operators/matmul.cpp b/torch/csrc/jit/tensorexpr/operators/matmul.cpp index 
38b420a7aca1c..92c6c14519325 100644 --- a/torch/csrc/jit/tensorexpr/operators/matmul.cpp +++ b/torch/csrc/jit/tensorexpr/operators/matmul.cpp @@ -9,7 +9,7 @@ Tensor computeMatmul( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { @@ -56,7 +56,7 @@ Tensor computeAddMM( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { diff --git a/torch/csrc/jit/tensorexpr/operators/matmul.h b/torch/csrc/jit/tensorexpr/operators/matmul.h index 70f3f4bf7bf03..40ef3cfd9b619 100644 --- a/torch/csrc/jit/tensorexpr/operators/matmul.h +++ b/torch/csrc/jit/tensorexpr/operators/matmul.h @@ -10,13 +10,13 @@ Tensor computeMatmul( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeAddMM( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); } // namespace tensorexpr diff --git a/torch/csrc/jit/tensorexpr/operators/misc.cpp b/torch/csrc/jit/tensorexpr/operators/misc.cpp index c282787485ea4..70991f6db1f4c 100644 --- a/torch/csrc/jit/tensorexpr/operators/misc.cpp +++ b/torch/csrc/jit/tensorexpr/operators/misc.cpp @@ -136,7 +136,7 @@ ExprHandle promoteIntegerToDefaultType(const ExprHandle& e) { ExprHandle demoteOutput( const ExprHandle& e, - const c10::optional type) { + const std::optional type) { if (!type.has_value()) { return e; } @@ -160,7 +160,7 @@ ExprHandle demoteOutput( return e; } -c10::optional getTensorInfo(BufHandle b) { +std::optional getTensorInfo(BufHandle b) { std::vector dims; for (auto dim : b.dims()) { auto val = intValue(dim.node()); @@ -321,7 +321,7 @@ Tensor computeChunk( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return Compute( "prim_constantchunk", @@ -355,7 +355,7 @@ Tensor computeTranspose( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto A = std::get(inputs[0]); // Trivial case of 0-dim and 1-dim tensors: transpose is just a copy @@ -382,7 +382,7 @@ Tensor computeExpand( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto A = std::get(inputs[0]); return Compute( @@ -396,7 +396,7 @@ Tensor computeReshape( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { auto A = std::get(inputs[0]); if (A.ndim() == 0) { @@ -464,7 +464,7 @@ Tensor computeFlatten( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { std::vector outputShapeVec; for (const auto dim : c10::irange(outputShape.size())) { @@ -622,7 +622,7 @@ Tensor 
computeCat( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { if (device == at::kCPU && getCatWoConditionals()) { return computeCatWoConditionals(inputs, outputShape, outputStrides); @@ -685,7 +685,7 @@ Tensor computeEmbedding( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { diff --git a/torch/csrc/jit/tensorexpr/operators/misc.h b/torch/csrc/jit/tensorexpr/operators/misc.h index 5650b35147b17..50f53b0b50d07 100644 --- a/torch/csrc/jit/tensorexpr/operators/misc.h +++ b/torch/csrc/jit/tensorexpr/operators/misc.h @@ -12,7 +12,7 @@ struct TensorInfo { std::vector dims; c10::ScalarType dtype; }; -c10::optional getTensorInfo(BufHandle b); +std::optional getTensorInfo(BufHandle b); int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size); @@ -26,7 +26,7 @@ ExprHandle promoteIntegerToDefaultType(const ExprHandle& e); ExprHandle promoteHalfToFloat(const ExprHandle& e); ExprHandle demoteOutput( const ExprHandle& e, - const c10::optional type); + const std::optional type); std::vector broadcastShapes( std::vector> shapes); @@ -51,31 +51,31 @@ Tensor computeChunk( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeTranspose( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeExpand( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeReshape( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeFlatten( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeCatWoConditionals( const std::vector& inputs, @@ -84,13 +84,13 @@ Tensor computeCat( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeEmbedding( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); } // namespace tensorexpr diff --git a/torch/csrc/jit/tensorexpr/operators/norm.cpp b/torch/csrc/jit/tensorexpr/operators/norm.cpp index 335cfae05f4d4..c87a931d1fc43 100644 --- a/torch/csrc/jit/tensorexpr/operators/norm.cpp +++ b/torch/csrc/jit/tensorexpr/operators/norm.cpp @@ -9,7 +9,7 @@ Tensor computeBatchNorm( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { bool hasWeight = true; bool hasBias = true; diff --git a/torch/csrc/jit/tensorexpr/operators/norm.h b/torch/csrc/jit/tensorexpr/operators/norm.h index 
7c8cc43387b01..dbe6140cca8b4 100644 --- a/torch/csrc/jit/tensorexpr/operators/norm.h +++ b/torch/csrc/jit/tensorexpr/operators/norm.h @@ -10,7 +10,7 @@ Tensor computeBatchNorm( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); } // namespace tensorexpr diff --git a/torch/csrc/jit/tensorexpr/operators/pointwise.cpp b/torch/csrc/jit/tensorexpr/operators/pointwise.cpp index 57c63fcd92391..19aad4d015e27 100644 --- a/torch/csrc/jit/tensorexpr/operators/pointwise.cpp +++ b/torch/csrc/jit/tensorexpr/operators/pointwise.cpp @@ -10,7 +10,7 @@ using namespace torch::jit::tensorexpr; Tensor computeSign( const std::vector& inputValues, const std::vector& outputShape, - c10::optional> outputStrides) { + std::optional> outputStrides) { return Compute( "aten_sign", outputShape, outputStrides, [&](ParameterList& axes) { std::vector indices(axes.begin(), axes.end()); @@ -28,7 +28,7 @@ Tensor computeOneOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr, const int checkParamTypes) { return Compute( @@ -51,7 +51,7 @@ Tensor computeTwoOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr) { return Compute( @@ -76,7 +76,7 @@ Tensor computeTwoOperandWithAlpha( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr) { return Compute( @@ -102,7 +102,7 @@ Tensor computeConditionWithTwoOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function< ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>& innerExpr) { @@ -131,7 +131,7 @@ Tensor computeThreeOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function< ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>& innerExpr, @@ -161,7 +161,7 @@ Tensor computeFourOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { return computeOneOperand( "copy", @@ -207,7 +207,7 @@ Tensor computeScalar( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr) { auto dt = Dtype(*outputType); diff --git a/torch/csrc/jit/tensorexpr/operators/pointwise.h b/torch/csrc/jit/tensorexpr/operators/pointwise.h index 8de218dbb0383..0ce10424b3d30 100644 --- a/torch/csrc/jit/tensorexpr/operators/pointwise.h +++ b/torch/csrc/jit/tensorexpr/operators/pointwise.h @@ -9,14 +9,14 @@ namespace tensorexpr { TORCH_API Tensor computeSign( const std::vector& inputs, const 
std::vector& outputShape, - c10::optional> outputStrides = c10::nullopt); + std::optional> outputStrides = c10::nullopt); Tensor computeOneOperand( const std::string& name, const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr, const int checkParamTypes = kAllTypes); Tensor computeTwoOperand( @@ -24,7 +24,7 @@ Tensor computeTwoOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr); Tensor computeTwoOperandWithAlpha( @@ -32,7 +32,7 @@ Tensor computeTwoOperandWithAlpha( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr); Tensor computeConditionWithTwoOperand( @@ -40,7 +40,7 @@ Tensor computeConditionWithTwoOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function< ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>& innerExpr); @@ -49,7 +49,7 @@ Tensor computeThreeOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function< ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>& innerExpr, @@ -59,7 +59,7 @@ Tensor computeFourOperand( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeScalar( @@ -77,7 +77,7 @@ Tensor computeScalar( const std::vector& inputValues, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, const std::function& innerExpr); diff --git a/torch/csrc/jit/tensorexpr/operators/quantization.cpp b/torch/csrc/jit/tensorexpr/operators/quantization.cpp index da6d43cbb7aa9..66c0688538a1d 100644 --- a/torch/csrc/jit/tensorexpr/operators/quantization.cpp +++ b/torch/csrc/jit/tensorexpr/operators/quantization.cpp @@ -141,7 +141,7 @@ Tensor computeQuantizePerTensor( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional&, + const std::optional&, at::Device) { std::vector vars; std::vector indices; @@ -181,7 +181,7 @@ Tensor computeQuantizedAdd( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device) { const BufHandle& QA = std::get(inputs[0]); const BufHandle& QB = std::get(inputs[1]); @@ -225,7 +225,7 @@ Tensor computeQuantizePerTensorExternalCall( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, at::Device) { const BufHandle& x = std::get(inputs[0]); const auto qscale = std::get(inputs[1]); @@ -257,7 +257,7 @@ Tensor computeDequantizeExternalCall( const std::vector& 
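The `computeSign` declaration at the top of this hunk keeps its default argument spelled `c10::nullopt` even though the parameter type is now `std::optional`; that can only compile because `c10::nullopt` is, or converts to, `std::nullopt` in this codebase. A standalone equivalent spelled entirely in `std::`, with invented stride logic purely for illustration:

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Optional-with-default parameter, in the spirit of computeSign's
// outputStrides argument.
std::vector<int64_t> resolveStrides(
    const std::vector<int64_t>& shape,
    std::optional<std::vector<int64_t>> strides = std::nullopt) {
  if (strides) {
    return *strides;  // caller supplied explicit strides
  }
  // Otherwise fall back to contiguous (row-major) strides.
  std::vector<int64_t> out(shape.size(), 1);
  for (auto i = static_cast<int64_t>(shape.size()) - 2; i >= 0; --i) {
    out[i] = out[i + 1] * shape[i + 1];
  }
  return out;
}
```

Call sites may write `resolveStrides(shape)` or `resolveStrides(shape, explicit_strides)` interchangeably, which is why the defaulted optional survives the rename untouched.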
inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device) { Dtype dtype = kFloat; if (outputType) { @@ -282,7 +282,7 @@ Tensor computeQuantizedConv2dPrepack( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device) { Dtype dtype = kFloat; if (outputType) { @@ -332,7 +332,7 @@ Tensor computeQuantizedConv1d( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qx = std::get(inputs[0]); @@ -364,7 +364,7 @@ Tensor computeQuantizedConv2d( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qx = std::get(inputs[0]); @@ -396,7 +396,7 @@ Tensor computeQuantizedConv2dRelu( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qx = std::get(inputs[0]); @@ -428,7 +428,7 @@ Tensor computeQuantizedLinear( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qx = std::get(inputs[0]); @@ -460,7 +460,7 @@ Tensor computeQuantizedLinearRelu( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qx = std::get(inputs[0]); @@ -492,7 +492,7 @@ Tensor computeQuantizedAddExternalCall( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qa = std::get(inputs[0]); @@ -536,7 +536,7 @@ Tensor computeQuantizedMul( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qa = std::get(inputs[0]); @@ -567,7 +567,7 @@ Tensor computeQuantizedMulScalar( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qa = std::get(inputs[0]); @@ -594,7 +594,7 @@ Tensor computeQuantizedRelu( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { const BufHandle& qa = std::get(inputs[0]); @@ -625,7 +625,7 @@ Tensor computeQuantizedCat( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, // NOLINTNEXTLINE at::Device device) { // NOLINTNEXTLINE(performance-unnecessary-copy-initialization) @@ -663,7 +663,7 @@ Tensor computeDequantize( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + 
const std::optional& outputType, at::Device) { Dtype dtype = kFloat; if (outputType) { @@ -695,7 +695,7 @@ Tensor computeUpsampleNearest2d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device) { auto A = std::get(inputs[0]); const auto& output_height = outputShape[2]; @@ -742,7 +742,7 @@ Tensor computeUpsampleNearest2dExternalCall( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device) { Dtype dtype = kFloat; if (outputType) { @@ -802,7 +802,7 @@ Tensor computeQuantizedSigmoidExternalCall( const std::vector& outputShape, const std::vector& outputStrides, // NOLINTNEXTLINE - const c10::optional& outputType, + const std::optional& outputType, at::Device) { const BufHandle& qx = std::get(inputs[0]); diff --git a/torch/csrc/jit/tensorexpr/operators/quantization.h b/torch/csrc/jit/tensorexpr/operators/quantization.h index 019b2349b1840..d48c9e3273ba0 100644 --- a/torch/csrc/jit/tensorexpr/operators/quantization.h +++ b/torch/csrc/jit/tensorexpr/operators/quantization.h @@ -20,140 +20,140 @@ TORCH_API Tensor computeQuantizePerTensor( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizePerTensorExternalCall( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedConv1d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedConv2dPrepack( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedConv1d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedConv2d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedConv2dRelu( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedLinear( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedLinearRelu( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedAdd( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeQuantizedAddExternalCall( 
const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedMul( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedMulScalar( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedCat( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedRelu( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeDequantize( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeDequantizeExternalCall( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeUpsampleNearest2d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeUpsampleNearest2dExternalCall( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeQuantizedSigmoidExternalCall( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device); } // namespace tensorexpr } // namespace jit diff --git a/torch/csrc/jit/tensorexpr/operators/reduction.cpp b/torch/csrc/jit/tensorexpr/operators/reduction.cpp index dfd6e2d01adf5..b5f53560c9be3 100644 --- a/torch/csrc/jit/tensorexpr/operators/reduction.cpp +++ b/torch/csrc/jit/tensorexpr/operators/reduction.cpp @@ -23,7 +23,7 @@ Tensor computeSum( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { std::vector axes; bool keepdim = false; @@ -108,7 +108,7 @@ Tensor computeMean( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { @@ -140,7 +140,7 @@ Tensor computeMax( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) { Dtype dtype = kFloat; if (outputType) { @@ -164,7 +164,7 @@ Tensor computeAdaptiveAvgPool2d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device) 
{ Dtype dtype = kFloat; if (outputType) { diff --git a/torch/csrc/jit/tensorexpr/operators/reduction.h b/torch/csrc/jit/tensorexpr/operators/reduction.h index 6265c4d265858..7d25e14a171ce 100644 --- a/torch/csrc/jit/tensorexpr/operators/reduction.h +++ b/torch/csrc/jit/tensorexpr/operators/reduction.h @@ -10,25 +10,25 @@ TORCH_API Tensor computeSum( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeMean( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); TORCH_API Tensor computeAdaptiveAvgPool2d( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); Tensor computeMax( const std::vector& inputs, const std::vector& outputShape, const std::vector& outputStrides, - const c10::optional& outputType, + const std::optional& outputType, at::Device device); } // namespace tensorexpr diff --git a/torch/csrc/jit/tensorexpr/tensor.cpp b/torch/csrc/jit/tensorexpr/tensor.cpp index 746a9a8cd1f0b..5bc734bb80b83 100644 --- a/torch/csrc/jit/tensorexpr/tensor.cpp +++ b/torch/csrc/jit/tensorexpr/tensor.cpp @@ -99,7 +99,7 @@ StmtPtr Tensor::constructStmt( Tensor Compute( const std::string& name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function&)>& body_func) { std::vector args = create_index_vars(dims); ExprHandle body = body_func(args); @@ -116,7 +116,7 @@ Tensor Compute( Tensor Compute( const std::string& name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function& body_func) { if (dims.size() != 1) { throw malformed_input("mismatch between body and arg size (1)"); @@ -137,7 +137,7 @@ Tensor Compute( Tensor Compute( const std::string& name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function& body_func) { if (dims.size() != 2) { @@ -159,7 +159,7 @@ Tensor Compute( Tensor Compute( const std::string& name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function< ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& body_func) { @@ -183,7 +183,7 @@ Tensor Compute( Tensor Compute( const std::string& name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function& dims, - c10::optional> strides, + std::optional> strides, const Reducer& reducer, const BufHandle& buffer, const std::vector& reduce_dims) { @@ -235,7 +235,7 @@ Tensor Reduce( Tensor Reduce( const std::string& name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const Reducer& reducer, Tensor tensor, const std::vector& reduce_dims) { diff --git a/torch/csrc/jit/tensorexpr/tensor.h b/torch/csrc/jit/tensorexpr/tensor.h index 698de07f2be54..7b589d0974b37 100644 --- a/torch/csrc/jit/tensorexpr/tensor.h +++ b/torch/csrc/jit/tensorexpr/tensor.h @@ -75,7 +75,7 @@ class TORCH_API Tensor { TORCH_API Tensor Compute( const std::string& func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function& body_func); TORCH_API Tensor Compute( const std::string& func_name, @@ -84,7 +84,7 @@ TORCH_API Tensor Compute( TORCH_API Tensor Compute( const std::string& 
func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function& body_func); TORCH_API Tensor Compute( @@ -95,7 +95,7 @@ TORCH_API Tensor Compute( TORCH_API Tensor Compute( const std::string& func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function< ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& body_func); @@ -108,7 +108,7 @@ TORCH_API Tensor Compute( TORCH_API Tensor Compute( const std::string& func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const std::function& dims, - c10::optional> strides, + std::optional> strides, const std::function&)>& body_func); TORCH_API Tensor Compute( const std::string& func_name, @@ -148,7 +148,7 @@ template Tensor Reduce( const std::string& func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const Reducer& reducer, const InitFunc& init_func, const BodyFunc& body_func, @@ -217,7 +217,7 @@ template Tensor Reduce( const std::string& func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const Reducer& reducer, const BodyFunc& body_func, const std::vector& reduce_dims) { @@ -246,7 +246,7 @@ template Tensor Reduce( const std::string& func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const Reducer& reducer, const BodyFunc&& body_func, const std::vector& reduce_dims) { @@ -265,7 +265,7 @@ Tensor Reduce( TORCH_API Tensor Reduce( const std::string& name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const Reducer& reducer, const BufHandle& buffer, const std::vector& reduce_dims); @@ -281,7 +281,7 @@ TORCH_API Tensor Reduce( TORCH_API Tensor Reduce( const std::string& func_name, const std::vector& dims, - c10::optional> strides, + std::optional> strides, const Reducer& reducer, Tensor tensor, const std::vector& reduce_dims); diff --git a/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp b/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp index f6e0b270c92ca..204326dc03e21 100644 --- a/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp +++ b/torch/csrc/jit/tensorexpr/tensorexpr_init.cpp @@ -936,13 +936,13 @@ void initTensorExprBindings(PyObject* module) { &tensorexpr::replaceListOutputWithTuple); te.def("trim_graph", &tensorexpr::trimGraph); #ifdef TORCH_ENABLE_LLVM - te.def("set_llvm_target_triple", [](const c10::optional& val) { + te.def("set_llvm_target_triple", [](const std::optional& val) { tensorexpr::LLVMTargetTriple() = val; }); - te.def("set_llvm_target_cpu", [](const c10::optional& val) { + te.def("set_llvm_target_cpu", [](const std::optional& val) { tensorexpr::LLVMTargetCPU() = val; }); - te.def("set_llvm_target_attrs", [](const c10::optional& val) { + te.def("set_llvm_target_attrs", [](const std::optional& val) { tensorexpr::LLVMTargetAttrs() = val; }); te.def("set_llvm_aot_workflow", [](bool val) { diff --git a/torch/csrc/jit/testing/file_check.cpp b/torch/csrc/jit/testing/file_check.cpp index e1f87fccf7266..ec0011f40d775 100644 --- a/torch/csrc/jit/testing/file_check.cpp +++ b/torch/csrc/jit/testing/file_check.cpp @@ -43,17 +43,17 @@ struct Check { Check( CheckType type, std::string str, - c10::optional count = c10::nullopt) + std::optional count = c10::nullopt) : type_(type), count_(count), search_str_(std::move(str)) {} Check( CheckType type, c10::string_view str, - c10::optional count = c10::nullopt) + std::optional count = c10::nullopt) : Check(type, std::string(str.begin(), 
str.end()), count) {} CheckType type_; - c10::optional count_; + std::optional count_; const std::string search_str_; friend std::ostream& operator<<(std::ostream& out, const Check& c); @@ -234,7 +234,7 @@ struct FileCheckImpl { TORCH_API void addCheck( CheckType type, const std::string& s, - c10::optional count = c10::nullopt) { + std::optional count = c10::nullopt) { addCheck(Check(type, s, count)); } @@ -264,7 +264,7 @@ struct FileCheckImpl { } size_t end_check_string = suffix_pos + check_suffix.size(); CheckType type = check_pair.first; - c10::optional count = c10::nullopt; + std::optional count = c10::nullopt; auto end_line = source->text_str().find("\n", end_check_string); bool exactly = false; if (type == CHECK_COUNT) { diff --git a/torch/csrc/lazy/backend/backend_device.cpp b/torch/csrc/lazy/backend/backend_device.cpp index eaf3d6b28c07c..6d146ca0881ce 100644 --- a/torch/csrc/lazy/backend/backend_device.cpp +++ b/torch/csrc/lazy/backend/backend_device.cpp @@ -54,7 +54,7 @@ c10::Device backendDeviceToAtenDevice(const BackendDevice& device) { return c10::Device(at::kLazy, device.ordinal()); } -c10::optional GetBackendDevice(at::ITensorListRef tensors) { +std::optional GetBackendDevice(at::ITensorListRef tensors) { for (auto& tensor : tensors) { if (auto lt = TryGetLtcTensor(tensor)) { return lt->GetDevice(); @@ -63,26 +63,26 @@ c10::optional GetBackendDevice(at::ITensorListRef tensors) { return c10::nullopt; } -c10::optional GetBackendDevice(at::TensorList tensors) { +std::optional GetBackendDevice(at::TensorList tensors) { return GetBackendDevice(at::ITensorListRef(tensors)); } -c10::optional GetBackendDevice(const at::Tensor& tensor) { +std::optional GetBackendDevice(const at::Tensor& tensor) { if (auto lt = TryGetLtcTensor(tensor)) { return lt->GetDevice(); } return c10::nullopt; } -c10::optional GetBackendDevice( - const c10::optional& device) { +std::optional GetBackendDevice( + const std::optional& device) { if (device) { return c10::make_optional(atenDeviceToBackendDevice(*device)); } return c10::nullopt; } -c10::optional GetBackendDevice() { +std::optional GetBackendDevice() { return c10::nullopt; } diff --git a/torch/csrc/lazy/backend/backend_device.h b/torch/csrc/lazy/backend/backend_device.h index 4c239d1e4b71c..e80c800a2ecea 100644 --- a/torch/csrc/lazy/backend/backend_device.h +++ b/torch/csrc/lazy/backend/backend_device.h @@ -73,20 +73,20 @@ TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device); // Tries to extract the backend device out of the lazy tensor. Returns nullopt // if the input is not a lazy tensor. -TORCH_API c10::optional GetBackendDevice( +TORCH_API std::optional GetBackendDevice( const at::ITensorListRef tensors); -TORCH_API c10::optional GetBackendDevice( +TORCH_API std::optional GetBackendDevice( const at::TensorList tensors); -TORCH_API c10::optional GetBackendDevice( +TORCH_API std::optional GetBackendDevice( const at::Tensor& tensor); -TORCH_API c10::optional GetBackendDevice( - const c10::optional& device); +TORCH_API std::optional GetBackendDevice( + const std::optional& device); // For variadic template. -TORCH_API c10::optional GetBackendDevice(); +TORCH_API std::optional GetBackendDevice(); template -c10::optional GetBackendDevice( +std::optional GetBackendDevice( const T& tensor, const Args&... 
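The backend_device.h hunk above ends in the variadic `GetBackendDevice` template: every overload returns an optional device, and the scan stops at the first argument that actually yields one. A toy reconstruction of that shape with placeholder types (`Device`, `deviceOf`, and `firstDevice` are assumptions, not the real API):

```cpp
#include <optional>
#include <string>

struct Device { int ordinal; };

// Pretend only "tensors" (here: strings) carry a device; plain ints do not.
std::optional<Device> deviceOf(int) { return std::nullopt; }
std::optional<Device> deviceOf(const std::string&) { return Device{0}; }

// Base case of the variadic scan: nothing matched.
std::optional<Device> firstDevice() { return std::nullopt; }

// Return the device of the first argument that has one.
template <typename T, typename... Args>
std::optional<Device> firstDevice(const T& head, const Args&... tail) {
  if (auto d = deviceOf(head)) {
    return d;
  }
  return firstDevice(tail...);
}
```

With these stand-ins, `firstDevice(42, std::string("lazy"))` yields `Device{0}`, while `firstDevice(1, 2)` falls through to the zero-argument base case and returns `std::nullopt`.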
forward_tensors) { auto optional_device = GetBackendDevice(tensor); diff --git a/torch/csrc/lazy/backend/backend_interface.h b/torch/csrc/lazy/backend/backend_interface.h index f94d3b602e52c..366311921c394 100644 --- a/torch/csrc/lazy/backend/backend_interface.h +++ b/torch/csrc/lazy/backend/backend_interface.h @@ -63,7 +63,7 @@ class TORCH_API BackendImplInterface { virtual at::Tensor MakeTensorFromComputationData( const BackendDataPtr data, - c10::optional logical_scalar_type) const = 0; + std::optional logical_scalar_type) const = 0; /** * Lowering, Compilation, Execution diff --git a/torch/csrc/lazy/core/hash.h b/torch/csrc/lazy/core/hash.h index bb6a779555f22..57cf1f521030f 100644 --- a/torch/csrc/lazy/core/hash.h +++ b/torch/csrc/lazy/core/hash.h @@ -163,11 +163,11 @@ static inline hash_t Hash(const at::Generator& value) { // repeatedly hash a constant at runtime. static const int64_t kNullOpt = 0x8655d738f3678dda; -// Hashing for c10::optional types contributes to hash +// Hashing for std::optional types contributes to hash // for optionals with null value, important to distinguish // between and cases template -hash_t Hash(const c10::optional& value) { +hash_t Hash(const std::optional& value) { if (value.has_value()) { return Hash(value.value()); } else { @@ -187,7 +187,7 @@ hash_t Hash(const std::vector& values) { // Need a special case for optional? template -hash_t Hash(const c10::optional>& value) { +hash_t Hash(const std::optional>& value) { if (value.has_value()) { return ContainerHash(value.value()); } else { diff --git a/torch/csrc/lazy/core/ir_builder.h b/torch/csrc/lazy/core/ir_builder.h index 3b58d00aace6c..981e166777294 100644 --- a/torch/csrc/lazy/core/ir_builder.h +++ b/torch/csrc/lazy/core/ir_builder.h @@ -61,7 +61,7 @@ struct IrBuilder { virtual NodePtr MakeCast( const Value& input0, const at::ScalarType& dtype, - const c10::optional& stype = c10::nullopt) const = 0; + const std::optional& stype = c10::nullopt) const = 0; virtual NodePtr MakeTensorList(const OpList& inputs) const = 0; virtual NodePtr MakeGeneric( const OpKind& op, @@ -96,7 +96,7 @@ static inline NodePtr MakeExpand( static inline NodePtr MakeCast( const Value& input0, const at::ScalarType& dtype, - const c10::optional& stype = c10::nullopt) { + const std::optional& stype = c10::nullopt) { return getIrBuilder()->MakeCast(input0, dtype, stype); } static inline NodePtr MakeTensorList(const OpList& inputs) { diff --git a/torch/csrc/lazy/core/ir_dump_util.cpp b/torch/csrc/lazy/core/ir_dump_util.cpp index 19cb2ae7b1624..a4fb11761a67c 100644 --- a/torch/csrc/lazy/core/ir_dump_util.cpp +++ b/torch/csrc/lazy/core/ir_dump_util.cpp @@ -28,7 +28,7 @@ std::string::size_type SkipTagSeparator( return node_string.compare(pos, 2, ", ") == 0 ? 
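The hash.h hunk above keeps a dedicated `kNullOpt` sentinel so an empty optional hashes differently from any engaged value; when several optional fields feed one combined hash, that is what keeps a null-then-value pair distinct from a value-then-null pair. A compact sketch of the same idea, with `std::hash` standing in for the real `hash_t` machinery:

```cpp
#include <cstdint>
#include <functional>
#include <optional>

using hash_t = uint64_t;

// Fixed sentinel so hash(nullopt) never collides with hash(some value);
// the constant mirrors the one in torch/csrc/lazy/core/hash.h.
constexpr hash_t kNullOpt = 0x8655d738f3678ddaULL;

template <typename T>
hash_t hashOf(const T& value) {
  return std::hash<T>{}(value);  // stand-in for the real per-type hashing
}

template <typename T>
hash_t hashOf(const std::optional<T>& value) {
  return value.has_value() ? hashOf(value.value()) : kNullOpt;
}
```

Overload partial ordering picks the optional version for `std::optional<T>` arguments, so callers hash optionals and plain values with the same spelling.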
pos + 2 : pos; } -c10::optional ParseAttrTag( +std::optional ParseAttrTag( const std::string& node_string, std::string::size_type pos) { // @lint-ignore-every CLANGTIDY facebook-hte-StdRegexIsAwful @@ -97,7 +97,7 @@ std::unordered_map GetRootsIds( return roots_ids; } -c10::optional GetRootNodeId( +std::optional GetRootNodeId( const Node* node, const std::unordered_map& roots_ids) { auto it = roots_ids.find(node); diff --git a/torch/csrc/lazy/core/lazy_graph_executor.cpp b/torch/csrc/lazy/core/lazy_graph_executor.cpp index afeac5e75e6c3..a2b67c958313a 100644 --- a/torch/csrc/lazy/core/lazy_graph_executor.cpp +++ b/torch/csrc/lazy/core/lazy_graph_executor.cpp @@ -610,7 +610,7 @@ LazyGraphExecutor::SyncTensorCollection LazyGraphExecutor::CollectSyncTensors( } else if (config.force_ltc_data) { // The tensor only has at::Tensor data. We need to queue it for a // device upload. - c10::optional tensor_data = tensors[i]->CurrentTensorData(); + std::optional tensor_data = tensors[i]->CurrentTensorData(); TORCH_CHECK(tensor_data); at_tensors.push_back(*tensor_data); devices.push_back(tensors[i]->GetDevice()); @@ -996,7 +996,7 @@ std::vector LazyGraphExecutor::FetchTensors( ++literals_index; ++sync_index; } else { - c10::optional tensor_data = + std::optional tensor_data = (*tensors)[i]->CurrentTensorData(); if (tensor_data) { results.push_back(*tensor_data); diff --git a/torch/csrc/lazy/core/shape.cpp b/torch/csrc/lazy/core/shape.cpp index 200dd8fac7895..939e2745ed393 100644 --- a/torch/csrc/lazy/core/shape.cpp +++ b/torch/csrc/lazy/core/shape.cpp @@ -13,7 +13,7 @@ namespace lazy { Shape::Shape( at::ScalarType scalar_type, c10::ArrayRef sizes, - c10::optional> is_symbolic) + std::optional> is_symbolic) : scalar_type_(scalar_type), sizes_(sizes.begin(), sizes.end()), is_symbolic_(std::move(is_symbolic)) {} @@ -49,7 +49,7 @@ hash_t Shape::hash(bool bakeInSizes) const { } Shape Shape::with_symbolic_dims( - c10::optional> symbolic_dims) const { + std::optional> symbolic_dims) const { Shape copy = *this; copy.is_symbolic_ = symbolic_dims; return copy; @@ -75,7 +75,7 @@ static c10::SymbolicShape get_symbolic_shape(at::Tensor& tensor) { TORCH_INTERNAL_ASSERT( sizes.size() == is_symbolic->size(), "Dims of two values are not consistent"); - std::vector> symbolic_dims; + std::vector> symbolic_dims; for (size_t i = 0; i < sizes.size(); i++) { if (is_symbolic->at(i)) { symbolic_dims.emplace_back(c10::nullopt); diff --git a/torch/csrc/lazy/core/shape.h b/torch/csrc/lazy/core/shape.h index 1c6b4d5bb3d81..63566619fd149 100644 --- a/torch/csrc/lazy/core/shape.h +++ b/torch/csrc/lazy/core/shape.h @@ -19,7 +19,7 @@ class TORCH_API Shape { Shape( at::ScalarType scalar_type, c10::ArrayRef sizes, - c10::optional> is_symbolic = c10::nullopt); + std::optional> is_symbolic = c10::nullopt); std::string to_string() const; @@ -43,13 +43,13 @@ class TORCH_API Shape { sizes_.at(dim) = size; } - const c10::optional>& is_symbolic() const { + const std::optional>& is_symbolic() const { return is_symbolic_; } // Makes a copy with symbolic dims applied Shape with_symbolic_dims( - c10::optional> symbolic_dims) const; + std::optional> symbolic_dims) const; size_t numel() const; hash_t hash(bool bakeInSizes) const; @@ -64,7 +64,7 @@ class TORCH_API Shape { // Stores which dimmensions are symbolic // If nullopt, either it hasn't been initialized or the symbolic // dimmensions are not calculatable - c10::optional> is_symbolic_ = c10::nullopt; + std::optional> is_symbolic_ = c10::nullopt; }; TORCH_API std::ostream& 
operator<<(std::ostream& out, const Shape& shape); diff --git a/torch/csrc/lazy/core/tensor.cpp b/torch/csrc/lazy/core/tensor.cpp index 541a0f6f5a070..ba0571f87df4d 100644 --- a/torch/csrc/lazy/core/tensor.cpp +++ b/torch/csrc/lazy/core/tensor.cpp @@ -197,7 +197,7 @@ Value LazyTensor::GetIrValue() const { AssignIrValue(CreateTensorNode(handle, /*read_only=*/false)); return data()->ir_value; } - c10::optional tensor_data = CurrentTensorData(); + std::optional tensor_data = CurrentTensorData(); TORCH_CHECK(tensor_data); AssignIrValue(GetIrValueForTensor(*tensor_data, GetDevice())); return data()->ir_value; @@ -211,7 +211,7 @@ void LazyTensor::SetTensorData(at::Tensor tensor_data) { data()->tensor_data = std::move(tensor_data); } -c10::optional LazyTensor::CurrentTensorData() const { +std::optional LazyTensor::CurrentTensorData() const { return data()->tensor_data; } @@ -236,7 +236,7 @@ Value LazyTensor::GetIrValueForTensor( at::Tensor LazyTensor::ToTensor(bool detached) { at::Tensor tensor; - c10::optional tensor_data = CurrentTensorData(); + std::optional tensor_data = CurrentTensorData(); if (!tensor_data) { LazyGraphExecutor::Get()->DeviceBarrier(GetDevice()); // The GetDataHandle() call will trigger an ApplyPendingGraph() if an IR @@ -373,7 +373,7 @@ std::vector GetLtcTensors(c10::ArrayRef tensors) { } LazyTensorPtr GetOrCreateLtcTensor( - const c10::optional& tensor, + const std::optional& tensor, const BackendDevice& device) { return GetOrCreateLtcTensor(tensor.value_or(at::Tensor()), device); } diff --git a/torch/csrc/lazy/core/tensor.h b/torch/csrc/lazy/core/tensor.h index 3a15c91c03452..afc52376c5545 100644 --- a/torch/csrc/lazy/core/tensor.h +++ b/torch/csrc/lazy/core/tensor.h @@ -47,7 +47,7 @@ class TORCH_API LazyTensor : public c10::intrusive_ptr_target { BackendDataPtr handle; Value ir_value; - c10::optional tensor_data; + std::optional tensor_data; const BackendDevice device; const int64_t unique_id = 0; size_t generation = 1; @@ -124,7 +124,7 @@ class TORCH_API LazyTensor : public c10::intrusive_ptr_target { void SetIrValue(Value ir_value); void SetInPlaceIrValue(Value ir_value); - c10::optional CurrentTensorData() const; + std::optional CurrentTensorData() const; std::vector MakeOutputTensors(NodePtr node) const; @@ -191,7 +191,7 @@ TORCH_API std::vector GetLtcTensors( // If tensor is a lazy tensor type, returns the LazyTensor embedded within it, // otherwise creates a new lazy tensor type with tensor as data. TORCH_API LazyTensorPtr GetOrCreateLtcTensor( - const c10::optional& tensor, + const std::optional& tensor, const BackendDevice& device); TORCH_API LazyTensorPtr GetLtcTensorOrCreateForWrappedNumber( diff --git a/torch/csrc/lazy/core/tensor_impl.h b/torch/csrc/lazy/core/tensor_impl.h index 6eca2212c08ed..a35c02a7aeac4 100644 --- a/torch/csrc/lazy/core/tensor_impl.h +++ b/torch/csrc/lazy/core/tensor_impl.h @@ -54,7 +54,7 @@ class TORCH_API LTCTensorImpl final : public c10::TensorImpl { void setup_size_properties(); LazyTensorPtr tensor_; - mutable c10::optional> sym_sizes_; + mutable std::optional> sym_sizes_; size_t generation_{0}; }; diff --git a/torch/csrc/lazy/core/tensor_util.h b/torch/csrc/lazy/core/tensor_util.h index e4e6a1b7f0c26..121235ef9d8c0 100644 --- a/torch/csrc/lazy/core/tensor_util.h +++ b/torch/csrc/lazy/core/tensor_util.h @@ -43,7 +43,7 @@ inline at::Tensor CopyTensor( } template -T OptionalOr(const c10::optional& value, T defval) { +T OptionalOr(const std::optional& value, T defval) { return value ? 
static_cast(*value) : defval; } diff --git a/torch/csrc/lazy/core/unique.h b/torch/csrc/lazy/core/unique.h index 0b156a29eb906..fc09c8d71d7d8 100644 --- a/torch/csrc/lazy/core/unique.h +++ b/torch/csrc/lazy/core/unique.h @@ -49,7 +49,7 @@ class Unique { } private: - c10::optional value_; + std::optional value_; }; } // namespace lazy diff --git a/torch/csrc/lazy/core/util.h b/torch/csrc/lazy/core/util.h index a3d35783ae969..e535e5365f227 100644 --- a/torch/csrc/lazy/core/util.h +++ b/torch/csrc/lazy/core/util.h @@ -89,7 +89,7 @@ class MaybeRef { } private: - c10::optional storage_; + std::optional storage_; const T& ref_; }; @@ -109,7 +109,7 @@ std::vector ToVector(const S& input) { } template -c10::optional> ToOptionalVector( +std::optional> ToOptionalVector( c10::OptionalArrayRef arrayRef) { if (arrayRef) { return arrayRef->vec(); diff --git a/torch/csrc/lazy/python/python_util.cpp b/torch/csrc/lazy/python/python_util.cpp index 703d43ca65059..90d9797e3fd35 100644 --- a/torch/csrc/lazy/python/python_util.cpp +++ b/torch/csrc/lazy/python/python_util.cpp @@ -11,7 +11,7 @@ namespace torch { namespace lazy { -c10::optional GetPythonFrameTop() { +std::optional GetPythonFrameTop() { if (!Py_IsInitialized()) { return c10::nullopt; } diff --git a/torch/csrc/lazy/python/python_util.h b/torch/csrc/lazy/python/python_util.h index 8040a023de518..456aafa880971 100644 --- a/torch/csrc/lazy/python/python_util.h +++ b/torch/csrc/lazy/python/python_util.h @@ -7,7 +7,7 @@ namespace torch { namespace lazy { -c10::optional TORCH_PYTHON_API GetPythonFrameTop(); +std::optional TORCH_PYTHON_API GetPythonFrameTop(); std::vector TORCH_PYTHON_API GetPythonFrames(); diff --git a/torch/csrc/lazy/ts_backend/ir_builder.h b/torch/csrc/lazy/ts_backend/ir_builder.h index 1f32a3521ba8a..c538292374434 100644 --- a/torch/csrc/lazy/ts_backend/ir_builder.h +++ b/torch/csrc/lazy/ts_backend/ir_builder.h @@ -33,7 +33,7 @@ struct TorchScriptIrBuilder : IrBuilder { NodePtr MakeCast( const Value& input0, const at::ScalarType& dtype, - const c10::optional& stype = + const std::optional& stype = c10::nullopt) const override { return ReuseOrMakeNode(input0, dtype, stype); } diff --git a/torch/csrc/lazy/ts_backend/ops/to_copy.h b/torch/csrc/lazy/ts_backend/ops/to_copy.h index 4b96b1c389f78..3a5f47411dfdd 100644 --- a/torch/csrc/lazy/ts_backend/ops/to_copy.h +++ b/torch/csrc/lazy/ts_backend/ops/to_copy.h @@ -18,12 +18,12 @@ class ToCopy : public torch::lazy::TsNode { ToCopy( const torch::lazy::Value& self, - const c10::optional& dtype, - const c10::optional& layout, - const c10::optional& device, - const c10::optional& pin_memory, + const std::optional& dtype, + const std::optional& layout, + const std::optional& device, + const std::optional& pin_memory, const bool& non_blocking, - const c10::optional& memory_format, + const std::optional& memory_format, std::vector&& shapes) : torch::lazy::TsNode( ClassOpKind(), @@ -47,12 +47,12 @@ class ToCopy : public torch::lazy::TsNode { bool CanBeReused( const torch::lazy::Value& self, - const c10::optional& dtype, - const c10::optional& layout, - const c10::optional& device, - const c10::optional& pin_memory, + const std::optional& dtype, + const std::optional& layout, + const std::optional& device, + const std::optional& pin_memory, const bool& non_blocking, - const c10::optional& memory_format) const { + const std::optional& memory_format) const { size_t i = 0; return ( operand(i++) == self && this->dtype == dtype && @@ -115,12 +115,12 @@ class ToCopy : public torch::lazy::TsNode { return 
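The tensor_util.h helper whose body closes at the top of this hunk is a small convert-or-default wrapper around an optional. A sketch of that helper under the assumption that it takes a stored type `S` and a target type `T` (the real template parameters are not visible in this rendering of the diff):

```cpp
#include <optional>

// OptionalOr-style helper: take the stored value, convert it to T, or fall
// back to the supplied default (cf. torch/csrc/lazy/core/tensor_util.h).
template <typename T, typename S>
T optionalOr(const std::optional<S>& value, T defval) {
  return value ? static_cast<T>(*value) : defval;
}

// Usage: std::optional<double> scale;   -> optionalOr<int>(scale, 1) == 1
//        scale = 2.5;                   -> optionalOr<int>(scale, 1) == 2
```

`std::optional::value_or` covers the common case, but it returns the optional's own value type; this helper additionally converts to a possibly different `T`, which is why the codebase keeps it.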
_to_copy_out; } - c10::optional dtype; - c10::optional layout; - c10::optional device; - c10::optional pin_memory; + std::optional dtype; + std::optional layout; + std::optional device; + std::optional pin_memory; bool non_blocking; - c10::optional memory_format; + std::optional memory_format; }; } // namespace lazy diff --git a/torch/csrc/lazy/ts_backend/ts_backend_impl.cpp b/torch/csrc/lazy/ts_backend/ts_backend_impl.cpp index 927e2ba62c2de..b0a2d7568aef8 100644 --- a/torch/csrc/lazy/ts_backend/ts_backend_impl.cpp +++ b/torch/csrc/lazy/ts_backend/ts_backend_impl.cpp @@ -81,7 +81,7 @@ class TSBackendImpl : public torch::lazy::BackendImplInterface { at::Tensor MakeTensorFromComputationData( const torch::lazy::BackendDataPtr data, - c10::optional logical_scalar_type) const override { + std::optional logical_scalar_type) const override { const auto ts_data = std::static_pointer_cast(data); return ts_data->data(); } diff --git a/torch/csrc/lazy/ts_backend/ts_backend_impl.h b/torch/csrc/lazy/ts_backend/ts_backend_impl.h index d238e8263e577..0607c3efb5386 100644 --- a/torch/csrc/lazy/ts_backend/ts_backend_impl.h +++ b/torch/csrc/lazy/ts_backend/ts_backend_impl.h @@ -38,7 +38,7 @@ class TORCH_API TSData : public torch::lazy::BackendData { return data_; } - c10::optional scalar; + std::optional scalar; private: at::Tensor data_; diff --git a/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp b/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp index e59a665d7bc29..42acc2c5df10a 100644 --- a/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp +++ b/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp @@ -69,16 +69,16 @@ std::vector to_eager( return eager_tensors; } -std::vector> to_eager( - const std::vector>& tensors, +std::vector> to_eager( + const std::vector>& tensors, c10::DeviceType device_type) { // We can't just call _to_eager() on the entire list of Tensors because it // will break on undefined tensors. Separate out undefined tensors first. - std::vector> eager_tensors(tensors.size()); + std::vector> eager_tensors(tensors.size()); std::vector valid_tensors; std::vector to_translate(tensors.size()); for (size_t i = 0; i < tensors.size(); ++i) { - const c10::optional& tensor = tensors[i]; + const std::optional& tensor = tensors[i]; // Explicitly handling undefined tensors here instead of letting `_to_eager` // handle it. Otherwise, we'd need to require all backends with their own // implementation of _to_eager to properly handle undefined tensors. @@ -112,10 +112,10 @@ c10::DispatchKey dispatch_key(c10::DeviceType device_type) { } } -c10::optional compute_target_device( +std::optional compute_target_device( std::vector& t_args, std::vector> tlist_args, - std::vector>> opt_tlist_args) { + std::vector>> opt_tlist_args) { // Decide what device to move the output tensor(s) to. // The current convention is that we use the first tensor arg to pick the // device Barring that, we take the first tensor from a TensorList arg. @@ -217,7 +217,7 @@ void ts_eager_fallback( std::vector tensor_args_indices; std::vector> tensorlist_args; - std::vector>> opt_tensorlist_args; + std::vector>> opt_tensorlist_args; // Step 1: Convert all non-eager tensor inputs into eager tensors and put them // on the stack at the correct indices. 
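The ts_eager_fallback.cpp hunk above keeps the "separate out undefined tensors first" strategy: only the engaged entries of a `std::vector<std::optional<...>>` are converted, and the results are scattered back to their original slots. A toy version of that round trip, with `std::string` standing in for `at::Tensor` and upper-casing standing in for the batched conversion:

```cpp
#include <cctype>
#include <cstddef>
#include <optional>
#include <string>
#include <vector>

// Process only the engaged elements of a list of optionals and write results
// back to their original positions, leaving std::nullopt entries untouched.
std::vector<std::optional<std::string>> toUpperDefined(
    const std::vector<std::optional<std::string>>& items) {
  std::vector<std::optional<std::string>> out(items.size());
  std::vector<std::size_t> to_translate;
  std::vector<std::string> valid;
  for (std::size_t i = 0; i < items.size(); ++i) {
    if (items[i].has_value()) {
      to_translate.push_back(i);
      valid.push_back(*items[i]);
    }
  }
  // Stand-in for the batched conversion applied to the valid subset.
  for (auto& s : valid) {
    for (auto& c : s) {
      c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
    }
  }
  for (std::size_t k = 0; k < to_translate.size(); ++k) {
    out[to_translate[k]] = std::move(valid[k]);
  }
  return out;
}
```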
@@ -236,7 +236,7 @@ void ts_eager_fallback( (*stack)[arguments_begin + idx] = std::move(eager_ivalue); tensorlist_args.push_back(ivalue.toTensorList()); } else if (ivalue.isOptionalTensorList()) { - auto eager_ivalue = c10::IValue(c10::List>( + auto eager_ivalue = c10::IValue(c10::List>( to_eager(ivalue.toOptionalTensorVector(), device_type))); (*stack)[arguments_begin + idx] = std::move(eager_ivalue); opt_tensorlist_args.push_back(ivalue.toOptionalTensorList()); @@ -323,7 +323,7 @@ void ts_eager_fallback( "mutable alias: ", schema_returns[idx]); } else { - c10::optional tgt_device = compute_target_device( + std::optional tgt_device = compute_target_device( tensor_args, tensorlist_args, opt_tensorlist_args); if (alias_info != nullptr && !alias_info->isWrite()) { // immutable alias (view) case: Warn here, since we're copying and diff --git a/torch/csrc/lazy/ts_backend/ts_native_functions.cpp b/torch/csrc/lazy/ts_backend/ts_native_functions.cpp index 456ff4211ac1a..78ae6a6f6e2e5 100644 --- a/torch/csrc/lazy/ts_backend/ts_native_functions.cpp +++ b/torch/csrc/lazy/ts_backend/ts_native_functions.cpp @@ -28,7 +28,7 @@ namespace { at::Tensor CreateLtcTensor( const at::Tensor& tensor, - const c10::optional& device) { + const std::optional& device) { if (tensor.defined() && device) { return torch::lazy::CreateAtenFromLtcTensor( torch::lazy::LazyTensor::Create(tensor, *device)); @@ -36,8 +36,8 @@ at::Tensor CreateLtcTensor( return tensor; } -c10::optional GetLtcDevice( - const c10::optional& device) { +std::optional GetLtcDevice( + const std::optional& device) { if (!device) { return c10::nullopt; } @@ -53,7 +53,7 @@ c10::optional GetLtcDevice( // This should be safe to do, because every operator in the LT is functional. at::Tensor LazyNativeFunctions::clone( const at::Tensor& self, - c10::optional memory_format) { + std::optional memory_format) { auto self_lt = torch::lazy::TryGetLtcTensor(self); return torch::lazy::CreateAtenFromLtcTensor( self_lt->Create(self_lt->GetIrValue(), self_lt->GetDevice())); @@ -138,12 +138,12 @@ at::Tensor LazyNativeFunctions::_copy_from_and_resize( at::Tensor LazyNativeFunctions::_to_copy( const at::Tensor& self, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, bool non_blocking, - c10::optional memory_format) { + std::optional memory_format) { if (force_eager_fallback(at::aten::_to_copy)) { TORCH_INTERNAL_ASSERT( false, @@ -270,11 +270,11 @@ at::Tensor LazyNativeFunctions::_to_copy( at::Tensor LazyNativeFunctions::empty_symint( at::SymIntArrayRef sym_size, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory, - c10::optional memory_format) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory, + std::optional memory_format) { // TODO: support this directly auto size = C10_AS_INTARRAYREF_SLOW(sym_size); const auto device_type = torch::lazy::getBackend()->EagerFallbackDeviceType(); @@ -301,10 +301,10 @@ at::Tensor LazyNativeFunctions::empty_symint( at::Tensor LazyNativeFunctions::empty_strided_symint( at::SymIntArrayRef sym_size, at::SymIntArrayRef sym_stride, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { TORCH_LAZY_FN_COUNTER("lazy::"); at::Tensor t = empty_symint(sym_size, 
dtype, layout, device, pin_memory, c10::nullopt); @@ -406,10 +406,10 @@ at::Tensor LazyNativeFunctions::new_empty_strided_symint( const at::Tensor& self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, - c10::optional dtype, - c10::optional layout, - c10::optional device, - c10::optional pin_memory) { + std::optional dtype, + std::optional layout, + std::optional device, + std::optional pin_memory) { return at::functionalization:: functionalize_aten_op_symint::call( self, size, stride, dtype, layout, device, pin_memory); @@ -457,8 +457,8 @@ at::Tensor LazyNativeFunctions::_trilinear( } at::Tensor LazyNativeFunctions::linalg_pinv( const at::Tensor& self, - const c10::optional& atol, - const c10::optional& rtol, + const std::optional& atol, + const std::optional& rtol, bool hermitian) { return at::functionalization::functionalize_aten_op::call(self, atol, rtol, hermitian); @@ -525,8 +525,8 @@ at::Tensor LazyNativeFunctions::slice_backward_symint( // backwards formula for native_group_norm std::tuple LazyNativeFunctions::native_group_norm( const at::Tensor& input, - const c10::optional& weight, - const c10::optional& bias, + const std::optional& weight, + const std::optional& bias, int64_t N, int64_t C, int64_t HxW, diff --git a/torch/csrc/profiler/collection.h b/torch/csrc/profiler/collection.h index 3a129b3118d86..6822d39c225ac 100644 --- a/torch/csrc/profiler/collection.h +++ b/torch/csrc/profiler/collection.h @@ -61,9 +61,9 @@ struct TORCH_API RawTensorMetadata : RawTensorMetadataBase { RawTensorMetadata& operator=(RawTensorMetadata&&) noexcept = default; explicit RawTensorMetadata(const at::Tensor& t); - // Wrap `weak_self_` in `c10::optional` and split device into components to + // Wrap `weak_self_` in `std::optional` and split device into components to // keep struct default constructable. (which the std::array initializer needs) - c10::optional weak_self_; + std::optional weak_self_; c10::DeviceType device_type_{c10::DeviceType::CPU}; c10::DeviceIndex device_index_{-1}; }; @@ -85,8 +85,8 @@ struct TORCH_API TensorMetadata : public RawTensorMetadataBase { std::vector strides_; // Set during `calculateUniqueTensorIDs`. 
- c10::optional id_; - c10::optional allocation_id_; + std::optional id_; + std::optional allocation_id_; }; using op_input_t = std::variant< @@ -207,8 +207,8 @@ struct ExtraFields : RawAllocation { return {device_type_, device_index_}; } - c10::optional id_; - c10::optional allocation_id_; + std::optional id_; + std::optional allocation_id_; }; template <> @@ -246,7 +246,7 @@ struct NNModuleInfo { struct ParameterInfo { std::string name_; TensorMetadata metadata_; - c10::optional grad_metadata_; + std::optional grad_metadata_; }; PyModuleSelf self_; @@ -261,7 +261,7 @@ struct NNModuleInfo { struct OptimizerInfo { struct ParameterInfo { TensorMetadata metadata_; - c10::optional grad_metadata_; + std::optional grad_metadata_; std::vector> state_; }; @@ -293,8 +293,8 @@ template <> struct ExtraFields : public PyExtraFieldsBase { struct args_t { PyFrameState frame_state_; - c10::optional module_info_; - c10::optional optimizer_info_; + std::optional module_info_; + std::optional optimizer_info_; }; ExtraFields( @@ -308,8 +308,8 @@ struct ExtraFields : public PyExtraFieldsBase { optimizer_{std::move(args.optimizer_info_)} {} PyFrameState callsite_; - c10::optional module_; - c10::optional optimizer_; + std::optional module_; + std::optional optimizer_; }; template <> diff --git a/torch/csrc/profiler/data_flow.cpp b/torch/csrc/profiler/data_flow.cpp index e719835d7c2c1..9ea79cdbdb27d 100644 --- a/torch/csrc/profiler/data_flow.cpp +++ b/torch/csrc/profiler/data_flow.cpp @@ -18,8 +18,8 @@ struct RawTensorInfo { bool is_free_; // Used to assign back to the original structs. - std::reference_wrapper> allocation_id_ref_; - std::reference_wrapper> id_ref_; + std::reference_wrapper> allocation_id_ref_; + std::reference_wrapper> id_ref_; }; struct RawTensors { @@ -32,7 +32,7 @@ struct RawTensors { t.impl(), t.data_, t.device_, false, t.allocation_id_, t.id_}); } - void operator()(c10::optional& t) { + void operator()(std::optional& t) { if (t.has_value()) { (*this)(*t); } diff --git a/torch/csrc/profiler/unwind/unwind.cpp b/torch/csrc/profiler/unwind/unwind.cpp index f3fbde151b775..f45be9ecd5ac6 100644 --- a/torch/csrc/profiler/unwind/unwind.cpp +++ b/torch/csrc/profiler/unwind/unwind.cpp @@ -11,7 +11,7 @@ std::vector unwind() { "record_context_cpp is not support on non-linux non-x86_64 platforms"); } -c10::optional> libraryFor(void* addr) { +std::optional> libraryFor(void* addr) { TORCH_CHECK( false, "record_context_cpp is not support on non-linux non-x86_64 platforms"); @@ -322,7 +322,7 @@ std::vector unwind() { return frames; } -c10::optional> libraryFor(void* addr) { +std::optional> libraryFor(void* addr) { if (!addr) { return c10::nullopt; } diff --git a/torch/csrc/profiler/unwind/unwind.h b/torch/csrc/profiler/unwind/unwind.h index 69b27f49e5b79..0f20a235c3899 100644 --- a/torch/csrc/profiler/unwind/unwind.h +++ b/torch/csrc/profiler/unwind/unwind.h @@ -25,7 +25,7 @@ struct Frame { TORCH_API std::vector symbolize(const std::vector& frames); // returns path to the library, and the offset of the addr inside the library -TORCH_API c10::optional> libraryFor( +TORCH_API std::optional> libraryFor( void* addr); struct Stats { diff --git a/torch/csrc/profiler/util.cpp b/torch/csrc/profiler/util.cpp index 2e42a7412f706..f301596fca813 100644 --- a/torch/csrc/profiler/util.cpp +++ b/torch/csrc/profiler/util.cpp @@ -18,10 +18,10 @@ namespace profiler { namespace impl { namespace { -c10::optional soft_assert_raises_; +std::optional soft_assert_raises_; } // namespace -void setSoftAssertRaises(c10::optional 
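data_flow.cpp, shown above, keeps `std::reference_wrapper` handles to the optional `id_`/`allocation_id_` fields so a later pass can fill them in place. A minimal sketch of that write-back pattern (`Record` and `assignIds` are invented names):

```cpp
#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

struct Record {
  std::optional<uint64_t> id;  // filled in by a later pass
};

// Assign consecutive ids through the collected references; the writes land in
// the original Record objects, not in copies.
void assignIds(
    std::vector<std::reference_wrapper<std::optional<uint64_t>>> slots) {
  uint64_t next = 0;
  for (auto& slot : slots) {
    slot.get() = next++;
  }
}

// Usage:
//   std::vector<Record> records(3);
//   assignIds({std::ref(records[0].id), std::ref(records[1].id),
//              std::ref(records[2].id)});
```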
value) { +void setSoftAssertRaises(std::optional value) { soft_assert_raises_ = value; } diff --git a/torch/csrc/profiler/util.h b/torch/csrc/profiler/util.h index e27d4084412c8..c8216c93f41c5 100644 --- a/torch/csrc/profiler/util.h +++ b/torch/csrc/profiler/util.h @@ -38,7 +38,7 @@ namespace torch { namespace profiler { namespace impl { TORCH_API bool softAssertRaises(); -TORCH_API void setSoftAssertRaises(c10::optional value); +TORCH_API void setSoftAssertRaises(std::optional value); TORCH_API void logSoftAssert( const char* func, const char* file, diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp index 4ea523cedc942..8d18180ed9195 100644 --- a/torch/csrc/tensor/python_tensor.cpp +++ b/torch/csrc/tensor/python_tensor.cpp @@ -314,8 +314,8 @@ static void set_default_storage_type(Backend backend, ScalarType dtype) { } static void set_default_tensor_type( - c10::optional backend, - c10::optional dtype) { + std::optional backend, + std::optional dtype) { if (backend.has_value()) { TORCH_CHECK_TYPE( *backend != Backend::Undefined, "default type cannot be undefined"); diff --git a/torch/csrc/utils.h b/torch/csrc/utils.h index 5a610c28d2b1e..7552f6d0c028a 100644 --- a/torch/csrc/utils.h +++ b/torch/csrc/utils.h @@ -206,7 +206,7 @@ bool maybeThrowBackCompatKeepdimWarn(char* func); // NB: This is in torch/csrc/cuda/utils.cpp, for whatever reason #ifdef USE_CUDA -std::vector> +std::vector> THPUtils_PySequence_to_CUDAStreamList(PyObject* obj); #endif diff --git a/torch/csrc/utils/device_lazy_init.h b/torch/csrc/utils/device_lazy_init.h index b290ae04d792e..4d736898e5359 100644 --- a/torch/csrc/utils/device_lazy_init.h +++ b/torch/csrc/utils/device_lazy_init.h @@ -33,7 +33,7 @@ static inline void maybe_initialize_device(at::Device& device) { } } -static inline void maybe_initialize_device(c10::optional& device) { +static inline void maybe_initialize_device(std::optional& device) { if (!device.has_value()) { return; } diff --git a/torch/csrc/utils/out_types.cpp b/torch/csrc/utils/out_types.cpp index 3d55b9caaf1ca..7e712f2087169 100644 --- a/torch/csrc/utils/out_types.cpp +++ b/torch/csrc/utils/out_types.cpp @@ -7,10 +7,10 @@ namespace utils { // consistent with the out tensor's options void check_out_type_matches( const at::Tensor& result, - c10::optional scalarType, + std::optional scalarType, bool scalarType_is_none, - c10::optional layout, - c10::optional device, + std::optional layout, + std::optional device, bool device_is_none) { if (scalarType_is_none && !layout && device_is_none) { // common case return; diff --git a/torch/csrc/utils/out_types.h b/torch/csrc/utils/out_types.h index 1cab00bc270f2..68bf759f30038 100644 --- a/torch/csrc/utils/out_types.h +++ b/torch/csrc/utils/out_types.h @@ -7,10 +7,10 @@ namespace utils { TORCH_API void check_out_type_matches( const at::Tensor& result, - c10::optional scalarType, + std::optional scalarType, bool scalarType_is_none, - c10::optional layout, - c10::optional device, + std::optional layout, + std::optional device, bool device_is_none); } diff --git a/torch/csrc/utils/python_arg_parser.cpp b/torch/csrc/utils/python_arg_parser.cpp index 9ea90e8911dbd..90c331488e0c9 100644 --- a/torch/csrc/utils/python_arg_parser.cpp +++ b/torch/csrc/utils/python_arg_parser.cpp @@ -267,7 +267,7 @@ static py::object dispatch_on_subclass( PyObject* torch_api_function, bool is_torch_function, const char* torch_function_name_str, - c10::optional maybe_mode_key = + std::optional maybe_mode_key = c10::nullopt) { py::object ret; for (auto& 
arg : overloaded_args) { @@ -1003,13 +1003,13 @@ std::string FunctionParameter::type_name() const { } } -static inline c10::optional parse_as_integer(const std::string& s) { +static inline std::optional parse_as_integer(const std::string& s) { if (s.empty()) return c10::nullopt; char* str_end = nullptr; long ans = strtol(s.c_str(), &str_end, 0); // *str_end == 0 if the entire string was parsed as an integer. - return (*str_end == 0) ? c10::optional(ans) : c10::nullopt; + return (*str_end == 0) ? std::optional(ans) : c10::nullopt; } /* diff --git a/torch/csrc/utils/python_arg_parser.h b/torch/csrc/utils/python_arg_parser.h index 7bbef2f622ad6..06c32d52f0172 100644 --- a/torch/csrc/utils/python_arg_parser.h +++ b/torch/csrc/utils/python_arg_parser.h @@ -231,12 +231,12 @@ struct PythonArgs { inline bool has_torch_function(); inline std::string get_func_name(); inline at::Tensor tensor(int i); - inline c10::optional optionalTensor(int i); + inline std::optional optionalTensor(int i); inline at::Scalar scalar(int i); inline at::Scalar scalarWithDefault(int i, const at::Scalar& default_scalar); inline std::vector scalarlist(int i); inline std::vector tensorlist(int i); - inline torch::List> list_of_optional_tensors(int i); + inline torch::List> list_of_optional_tensors(int i); template inline std::array tensorlist_n(int i); inline std::vector intlist(int i); @@ -246,7 +246,7 @@ struct PythonArgs { inline std::vector intlistWithDefault( int i, std::vector default_intlist); - inline c10::optional generator(int i); + inline std::optional generator(int i); inline at::Storage storage(int i); inline at::Storage storage( int i, @@ -257,35 +257,35 @@ struct PythonArgs { inline at::ScalarType scalartypeWithDefault( int i, at::ScalarType default_scalartype); - inline c10::optional scalartypeOptional(int i); - inline c10::optional scalarOptional(int i); - inline c10::optional toInt64Optional(int i); - inline c10::optional toSymIntOptional(int i); - inline c10::optional toBoolOptional(int i); - inline c10::optional toDoubleOptional(int i); + inline std::optional scalartypeOptional(int i); + inline std::optional scalarOptional(int i); + inline std::optional toInt64Optional(int i); + inline std::optional toSymIntOptional(int i); + inline std::optional toBoolOptional(int i); + inline std::optional toDoubleOptional(int i); inline c10::OptionalArray doublelistOptional(int i); inline std::vector doublelist(int i); inline std::vector getDoublelist(int i); inline at::Layout layout(int i); inline at::Layout layoutWithDefault(int i, at::Layout default_layout); - inline c10::optional layoutOptional(int i); + inline std::optional layoutOptional(int i); inline at::Device device(int i); inline at::Device deviceWithDefault(int i, const at::Device& default_device); - inline c10::optional deviceOptional(int i); + inline std::optional deviceOptional(int i); inline at::Dimname dimname(int i); inline std::vector dimnamelist(int i); - inline c10::optional> toDimnameListOptional(int i); + inline std::optional> toDimnameListOptional(int i); inline at::MemoryFormat memoryformat(int i); - inline c10::optional memoryformatOptional(int i); + inline std::optional memoryformatOptional(int i); inline at::QScheme toQScheme(int i); inline std::string string(int i); inline std::string stringWithDefault(int i, const std::string& default_str); - inline c10::optional stringOptional(int i); + inline std::optional stringOptional(int i); inline c10::string_view stringView(int i); inline c10::string_view stringViewWithDefault( int i, const 
c10::string_view default_str); - inline c10::optional stringViewOptional(int i); + inline std::optional stringViewOptional(int i); inline PyObject* pyobject(int i); inline int64_t toInt64(int i); inline c10::SymInt toSymInt(int i); @@ -300,7 +300,7 @@ struct PythonArgs { inline bool toBool(int i); inline bool toBoolWithDefault(int i, bool default_bool); inline bool isNone(int i); - inline c10::optional toDispatchKeySetOptional(int i); + inline std::optional toDispatchKeySetOptional(int i); private: at::Tensor tensor_slow(int i); @@ -393,7 +393,7 @@ inline at::Tensor PythonArgs::tensor(int i) { return tensor_slow(i); } -inline c10::optional PythonArgs::optionalTensor(int i) { +inline std::optional PythonArgs::optionalTensor(int i) { at::Tensor t = tensor(i); // NOLINTNEXTLINE(bugprone-branch-clone) if (t.defined()) { @@ -433,7 +433,7 @@ inline at::Scalar PythonArgs::scalarWithDefault( return scalar_slow(i); } -inline c10::optional PythonArgs::scalarOptional(int i) { +inline std::optional PythonArgs::scalarOptional(int i) { if (!args[i]) return c10::nullopt; return scalar_slow(i); @@ -457,15 +457,15 @@ inline std::vector PythonArgs::tensorlist(int i) { return res; } -inline torch::List> PythonArgs:: +inline torch::List> PythonArgs:: list_of_optional_tensors(int i) { if (!args[i]) - return torch::List>(); + return torch::List>(); auto tuple = six::isTuple(args[i]); THPObjectPtr arg = six::maybeAsTuple(args[i]); // NOLINTNEXTLINE(bugprone-branch-clone) auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); - torch::List> res; + torch::List> res; res.reserve(size); for (const auto idx : c10::irange(size)) { PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) @@ -729,7 +729,7 @@ inline std::vector PythonArgs::doublelist(int i) { return this->getDoublelist(i); } -inline c10::optional PythonArgs::toDispatchKeySetOptional( +inline std::optional PythonArgs::toDispatchKeySetOptional( int i) { if (!args[i]) { return {}; @@ -769,7 +769,7 @@ inline at::ScalarType PythonArgs::scalartype(int i) { return toScalarType(obj); } -inline c10::optional PythonArgs::scalartypeOptional(int i) { +inline std::optional PythonArgs::scalartypeOptional(int i) { if (!args[i]) return c10::nullopt; return scalartype(i); @@ -794,7 +794,7 @@ inline at::Layout PythonArgs::layoutWithDefault( return layout(i); } -inline c10::optional PythonArgs::layoutOptional(int i) { +inline std::optional PythonArgs::layoutOptional(int i) { if (!args[i]) return c10::nullopt; return layout(i); @@ -835,7 +835,7 @@ inline at::Device PythonArgs::deviceWithDefault( return device(i); } -inline c10::optional PythonArgs::deviceOptional(int i) { +inline std::optional PythonArgs::deviceOptional(int i) { if (!args[i]) return c10::nullopt; return device(i); @@ -860,7 +860,7 @@ inline std::vector parseDimnameList(PyObject* arg) { return res; } -inline c10::optional> PythonArgs:: +inline std::optional> PythonArgs:: toDimnameListOptional(int i) { if (!args[i]) return c10::nullopt; @@ -888,7 +888,7 @@ inline at::MemoryFormat PythonArgs::memoryformat(int i) { return memory_format->memory_format; } -inline c10::optional PythonArgs::memoryformatOptional(int i) { +inline std::optional PythonArgs::memoryformatOptional(int i) { if (!args[i]) return c10::nullopt; return memoryformat(i); @@ -916,7 +916,7 @@ inline std::string PythonArgs::stringWithDefault( return THPUtils_unpackString(args[i]); } -inline c10::optional PythonArgs::stringOptional(int i) { +inline std::optional PythonArgs::stringOptional(int i) { if (!args[i]) return 
c10::nullopt; return THPUtils_unpackString(args[i]); @@ -934,7 +934,7 @@ inline c10::string_view PythonArgs::stringViewWithDefault( return THPUtils_unpackStringView(args[i]); } -inline c10::optional PythonArgs::stringViewOptional(int i) { +inline std::optional PythonArgs::stringViewOptional(int i) { if (!args[i]) return c10::nullopt; return THPUtils_unpackStringView(args[i]); @@ -988,26 +988,26 @@ inline int64_t PythonArgs::toInt64WithDefault(int i, int64_t default_int) { return toInt64(i); } -inline c10::optional PythonArgs::toInt64Optional(int i) { +inline std::optional PythonArgs::toInt64Optional(int i) { if (!args[i]) return c10::nullopt; return toInt64(i); } -inline c10::optional PythonArgs::toSymIntOptional(int i) { +inline std::optional PythonArgs::toSymIntOptional(int i) { if (!args[i]) return c10::nullopt; return toSymInt(i); } -inline c10::optional PythonArgs::toBoolOptional(int i) { +inline std::optional PythonArgs::toBoolOptional(int i) { if (!args[i]) { return c10::nullopt; } return toBool(i); } -inline c10::optional PythonArgs::toDoubleOptional(int i) { +inline std::optional PythonArgs::toDoubleOptional(int i) { if (!args[i]) { return c10::nullopt; } @@ -1069,7 +1069,7 @@ inline bool PythonArgs::isNone(int i) { return args[i] == nullptr; } -inline c10::optional PythonArgs::generator(int i) { +inline std::optional PythonArgs::generator(int i) { if (!args[i]) return c10::nullopt; return reinterpret_cast(args[i])->cdata; diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp index a3e71a2542e3d..e370923b398d8 100644 --- a/torch/csrc/utils/python_dispatch.cpp +++ b/torch/csrc/utils/python_dispatch.cpp @@ -826,7 +826,7 @@ void initDispatchBindings(PyObject* module) { m.def( "_parse_dispatch_key", - [](const char* dispatch_key) -> c10::optional { + [](const char* dispatch_key) -> std::optional { try { return c10::parseDispatchKey(dispatch_key); } catch (const c10::Error& err) { diff --git a/torch/csrc/utils/python_raii.h b/torch/csrc/utils/python_raii.h index 70a5ddfeb55ee..411e558715e8b 100644 --- a/torch/csrc/utils/python_raii.h +++ b/torch/csrc/utils/python_raii.h @@ -22,7 +22,7 @@ struct RAIIContextManager { } private: - c10::optional guard_; + std::optional guard_; std::tuple args_; }; @@ -55,7 +55,7 @@ struct DeprecatedRAIIContextManager { } private: - c10::optional guard_; + std::optional guard_; std::tuple args_; }; diff --git a/torch/csrc/utils/python_symnode.h b/torch/csrc/utils/python_symnode.h index c4814930507bf..f8c710cf6579f 100644 --- a/torch/csrc/utils/python_symnode.h +++ b/torch/csrc/utils/python_symnode.h @@ -140,7 +140,7 @@ class PythonSymNodeImpl : public c10::SymNodeImpl { return getPyObj().attr("int_")().cast(); } - c10::optional maybe_as_int() override { + std::optional maybe_as_int() override { py::gil_scoped_acquire acquire; const auto& r = getPyObj().attr("maybe_as_int")(); if (r.is_none()) { diff --git a/torch/csrc/utils/schema_info.cpp b/torch/csrc/utils/schema_info.cpp index 56e1c6b4a6be2..0caa5b254d279 100644 --- a/torch/csrc/utils/schema_info.cpp +++ b/torch/csrc/utils/schema_info.cpp @@ -6,7 +6,7 @@ namespace utils { void SchemaInfo::addArgumentValue( const std::string& name, const at::IValue& value) { - c10::optional index = schema_.argumentIndexWithName(name); + std::optional index = schema_.argumentIndexWithName(name); TORCH_INTERNAL_ASSERT( index != c10::nullopt, "Schema has no argument named ", name); value_map_[name] = value; @@ -14,7 +14,7 @@ void SchemaInfo::addArgumentValue( } void 
SchemaInfo::addArgumentValues( - const std::vector>& value_list) { + const std::vector>& value_list) { TORCH_INTERNAL_ASSERT( value_list.size() <= schema_.arguments().size(), "Schema does not have enough arguments for value list"); @@ -106,7 +106,7 @@ bool SchemaInfo::has_argument(c10::string_view name) { } bool SchemaInfo::is_mutable(c10::string_view name) { - c10::optional index = schema_.argumentIndexWithName(name); + std::optional index = schema_.argumentIndexWithName(name); TORCH_INTERNAL_ASSERT( index.has_value(), "Schema has no argument named ", name); @@ -144,10 +144,10 @@ bool SchemaInfo::may_alias( if (basic_check) { return true; } - c10::optional lhsAliasTypeSet = + std::optional lhsAliasTypeSet = schema_.mapTypeToAliasTypeSet( schema_.getCorrectList(lhs.type)[lhs.index].type()); - c10::optional rhsAliasTypeSet = + std::optional rhsAliasTypeSet = schema_.mapTypeToAliasTypeSet( schema_.getCorrectList(rhs.type)[rhs.index].type()); bool types_can_alias = @@ -205,10 +205,10 @@ bool SchemaInfo::may_contain_alias( bool SchemaInfo::mayContainAliasImpl( const c10::SchemaArgument& lhs, const c10::SchemaArgument& rhs) { - c10::optional lhsContainedAliasTypeSet = + std::optional lhsContainedAliasTypeSet = schema_.getAliasTypeSetContainedTypes(schema_.mapTypeToAliasTypeSet( schema_.getCorrectList(lhs.type)[lhs.index].type())); - c10::optional rhsAliasTypeSet = + std::optional rhsAliasTypeSet = schema_.mapTypeToAliasTypeSet( schema_.getCorrectList(rhs.type)[rhs.index].type()); bool types_can_alias = @@ -339,7 +339,7 @@ void SchemaInfo::initSchemaInfo() { } } } - c10::optional contained_types = + std::optional contained_types = schema_.getAliasTypeSetContainedTypes( schema_.mapTypeToAliasTypeSet(argument.type())); if (contained_types && !contained_types->empty()) { diff --git a/torch/csrc/utils/schema_info.h b/torch/csrc/utils/schema_info.h index 461f5a6f0427b..acda1bffc1538 100644 --- a/torch/csrc/utils/schema_info.h +++ b/torch/csrc/utils/schema_info.h @@ -61,7 +61,7 @@ struct TORCH_API SchemaInfo { void addArgumentValue(const std::string& name, const at::IValue& value); void addArgumentValues( - const std::vector>& value_list); + const std::vector>& value_list); void addArgumentValues( const std::unordered_map& values); diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp index e1755b5b36248..4fd398d1a8faf 100644 --- a/torch/csrc/utils/tensor_new.cpp +++ b/torch/csrc/utils/tensor_new.cpp @@ -42,7 +42,7 @@ using at::ScalarType; using at::Storage; using at::Tensor; using at::TensorOptions; -using c10::optional; +using std::optional; namespace torch::utils { namespace { @@ -53,7 +53,7 @@ thread_local bool kOnlyLiftCPUTensors = false; TensorOptions build_options( c10::TensorOptions options, at::ScalarType scalar_type, - const c10::optional& device = c10::nullopt) { + const std::optional& device = c10::nullopt) { options = options.dtype(scalar_type); if (device.has_value()) { return options.device(device); @@ -172,7 +172,7 @@ ScalarType infer_scalar_type(PyObject* obj) { Py_TYPE(obj)->tp_name, "'"); if (PySequence_Check(obj)) { - c10::optional scalarType; + std::optional scalarType; auto length = PySequence_Length(obj); if (length < 0) throw python_error(); @@ -290,7 +290,7 @@ void recursive_store( Tensor internal_new_from_data( c10::TensorOptions options, at::ScalarType scalar_type, - c10::optional device_opt, + std::optional device_opt, PyObject* data, bool copy_variables, bool copy_numpy, @@ -489,7 +489,7 @@ Tensor internal_new_from_data( Tensor new_from_data_copy( 
     c10::TensorOptions options,
     at::ScalarType scalar_type,
-    c10::optional device,
+    std::optional device,
     PyObject* data) {
   return internal_new_from_data(
       options,
@@ -504,7 +504,7 @@ Tensor new_from_data_copy(
 Tensor legacy_new_from_sequence(
     c10::TensorOptions options,
     at::ScalarType scalar_type,
-    c10::optional device,
+    std::optional device,
     PyObject* data) {
   TORCH_CHECK_TYPE(
       PySequence_Check(data),
@@ -570,7 +570,7 @@ void check_base_legacy_new(
 // TODO: Make this accept options instead of dispatch key
 void check_legacy_ctor_device(
     c10::DispatchKey dispatch_key,
-    c10::optional device) {
+    std::optional device) {
   if (device.has_value()) {
     TORCH_CHECK(
         dispatchKeyToDeviceType(dispatch_key) == device.value().type(),
@@ -833,7 +833,7 @@ Tensor legacy_tensor_new(
 Tensor indexing_tensor_from_data(
     c10::TensorOptions options,
     at::ScalarType scalar_type,
-    c10::optional device,
+    std::optional device,
     PyObject* data) {
   // Specific to tensor indexing, converts an indexing list to an
   // indexing tensor (type Byte or Long)
@@ -877,7 +877,7 @@ static Tensor sparse_compressed_tensor_ctor_worker(
     c10::DispatchKey dispatch_key,
     at::ScalarType scalar_type,
     PythonArgs& r,
-    c10::optional required_layout) {
+    std::optional required_layout) {
   TORCH_INTERNAL_ASSERT(!isSparseCsr(dispatchKeyToBackend(dispatch_key)));
   TORCH_INTERNAL_ASSERT(!isSparse(dispatchKeyToBackend(dispatch_key)));
   enum {
@@ -971,7 +971,7 @@ static Tensor sparse_compressed_tensor_ctor_worker(
       /*copy_variables=*/false,
       /*copy_numpy=*/true,
       /*type_inference=*/true);
-  c10::optional layout =
+  std::optional layout =
       (required_layout ? r.layoutWithDefault(ARG_LAYOUT, required_layout.value()) : r.layoutOptional(ARG_LAYOUT));
@@ -1027,7 +1027,7 @@ static Tensor sparse_compressed_tensor_ctor_worker(
       /*copy_variables=*/false,
       /*copy_numpy=*/true,
       /*type_inference=*/true);
-  c10::optional layout =
+  std::optional layout =
       (required_layout ? r.layoutWithDefault(ARG_LAYOUT1, required_layout.value()) : r.layoutOptional(ARG_LAYOUT1));
@@ -1054,7 +1054,7 @@ Tensor sparse_compressed_tensor_ctor(
     c10::DispatchKey dispatch_key,
     at::ScalarType scalar_type,
     PythonArgs& r) {
-  c10::optional required_layout{};
+  std::optional required_layout{};
   return sparse_compressed_tensor_ctor_worker(
       "sparse_compressed_tensor",
       dispatch_key,
@@ -1067,7 +1067,7 @@ Tensor sparse_csr_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r) {
-  c10::optional required_layout(c10::Layout::SparseCsr);
+  std::optional required_layout(c10::Layout::SparseCsr);
   return sparse_compressed_tensor_ctor_worker(
       "sparse_csr_tensor", dispatch_key, scalar_type, r, required_layout);
 }
@@ -1076,7 +1076,7 @@ Tensor sparse_csc_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r) {
-  c10::optional required_layout(c10::Layout::SparseCsc);
+  std::optional required_layout(c10::Layout::SparseCsc);
   return sparse_compressed_tensor_ctor_worker(
       "sparse_csc_tensor", dispatch_key, scalar_type, r, required_layout);
 }
@@ -1085,7 +1085,7 @@ Tensor sparse_bsr_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r) {
-  c10::optional required_layout(c10::Layout::SparseBsr);
+  std::optional required_layout(c10::Layout::SparseBsr);
   return sparse_compressed_tensor_ctor_worker(
       "sparse_bsr_tensor", dispatch_key, scalar_type, r, required_layout);
 }
@@ -1094,7 +1094,7 @@ Tensor sparse_bsc_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r) {
-  c10::optional required_layout(c10::Layout::SparseBsc);
+  std::optional required_layout(c10::Layout::SparseBsc);
   return sparse_compressed_tensor_ctor_worker(
       "sparse_bsc_tensor", dispatch_key, scalar_type, r, required_layout);
 }
@@ -1660,9 +1660,9 @@ Tensor tensor_fromDLPack(PyObject* data) {
 Tensor asarray(
     PyObject* obj,
-    c10::optional dtype,
-    c10::optional device,
-    c10::optional copy,
+    std::optional dtype,
+    std::optional device,
+    std::optional copy,
     bool requires_grad) {
   Tensor tensor;
diff --git a/torch/csrc/utils/tensor_new.h b/torch/csrc/utils/tensor_new.h
index a1c34bd448882..70a4fbca0bac3 100644
--- a/torch/csrc/utils/tensor_new.h
+++ b/torch/csrc/utils/tensor_new.h
@@ -44,7 +44,7 @@ at::Tensor legacy_tensor_new(
 at::Tensor indexing_tensor_from_data(
     c10::TensorOptions options,
     at::ScalarType scalar_type,
-    c10::optional device,
+    std::optional device,
     PyObject* data);
 at::Tensor sparse_coo_tensor_ctor(
     c10::DispatchKey dispatch_key,
@@ -130,9 +130,9 @@ at::Tensor tensor_frombuffer(
 at::Tensor tensor_fromDLPack(PyObject* data);
 at::Tensor asarray(
     PyObject* obj,
-    c10::optional dtype,
-    c10::optional device,
-    c10::optional copy,
+    std::optional dtype,
+    std::optional device,
+    std::optional copy,
     bool requires_grad);
 } // namespace utils
 } // namespace torch
diff --git a/torch/csrc/utils/tensor_numpy.cpp b/torch/csrc/utils/tensor_numpy.cpp
index a94ed7783dfd5..9b07b9d32f1c0 100644
--- a/torch/csrc/utils/tensor_numpy.cpp
+++ b/torch/csrc/utils/tensor_numpy.cpp
@@ -473,7 +473,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
     }
   }
-  const auto target_device = [&]() -> c10::optional {
+  const auto target_device = [&]() -> std::optional {
     // note(crcrpar): zero-size arrays come with nullptr.
     // ref:
     // https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html#cuda-array-interface-version-3
diff --git a/torch/csrc/utils/torch_dispatch_mode.h b/torch/csrc/utils/torch_dispatch_mode.h
index 79173aeb3e007..d1c1392e37d63 100644
--- a/torch/csrc/utils/torch_dispatch_mode.h
+++ b/torch/csrc/utils/torch_dispatch_mode.h
@@ -35,7 +35,7 @@ struct StashTorchDispatchModeGuard {
  private:
   std::shared_ptr saved_mode_;
-  c10::optional saved_mode_key_;
+  std::optional saved_mode_key_;
 };
 struct StashTorchDispatchStackGuard {
diff --git a/torch/csrc/utils/variadic.h b/torch/csrc/utils/variadic.h
index 9c021d9f5cd3d..78ffe29971423 100644
--- a/torch/csrc/utils/variadic.h
+++ b/torch/csrc/utils/variadic.h
@@ -18,7 +18,7 @@ struct CountTensors : IterArgs {
   void operator()(const at::Tensor& x) {
     out += 1;
   }
-  void operator()(const c10::optional& x) {
+  void operator()(const std::optional& x) {
     out += x.has_value();
   }
   void operator()(at::ArrayRef xs) {
diff --git a/torch/custom_class_detail.h b/torch/custom_class_detail.h
index 736d5aacdaa32..e27721c349864 100644
--- a/torch/custom_class_detail.h
+++ b/torch/custom_class_detail.h
@@ -61,7 +61,7 @@ struct arg {
   // IValue's default constructor makes it None, which is not distinguishable
   // from an actual, user-provided default value that is None. This boolean
   // helps distinguish between the two cases.
-  c10::optional value_;
+  std::optional value_;
 };
 namespace detail {
diff --git a/torch/library.h b/torch/library.h
index c38179a6eea1d..3c1d0c415106f 100644
--- a/torch/library.h
+++ b/torch/library.h
@@ -299,9 +299,9 @@ class TORCH_API CppFunction final {
   }
  private:
-  c10::optional dispatch_key_;
+  std::optional dispatch_key_;
   c10::KernelFunction func_;
-  c10::optional cpp_signature_;
+  std::optional cpp_signature_;
   std::unique_ptr schema_;
   std::string debug_;
@@ -316,7 +316,7 @@ class TORCH_API CppFunction final {
   CppFunction(
       c10::KernelFunction func,
-      c10::optional cpp_signature,
+      std::optional cpp_signature,
       std::unique_ptr schema);
 };
@@ -555,7 +555,7 @@ class TORCH_API Library final {
   Library(
       Kind kind,
      std::string ns,
-      c10::optional k,
+      std::optional k,
      const char* file,
      uint32_t line);
@@ -847,9 +847,9 @@ class TORCH_API Library final {
  private:
   Kind kind_;
-  c10::optional ns_;
-  c10::optional dispatch_key_;
-  c10::optional> python_module_;
+  std::optional ns_;
+  std::optional dispatch_key_;
+  std::optional> python_module_;
   const char* file_;
   uint32_t line_;
@@ -889,7 +889,7 @@ class TorchLibraryInit final {
       Library::Kind kind,
       InitFn* fn,
      const char* ns,
-      c10::optional k,
+      std::optional k,
      const char* file,
      uint32_t line)
       : lib_(kind, ns, k, file, line) {