From 92f7ad879e54f86b0107d9f4837e1b4b7589eaf0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Wed, 11 Oct 2023 23:47:33 +0200 Subject: [PATCH 01/26] Add item_bool --- pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java | 2 ++ pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java | 1 + 2 files changed, 3 insertions(+) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index c392efaf2dc..45ff765bc91 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -148,6 +148,8 @@ private native void allocate( + public native @Cast("bool") @Name("item") boolean item_bool(); + public native @Name("item") byte item_char(); public native @Name("item") short item_short(); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index e0b246b634a..57a4f36e306 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1857,6 +1857,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_long")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_float")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_double")) + .put(new Info("at::Tensor::item").javaNames("item_bool")) .put(new Info("at::Tensor::item").javaNames("item_char")) .put(new Info("at::Tensor::item").javaNames("item_short")) .put(new Info("at::Tensor::item").javaNames("item_int")) From ebdcdaac8edd954294954a35f46b4317fda5ded6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 16 Oct 2023 11:32:46 +0200 Subject: [PATCH 02/26] Update Pytorch to 2.1 --- pytorch/README.md | 6 +- pytorch/cppbuild.sh | 2 +- pytorch/platform/gpu/pom.xml | 2 +- 
pytorch/platform/pom.xml | 2 +- pytorch/pom.xml | 2 +- pytorch/samples/pom.xml | 4 +- .../java/org/bytedeco/pytorch/Allocator.java | 2 +- .../java/org/bytedeco/pytorch/AnyModule.java | 4 + .../org/bytedeco/pytorch/ArgumentDef.java | 16 +- .../org/bytedeco/pytorch/AutogradMeta.java | 6 + .../org/bytedeco/pytorch/AutogradState.java | 2 +- .../gen/java/org/bytedeco/pytorch/Blob.java | 2 +- .../java/org/bytedeco/pytorch/BlockWrap.java | 10 +- .../bytedeco/pytorch/CPUGeneratorImpl.java | 2 + .../bytedeco/pytorch/CUDAHooksInterface.java | 16 +- .../gen/java/org/bytedeco/pytorch/Code.java | 2 +- .../pytorch/CompilationUnitVector.java | 47 - .../java/org/bytedeco/pytorch/Context.java | 4 +- .../bytedeco/pytorch/CopyBytesFunction.java | 34 - .../java/org/bytedeco/pytorch/DataPtr.java | 21 +- .../org/bytedeco/pytorch/DeleterFnPtr.java | 29 - .../DeserializationStorageContext.java | 32 +- .../gen/java/org/bytedeco/pytorch/Device.java | 10 +- .../bytedeco/pytorch/ExperimentalConfig.java | 10 +- .../bytedeco/pytorch/FuncTorchTLSBase.java | 1 + .../bytedeco/pytorch/FunctionPostHook.java | 2 + .../org/bytedeco/pytorch/FunctionPreHook.java | 2 + .../java/org/bytedeco/pytorch/Generator.java | 7 + .../org/bytedeco/pytorch/GeneratorImpl.java | 2 + .../gen/java/org/bytedeco/pytorch/IValue.java | 17 +- .../org/bytedeco/pytorch/InferenceMode.java | 2 +- .../bytedeco/pytorch/InstructionVector.java | 47 - .../java/org/bytedeco/pytorch/JitModule.java | 28 +- .../org/bytedeco/pytorch/JitNodeWrap.java | 10 +- .../java/org/bytedeco/pytorch/JitObject.java | 5 +- .../java/org/bytedeco/pytorch/ListType.java | 1 + .../bytedeco/pytorch/MPSHooksInterface.java | 29 +- .../org/bytedeco/pytorch/NamedIValue.java | 41 - .../bytedeco/pytorch/NamedIValuePolicy.java | 44 - .../org/bytedeco/pytorch/NamedTensor.java | 41 - .../bytedeco/pytorch/NamedTensorPolicy.java | 44 - .../bytedeco/pytorch/NestedTensorImpl.java | 62 +- .../gen/java/org/bytedeco/pytorch/Node.java | 19 + 
.../org/bytedeco/pytorch/OperandInfo.java | 8 +- .../bytedeco/pytorch/OperationCreator.java | 29 - .../org/bytedeco/pytorch/OperatorHandle.java | 2 + .../bytedeco/pytorch/OperatorOptional.java | 35 - .../pytorch/OperatorOptionalVector.java | 90 - .../pytorch/PlacementDeleteContext.java | 8 +- .../org/bytedeco/pytorch/PlacementDtor.java | 29 - .../bytedeco/pytorch/PyInterpreterVTable.java | 3 + .../bytedeco/pytorch/PyTorchStreamReader.java | 88 +- .../org/bytedeco/pytorch/SafePyObject.java | 5 + .../org/bytedeco/pytorch/SavedVariable.java | 2 + .../gen/java/org/bytedeco/pytorch/Scalar.java | 37 +- .../org/bytedeco/pytorch/SequentialImpl.java | 6 + .../pytorch/SerializationStorageContext.java | 47 - .../bytedeco/pytorch/StackEntryVector.java | 90 - .../java/org/bytedeco/pytorch/Storage.java | 18 +- .../org/bytedeco/pytorch/StorageImpl.java | 26 +- .../org/bytedeco/pytorch/StorageType.java | 2 +- .../java/org/bytedeco/pytorch/StringType.java | 2 +- .../java/org/bytedeco/pytorch/SymBool.java | 12 +- .../java/org/bytedeco/pytorch/SymFloat.java | 23 + .../org/bytedeco/pytorch/SymFloatType.java | 2 +- .../gen/java/org/bytedeco/pytorch/SymInt.java | 43 +- .../java/org/bytedeco/pytorch/SymIntType.java | 2 +- .../org/bytedeco/pytorch/SymNodeImpl.java | 26 + .../gen/java/org/bytedeco/pytorch/Tensor.java | 47 +- .../java/org/bytedeco/pytorch/TensorBase.java | 42 +- .../org/bytedeco/pytorch/TensorGeometry.java | 5 + .../java/org/bytedeco/pytorch/TensorImpl.java | 48 +- .../org/bytedeco/pytorch/TensorIndex.java | 6 +- .../bytedeco/pytorch/TensorIteratorBase.java | 5 + .../org/bytedeco/pytorch/TensorMaker.java | 12 +- .../org/bytedeco/pytorch/TensorOptions.java | 4 +- .../java/org/bytedeco/pytorch/TreeList.java | 51 - .../pytorch/TreeRefSmallVectorBase.java | 29 - .../pytorch/TreeRefSmallVectorCommon.java | 49 - .../pytorch/TreeRefSmallVectorImpl.java | 71 - .../gen/java/org/bytedeco/pytorch/Type.java | 2 +- .../java/org/bytedeco/pytorch/TypeMeta.java | 23 +- 
.../org/bytedeco/pytorch/TypeMetaData.java | 50 +- .../org/bytedeco/pytorch/UniqueVoidPtr.java | 20 +- .../java/org/bytedeco/pytorch/ValueWrap.java | 10 +- .../org/bytedeco/pytorch/VariableInfo.java | 2 +- .../org/bytedeco/pytorch/VariableVersion.java | 4 +- .../org/bytedeco/pytorch/ZeroPad2dImpl.java | 39 +- .../bytedeco/pytorch/ZeroPad2dOptions.java | 16 +- .../bytedeco/pytorch/attribute_iterator.java | 55 - .../org/bytedeco/pytorch/attribute_list.java | 32 - .../org/bytedeco/pytorch/buffer_iterator.java | 55 - .../org/bytedeco/pytorch/buffer_list.java | 32 - .../pytorch/cuda/CUDAStreamArrayRef.java | 2 +- .../pytorch/cuda/TensorDescriptor.java | 2 +- .../org/bytedeco/pytorch/global/torch.java | 10748 +++++++++------- .../bytedeco/pytorch/global/torch_cuda.java | 510 +- .../pytorch/named_attribute_iterator.java | 55 - .../pytorch/named_attribute_list.java | 32 - .../pytorch/named_buffer_iterator.java | 55 - .../bytedeco/pytorch/named_buffer_list.java | 32 - .../pytorch/named_parameter_iterator.java | 55 - .../pytorch/named_parameter_list.java | 32 - .../bytedeco/pytorch/parameter_iterator.java | 55 - .../org/bytedeco/pytorch/parameter_list.java | 32 - .../pytorch/prime_number_hash_policy.java | 244 - .../pytorch/functions/BackendMetaPtr.java | 31 + .../pytorch/functions/MemCopyFunction.java | 31 + .../pytorch/functions/MetadataLogger.java | 35 + .../pytorch/functions/OperationCreator.java | 34 + .../pytorch/functions/PickleReader.java | 31 + .../pytorch/functions/PlacementConsumer.java | 29 + .../pytorch/functions/PlacementCopier.java | 30 + .../pytorch/functions/PointerSupplier.java | 28 + .../functions/StorageImplCreateHelper.java | 34 + .../functions/TensorTensorRefHook.java | 31 + .../pytorch/functions/TypeParser.java | 31 + .../pytorch/functions/TypePrinter.java | 34 + .../pytorch/functions/TypeResolver.java | 31 + .../pytorch/functions/TypeSupplier.java | 30 + .../org/bytedeco/pytorch/presets/torch.java | 139 +- .../bytedeco/pytorch/presets/torch_cuda.java 
| 2 +- .../pytorch/presets/torch_cuda_include.h | 24 +- .../bytedeco/pytorch/presets/torch_include.h | 429 +- 124 files changed, 7986 insertions(+), 6917 deletions(-) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DeleterFnPtr.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValuePolicy.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorPolicy.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorBase.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/BackendMetaPtr.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/MemCopyFunction.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/MetadataLogger.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/OperationCreator.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleReader.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementConsumer.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementCopier.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerSupplier.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/StorageImplCreateHelper.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorRefHook.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeParser.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TypePrinter.java create mode 100644 
pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeResolver.java create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeSupplier.java diff --git a/pytorch/README.md b/pytorch/README.md index 352be0a24a1..be4da837483 100644 --- a/pytorch/README.md +++ b/pytorch/README.md @@ -9,7 +9,7 @@ Introduction ------------ This directory contains the JavaCPP Presets module for: - * PyTorch 2.0.1 https://pytorch.org/ + * PyTorch 2.1.0 https://pytorch.org/ Please refer to the parent README.md file for more detailed information about the JavaCPP Presets. @@ -48,14 +48,14 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic org.bytedeco pytorch-platform - 2.0.1-1.5.10-SNAPSHOT + 2.1.0-1.5.10-SNAPSHOT org.bytedeco pytorch-platform-gpu - 2.0.1-1.5.10-SNAPSHOT + 2.1.0-1.5.10-SNAPSHOT diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh index aebc073401d..b5b4468a8b0 100755 --- a/pytorch/cppbuild.sh +++ b/pytorch/cppbuild.sh @@ -35,7 +35,7 @@ if [[ $PLATFORM == windows* ]]; then export PYTHON_BIN_PATH=$(which python.exe) fi -PYTORCH_VERSION=2.0.1 +PYTORCH_VERSION=2.1.0 mkdir -p "$PLATFORM$EXTENSION" cd "$PLATFORM$EXTENSION" diff --git a/pytorch/platform/gpu/pom.xml b/pytorch/platform/gpu/pom.xml index 6ae56e0ee59..d6f24bdfb39 100644 --- a/pytorch/platform/gpu/pom.xml +++ b/pytorch/platform/gpu/pom.xml @@ -12,7 +12,7 @@ org.bytedeco pytorch-platform-gpu - 2.0.1-${project.parent.version} + 2.1.0-${project.parent.version} JavaCPP Presets Platform GPU for PyTorch diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml index 8df62d99c80..4755c9c317e 100644 --- a/pytorch/platform/pom.xml +++ b/pytorch/platform/pom.xml @@ -12,7 +12,7 @@ org.bytedeco pytorch-platform - 2.0.1-${project.parent.version} + 2.1.0-${project.parent.version} JavaCPP Presets Platform for PyTorch diff --git a/pytorch/pom.xml b/pytorch/pom.xml index 4acbcd7fc5d..14da0bf4f81 100644 --- a/pytorch/pom.xml +++ b/pytorch/pom.xml @@ -11,7 +11,7 @@ 
org.bytedeco pytorch - 2.0.1-${project.parent.version} + 2.1.0-${project.parent.version} JavaCPP Presets for PyTorch diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml index ddab7cd4c0a..c4306cfab81 100644 --- a/pytorch/samples/pom.xml +++ b/pytorch/samples/pom.xml @@ -12,14 +12,14 @@ org.bytedeco pytorch-platform - 2.0.1-1.5.10-SNAPSHOT + 2.1.0-1.5.10-SNAPSHOT org.bytedeco pytorch-platform-gpu - 2.0.1-1.5.10-SNAPSHOT + 2.1.0-1.5.10-SNAPSHOT diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java index eb44f918baf..bab8dd1b2e2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java @@ -52,7 +52,7 @@ public class Allocator extends Pointer { // is guaranteed to return a unique_ptr with this deleter attached; // it means the rawAllocate and rawDeallocate APIs are safe to use. // This function MUST always return the same BoundDeleter. 
- public native @Cast("c10::DeleterFnPtr") PointerConsumer raw_deleter(); + public native PointerConsumer raw_deleter(); public native Pointer raw_allocate(@Cast("size_t") long n); public native void raw_deallocate(Pointer ptr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java index 6da3c43040e..acb0b54f7bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java @@ -227,6 +227,8 @@ public class AnyModule extends Pointer { private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module); public AnyModule(ConstantPad1dImpl module) { super((Pointer)null); allocate(module); } private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); + public AnyModule(ZeroPad1dImpl module) { super((Pointer)null); allocate(module); } + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad1dImpl module); public AnyModule(AvgPool1dImpl module) { super((Pointer)null); allocate(module); } private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); public AnyModule(MaxPool1dImpl module) { super((Pointer)null); allocate(module); } @@ -267,6 +269,8 @@ public class AnyModule extends Pointer { private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); public AnyModule(ConstantPad3dImpl module) { super((Pointer)null); allocate(module); } private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); + public AnyModule(ZeroPad3dImpl module) { super((Pointer)null); allocate(module); } + private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad3dImpl module); public AnyModule(AvgPool3dImpl module) { super((Pointer)null); allocate(module); } private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) 
AvgPool3dImpl module); public AnyModule(MaxPool3dImpl module) { super((Pointer)null); allocate(module); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java index f5e011b39e6..b7233c25fc5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java @@ -38,18 +38,10 @@ public class ArgumentDef extends Pointer { return new ArgumentDef((Pointer)this).offsetAddress(i); } - public static class GetTypeFn extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public GetTypeFn(Pointer p) { super(p); } - protected GetTypeFn() { allocate(); } - private native void allocate(); - public native @ByVal Type.TypePtr call(); - } - public native GetTypeFn getTypeFn(); public native ArgumentDef getTypeFn(GetTypeFn setter); - public native GetTypeFn getFakeTypeFn(); public native ArgumentDef getFakeTypeFn(GetTypeFn setter); + public native TypeSupplier getTypeFn(); public native ArgumentDef getTypeFn(TypeSupplier setter); + public native TypeSupplier getFakeTypeFn(); public native ArgumentDef getFakeTypeFn(TypeSupplier setter); public ArgumentDef() { super((Pointer)null); allocate(); } private native void allocate(); - public ArgumentDef(GetTypeFn getTypeFn, GetTypeFn getFakeTypeFn) { super((Pointer)null); allocate(getTypeFn, getFakeTypeFn); } - private native void allocate(GetTypeFn getTypeFn, GetTypeFn getFakeTypeFn); + public ArgumentDef(TypeSupplier getTypeFn, TypeSupplier getFakeTypeFn) { super((Pointer)null); allocate(getTypeFn, getFakeTypeFn); } + private native void allocate(TypeSupplier getTypeFn, TypeSupplier getFakeTypeFn); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java index 73c4c4f626e..a68a8d19f87 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java @@ -71,6 +71,12 @@ public class AutogradMeta extends AutogradMetaInterface { + // The post_acc_grad_hooks_ field stores only Python hooks + // (PyFunctionTensorPostAccGradHooks) that are called after the + // .grad field has been accumulated into. This is less complicated + // than the hooks_ field, which encapsulates a lot more. + public native @UniquePtr @Cast({"", "", "std::unique_ptr&&"}) PostAccumulateGradHook post_acc_grad_hooks_(); public native AutogradMeta post_acc_grad_hooks_(PostAccumulateGradHook setter); + // Only meaningful on leaf variables (must be false otherwise) public native @Cast("bool") boolean requires_grad_(); public native AutogradMeta requires_grad_(boolean setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java index 3fd2ecff942..d9fe4f36999 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java @@ -46,7 +46,7 @@ private native void allocate( public native void set_inference_mode(@Cast("bool") boolean enabled); - public native void set_multithreading_enabled(@Cast("bool") boolean mulithreading_enabled); + public native void set_multithreading_enabled(@Cast("bool") boolean multithreading_enabled); public native void set_view_replay_enabled(@Cast("bool") boolean view_replay_enabled); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java index 688b9c76ecb..da86aa83043 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java @@ -69,7 +69,7 @@ public class Blob extends Pointer { * \brief Gets the const reference of the stored object. The code checks if * the stored object is of the desired type. 
*/ - // TODO(jerryzh): add a Get(DeviceType) function? + // TODO(jerryzh): add a Get(c10::DeviceType) function? public native @NoException(true) Pointer GetRaw(); /** diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java index 1320b1b5311..db2effd1d10 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java @@ -32,13 +32,5 @@ public class BlockWrap extends Pointer { private native void allocate(Block p); public native void clear(); public native Block elem(); public native BlockWrap elem(Block setter); - public static class Clear_cb_Pointer extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Clear_cb_Pointer(Pointer p) { super(p); } - protected Clear_cb_Pointer() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0); - } - public native Clear_cb_Pointer clear_cb(); public native BlockWrap clear_cb(Clear_cb_Pointer setter); + public native PointerConsumer clear_cb(); public native BlockWrap clear_cb(PointerConsumer setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java index dd8c370d49d..92d33064b98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java @@ -33,6 +33,8 @@ public class CPUGeneratorImpl extends GeneratorImpl { // CPUGeneratorImpl methods public native @SharedPtr CPUGeneratorImpl clone(); public native void set_current_seed(@Cast("uint64_t") long seed); + public native void set_offset(@Cast("uint64_t") long offset); + public native @Cast("uint64_t") long get_offset(); public native @Cast("uint64_t") long current_seed(); public native @Cast("uint64_t") long seed(); public native void 
set_state(@Const @ByRef TensorImpl new_state); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java index 9156c2370a2..633a735e0cf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java @@ -64,7 +64,7 @@ public class CUDAHooksInterface extends Pointer { public native @ByVal Device getDeviceFromPtr(Pointer arg0); - public native @Cast("bool") boolean isPinnedPtr(Pointer arg0); + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer arg0); public native @Cast("bool") boolean hasCUDA(); @@ -80,9 +80,9 @@ public class CUDAHooksInterface extends Pointer { public native @Cast("const at::cuda::NVRTC*") @ByRef Pointer nvrtc(); - public native @Cast("bool") boolean hasPrimaryContext(@Cast("int64_t") long device_index); + public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index); - public native @Cast("int64_t") long current_device(); + public native @Cast("c10::DeviceIndex") byte current_device(); public native Allocator getPinnedMemoryAllocator(); @@ -106,15 +106,15 @@ public class CUDAHooksInterface extends Pointer { public native double batchnormMinEpsilonCuDNN(); - public native @Cast("int64_t") long cuFFTGetPlanCacheMaxSize(@Cast("int64_t") long arg0); + public native @Cast("int64_t") long cuFFTGetPlanCacheMaxSize(@Cast("c10::DeviceIndex") byte arg0); - public native void cuFFTSetPlanCacheMaxSize(@Cast("int64_t") long arg0, @Cast("int64_t") long arg1); + public native void cuFFTSetPlanCacheMaxSize(@Cast("c10::DeviceIndex") byte arg0, @Cast("int64_t") long arg1); - public native @Cast("int64_t") long cuFFTGetPlanCacheSize(@Cast("int64_t") long arg0); + public native @Cast("int64_t") long cuFFTGetPlanCacheSize(@Cast("c10::DeviceIndex") byte arg0); - public native void cuFFTClearPlanCache(@Cast("int64_t") long arg0); + public native 
void cuFFTClearPlanCache(@Cast("c10::DeviceIndex") byte arg0); public native int getNumGPUs(); - public native void deviceSynchronize(@Cast("int64_t") long arg0); + public native void deviceSynchronize(@Cast("c10::DeviceIndex") byte arg0); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java index 1350f11bacf..e0fc05b2248 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java @@ -78,7 +78,7 @@ private native void allocate( public native @Cast("size_t") long num_bailouts(); public native @Const @ByRef IValueVector constant_table(); public native @Const @ByRef TypeVector type_table(); - public native @Const @ByRef InstructionVector instructions(); + public native @StdVector Instruction instructions(); public native @Const @ByRef StringSizeTMap op_to_num_specified_args(); public native @Cast("torch::jit::Node**") @StdVector PointerPointer instructions_source(); public native void request_bailout(@Cast("size_t") long index); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java deleted file mode 100644 index ff4b8bb1df9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static 
org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CompilationUnitVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CompilationUnitVector(Pointer p) { super(p); } - public CompilationUnitVector() { allocate(); } - private native void allocate(); - - - public boolean empty() { return size() == 0; } - public native long size(); - - public CompilationUnit front() { return get(0); } - public CompilationUnit back() { return get(size() - 1); } - @Index(function = "at") public native @ByRef CompilationUnit get(@Cast("size_t") long i); - - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const CompilationUnit get(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java index 62169c23401..dfcd3ecb333 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java @@ -39,13 +39,14 @@ public class Context extends Pointer { public native @Const @ByRef Generator defaultGenerator(@ByVal Device device); public native @ByVal Device getDeviceFromPtr(Pointer data, DeviceType device_type); public native @ByVal Device getDeviceFromPtr(Pointer data, @Cast("c10::DeviceType") byte device_type); - public static native @Cast("bool") boolean isPinnedPtr(Pointer data); + public static native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); public static native @Cast("bool") boolean hasOpenMP(); public static native 
@Cast("bool") boolean hasMKL(); public static native @Cast("bool") boolean hasLAPACK(); public static native @Cast("bool") boolean hasMKLDNN(); public static native @Cast("bool") boolean hasMAGMA(); public static native @Cast("bool") boolean hasCUDA(); + public static native @Cast("bool") boolean hasMTIA(); public static native @Cast("bool") boolean hasCUDART(); public static native long versionCUDART(); public static native @Cast("bool") boolean hasCuDNN(); @@ -55,6 +56,7 @@ public class Context extends Pointer { public static native @Cast("bool") boolean hasMPS(); public static native @Cast("bool") boolean hasIPU(); public static native @Cast("bool") boolean hasXLA(); + public static native @Cast("bool") boolean hasXPU(); public static native @Cast("bool") boolean hasLazy(); public static native @Cast("bool") boolean hasORT(); // defined in header so that getNonVariableType has ability to inline diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java deleted file mode 100644 index f6f6de80c36..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CopyBytesFunction extends FunctionPointer { - static { 
Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CopyBytesFunction(Pointer p) { super(p); } - protected CopyBytesFunction() { allocate(); } - private native void allocate(); - public native void call( - @Cast("size_t") long nbytes, - @Const Pointer src, - @ByVal Device src_device, - Pointer dst, - @ByVal Device dst_device); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java index 88ca2be01a9..4149f900580 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java @@ -46,20 +46,17 @@ public class DataPtr extends Pointer { private native void allocate(); public DataPtr(Pointer data, @ByVal Device device) { super((Pointer)null); allocate(data, device); } private native void allocate(Pointer data, @ByVal Device device); - public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); } - private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter, @ByVal Device device); - public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); } - private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter, @ByVal Device device); - public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); } - private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter, @ByVal Device device); + public DataPtr(Pointer data, Pointer ctx, PointerConsumer ctx_deleter, @ByVal Device device) { super((Pointer)null); 
allocate(data, ctx, ctx_deleter, device); } + private native void allocate(Pointer data, Pointer ctx, PointerConsumer ctx_deleter, @ByVal Device device); public native @Name("operator ->") Pointer access(); public native void clear(); public native Pointer get(); + public native Pointer mutable_get(); public native Pointer get_context(); public native Pointer release_context(); public native @Cast("bool") @Name("operator bool") boolean asBoolean(); - public native @Cast("c10::DeleterFnPtr") PointerConsumer get_deleter(); + public native PointerConsumer get_deleter(); /** * Compare the deleter in a DataPtr to expected_deleter. * If it matches, replace the deleter with new_deleter @@ -98,14 +95,8 @@ public class DataPtr extends Pointer { * in question to confirm this. */ public native @Cast("bool") boolean compare_exchange_deleter( - @Cast("c10::DeleterFnPtr") PointerConsumer expected_deleter, - @Cast("c10::DeleterFnPtr") PointerConsumer new_deleter); - public native @Cast("bool") boolean compare_exchange_deleter( - @Cast("c10::DeleterFnPtr") Pointer expected_deleter, - @Cast("c10::DeleterFnPtr") Pointer new_deleter); - public native @Cast("bool") boolean compare_exchange_deleter( - @Cast("c10::DeleterFnPtr") long expected_deleter, - @Cast("c10::DeleterFnPtr") long new_deleter); + PointerConsumer expected_deleter, + PointerConsumer new_deleter); public native @ByVal Device device(); // Unsafely mutates the device on a DataPtr. Under normal use, // you should never actually need to call this function. 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeleterFnPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeleterFnPtr.java deleted file mode 100644 index eb998102820..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeleterFnPtr.java +++ /dev/null @@ -1,29 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DeleterFnPtr extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DeleterFnPtr(Pointer p) { super(p); } - protected DeleterFnPtr() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java index 67d3a7dc2a0..89262907d22 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java @@ -16,36 +16,12 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; + // namespace caffe2 - -// Used in torch.package and TorchScript deserialization to coordinate -// sharing of storages between models. 
-@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class DeserializationStorageContext extends Pointer { - static { Loader.load(); } + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public DeserializationStorageContext() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeserializationStorageContext(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public DeserializationStorageContext(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public DeserializationStorageContext position(long position) { - return (DeserializationStorageContext)super.position(position); - } - @Override public DeserializationStorageContext getPointer(long i) { - return new DeserializationStorageContext((Pointer)this).offsetAddress(i); - } - - public DeserializationStorageContext() { super((Pointer)null); allocate(); } - private native void allocate(); - - - - public native void addStorage(@StdString BytePointer name, @Cast({"", "c10::Storage&&"}) @StdMove Storage storage); - public native void addStorage(@StdString String name, @Cast({"", "c10::Storage&&"}) @StdMove Storage storage); - - public native @Cast("bool") boolean hasStorage(@StdString BytePointer name); - public native @Cast("bool") boolean hasStorage(@StdString String name); - - public native @Cast({"", "c10::Storage&&"}) @StdMove Storage getStorage(@StdString BytePointer name); - public native @Cast({"", "c10::Storage&&"}) @StdMove Storage getStorage(@StdString String name); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java index 0a895e88832..822d4df91e6 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java @@ -18,7 +18,7 @@ import static org.bytedeco.pytorch.global.torch.*; -/** Represents a a compute device on which a tensor is located. A device is +/** Represents a compute device on which a tensor is located. A device is * uniquely identified by a type, which specifies the type of machine it is * (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the * specific compute device when there is more than one of a certain type. The @@ -79,6 +79,9 @@ public class Device extends Pointer { /** Return true if the device is of CUDA type. */ public native @Cast("bool") @NoException(true) boolean is_cuda(); + /** Return true if the device is of PrivateUse1 type. */ + public native @Cast("bool") @NoException(true) boolean is_privateuseone(); + /** Return true if the device is of MPS type. */ public native @Cast("bool") @NoException(true) boolean is_mps(); @@ -97,6 +100,9 @@ public class Device extends Pointer { /** Return true if the device is of XLA type. */ public native @Cast("bool") @NoException(true) boolean is_xla(); + /** Return true if the device is of MTIA type. */ + public native @Cast("bool") @NoException(true) boolean is_mtia(); + /** Return true if the device is of HPU type. */ public native @Cast("bool") @NoException(true) boolean is_hpu(); @@ -118,7 +124,7 @@ public class Device extends Pointer { /** Return true if the device is of CPU type. */ public native @Cast("bool") @NoException(true) boolean is_cpu(); - /** Return true if the device supports arbirtary strides. */ + /** Return true if the device supports arbitrary strides. */ public native @Cast("bool") @NoException(true) boolean supports_as_strided(); /** Same string as returned from operator<<. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java index 7661ce7bd4e..279ff234f29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java @@ -38,12 +38,14 @@ public ExperimentalConfig( @Cast("bool") boolean profiler_measure_per_kernel/*=false*/, @Cast("bool") boolean verbose/*=false*/, @ByVal(nullValue = "std::vector{}") StringVector performance_events, - @Cast("bool") boolean adjust_timestamps/*=false*/) { super((Pointer)null); allocate(profiler_metrics, profiler_measure_per_kernel, verbose, performance_events, adjust_timestamps); } + @Cast("bool") boolean enable_cuda_sync_events/*=false*/, + @Cast("bool") boolean adjust_timestamps/*=false*/) { super((Pointer)null); allocate(profiler_metrics, profiler_measure_per_kernel, verbose, performance_events, enable_cuda_sync_events, adjust_timestamps); } private native void allocate( @ByVal(nullValue = "std::vector{}") StringVector profiler_metrics, @Cast("bool") boolean profiler_measure_per_kernel/*=false*/, @Cast("bool") boolean verbose/*=false*/, @ByVal(nullValue = "std::vector{}") StringVector performance_events, + @Cast("bool") boolean enable_cuda_sync_events/*=false*/, @Cast("bool") boolean adjust_timestamps/*=false*/); public ExperimentalConfig() { super((Pointer)null); allocate(); } private native void allocate(); @@ -57,6 +59,12 @@ private native void allocate( * An empty list will disable performance event based profiling altogether. */ public native @ByRef StringVector performance_events(); public native ExperimentalConfig performance_events(StringVector setter); + /* + * For CUDA profiling mode, enable adding CUDA synchronization events + * that expose CUDA device, stream and event synchronization activities. + * This feature is new and currently disabled by default. 
+ */ + public native @Cast("bool") boolean enable_cuda_sync_events(); public native ExperimentalConfig enable_cuda_sync_events(boolean setter); /* * Controls whether or not timestamp adjustment occurs after profiling. * The purpose of this is to adjust Vulkan event timelines to align with those diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java index c826d754b4c..522c9d2423a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java @@ -45,6 +45,7 @@ public class FuncTorchTLSBase extends Pointer { public native @UniquePtr FuncTorchTLSBase deepcopy(); public native @Cast("int64_t") long checkSupportsSingleLevelAutogradFunction(); + public native void checkSupportsCppAutogradFunction(); public native void checkSupportsInplaceRequiresGrad(); public native void checkSupportsRetainGrad(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java index b8499221f1b..171b416baf1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java @@ -27,4 +27,6 @@ public class FunctionPostHook extends Pointer { public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply( @Cast({"", "std::vector"}) @StdMove TensorVector outputs, @Cast({"", "std::vector"}) @StdMove TensorVector inputs); + // only implemented for python hooks, registers hook with compiled autograd + public native void compiled_args(@ByRef CompiledNodeArgs args); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java index 1edde112cfc..749a9b3c347 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java @@ -25,4 +25,6 @@ public class FunctionPreHook extends Pointer { public FunctionPreHook(Pointer p) { super(p); } public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector grads); + // only implemented for python hooks, registers hook with compiled autograd + public native void compiled_args(@ByRef CompiledNodeArgs args); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java index 8dd29e67335..bd1dee91cc3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java @@ -85,6 +85,13 @@ public class Generator extends Pointer { public native @Const @ByRef GeneratorImplPtr getIntrusivePtr(); public native void set_current_seed(@Cast("uint64_t") long seed); + // Sets the offset of Generator state to the desired offset. This is currently + // supported for only Philox based Generators, i.e., CUDA and MPS. + public native void set_offset(@Cast("uint64_t") long offset); + + // Returns the offset of Generator state. This is currently supported for only + // Philox based Generators, i.e., CUDA and MPS. 
+ public native @Cast("uint64_t") long get_offset(); public native @Cast("uint64_t") long current_seed(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java index ad4d1087ba7..147677fde43 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java @@ -35,6 +35,8 @@ public class GeneratorImpl extends Pointer { // Common methods for all generators public native void set_current_seed(@Cast("uint64_t") long seed); + public native void set_offset(@Cast("uint64_t") long offset); + public native @Cast("uint64_t") long get_offset(); public native @Cast("uint64_t") long current_seed(); public native @Cast("uint64_t") long seed(); public native void set_state(@Const @ByRef TensorImpl new_state); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java index fef627f9ae0..4388e2cec88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java @@ -252,6 +252,14 @@ public class IValue extends Pointer { public native @ByVal SymFloat toSymFloat(); + public IValue(@ByVal SymBool i) { super((Pointer)null); allocate(i); } + private native void allocate(@ByVal SymBool i); + + public native @Cast("bool") boolean isSymBool(); + + + public native @ByVal SymBool toSymBool(); + // allow you to pass literals (3, 4) without ambiguity public IValue(int i) { super((Pointer)null); allocate(i); } private native void allocate(int i); @@ -268,9 +276,11 @@ public class IValue extends Pointer { // IntList public native @Cast("bool") boolean isIntList(); + public native @Cast("bool") boolean isSymIntList(); public native @ByVal LongList toIntList(); public native @ByVal @Cast("std::vector*") LongVector toIntVector(); + public native @ByVal SymIntVector toSymIntVector(); public native @ByVal DimVector 
toDimVector(); // ConstantString @@ -497,6 +507,11 @@ public class IValue extends Pointer { // TODO: There are several places that recurse over IValue. This is fragile. // This visitor should be used to recurse over ivalues. + public native @ByVal IValue deepcopy(@ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); public native @ByVal IValue deepcopy(); - public native @ByVal IValue deepcopy(@ByRef HashAliasedIValueMap memo); + public native @ByVal IValue deepcopy( + @ByRef HashAliasedIValueMap memo, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @ByVal IValue deepcopy( + @ByRef HashAliasedIValueMap memo); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java index 7b01b98d7a2..1b5381c0d3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java @@ -71,7 +71,7 @@ public class InferenceMode extends Pointer { // // 3. Why does setting InferenceMode also set GradMode? // - // This is required since InferenceMode is a faster and more restricive + // This is required since InferenceMode is a faster and more restrictive // version of NoGradGuard. All runtime checks using GradMode::is_enabled() // are applicable to InferenceMode as well, e.g. // `tensorTypeInCurrentExecutionContext` in interpreter.cpp. 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java deleted file mode 100644 index 5df2a22e996..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InstructionVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public InstructionVector(Pointer p) { super(p); } - public InstructionVector() { allocate(); } - private native void allocate(); - - - public boolean empty() { return size() == 0; } - public native long size(); - - public Instruction front() { return get(0); } - public Instruction back() { return get(size() - 1); } - @Index(function = "at") public native @ByRef Instruction get(@Cast("size_t") long i); - - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const Instruction get(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java index 5df5dbd4718..bd1c16b5ec7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java @@ -37,6 +37,9 @@ public class JitModule extends JitObject { private native void allocate(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type); public JitModule() { super((Pointer)null); allocate(); } private native void allocate(); + public JitModule(@Const @ByRef JitModule arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef JitModule arg0); + public native @ByRef @Name("operator =") JitModule put(@Const @ByRef JitModule arg0); public JitModule( @ByVal QualifiedName arg0, @SharedPtr CompilationUnit cu, @@ -103,10 +106,10 @@ public native void register_attribute( public native void apply(@Const @ByRef JitModuleApplyFunction fn); - public native @ByVal buffer_list buffers(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal buffer_list buffers(); - 
public native @ByVal named_buffer_list named_buffers(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal named_buffer_list named_buffers(); + public native @ByVal @Cast("torch::jit::buffer_list*") module_list buffers(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal @Cast("torch::jit::buffer_list*") module_list buffers(); + public native @ByVal @Cast("torch::jit::named_buffer_list*") module_list named_buffers(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal @Cast("torch::jit::named_buffer_list*") module_list named_buffers(); public native @ByVal module_list children(); // direct modules public native @ByVal named_module_list named_children(); @@ -114,16 +117,16 @@ public native void register_attribute( public native @ByVal named_module_list named_modules(); // all tensors involved in gradient optimization - public native @ByVal parameter_list parameters(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal parameter_list parameters(); - public native @ByVal named_parameter_list named_parameters(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal named_parameter_list named_parameters(); + public native @ByVal @Cast("torch::jit::parameter_list*") module_list parameters(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal @Cast("torch::jit::parameter_list*") module_list parameters(); + public native @ByVal @Cast("torch::jit::named_parameter_list*") module_list named_parameters(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal @Cast("torch::jit::named_parameter_list*") module_list named_parameters(); // all members of the object, similar to iterating over dir(obj) in python - public native @ByVal attribute_list attributes(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal attribute_list attributes(); - public native @ByVal named_attribute_list named_attributes(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal named_attribute_list named_attributes(); + 
public native @ByVal @Cast("torch::jit::attribute_list*") module_list attributes(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal @Cast("torch::jit::attribute_list*") module_list attributes(); + public native @ByVal @Cast("torch::jit::named_attribute_list*") module_list named_attributes(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal @Cast("torch::jit::named_attribute_list*") module_list named_attributes(); public native void dump( @Cast("bool") boolean print_method_bodies, @@ -219,6 +222,7 @@ public native void _save_for_mobile( public native @ByVal JitModule copy(); + public native @ByVal JitModule deepcopy(@ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); public native @ByVal JitModule deepcopy(); // Clones both the underlying `ClassType` and the module instance(data), this diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java index 9d2dbfaf87b..ec6d18f9abd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java @@ -27,13 +27,5 @@ public class JitNodeWrap extends Pointer { private native void allocate(JitNode p); public native void clear(); public native JitNode elem(); public native JitNodeWrap elem(JitNode setter); - public static class Clear_cb_Pointer extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Clear_cb_Pointer(Pointer p) { super(p); } - protected Clear_cb_Pointer() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0); - } - public native Clear_cb_Pointer clear_cb(); public native JitNodeWrap clear_cb(Clear_cb_Pointer setter); + public native PointerConsumer clear_cb(); public native JitNodeWrap clear_cb(PointerConsumer setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java index 4dcc937a66b..7c6f00bf376 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java @@ -18,7 +18,6 @@ import static org.bytedeco.pytorch.global.torch.*; -// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) @Name("torch::jit::Object") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class JitObject extends Pointer { static { Loader.load(); } @@ -34,7 +33,9 @@ public class JitObject extends Pointer { public JitObject() { super((Pointer)null); allocate(); } private native void allocate(); - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + public JitObject(@Const @ByRef JitObject arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef JitObject arg0); + public native @ByRef @Name("operator =") JitObject put(@Const @ByRef JitObject arg0); public JitObject(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue) { super((Pointer)null); allocate(_ivalue); } private native void allocate(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue); public JitObject(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type) { super((Pointer)null); allocate(cu, type); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java index 222672f536b..ad73a59c945 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java @@ -45,6 +45,7 @@ public class ListType extends ListSingleElementType { public static native @SharedPtr ListType ofTensors(); public static native @SharedPtr ListType ofOptionalTensors(); public static native @SharedPtr ListType ofInts(); + public static native @SharedPtr ListType ofSymInts(); public static native @SharedPtr ListType ofFloats(); public static native @SharedPtr ListType ofComplexDoubles(); public static native @SharedPtr ListType ofBools(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java index 8be7bcf388e..f95bfffd635 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java @@ -36,25 +36,36 @@ public class MPSHooksInterface extends Pointer { return new MPSHooksInterface((Pointer)this).offsetAddress(i); } + // this fails the implementation if MPSHooks functions are called, but + // MPS backend is not present. 
+// #define FAIL_MPSHOOKS_FUNC(func) +// TORCH_CHECK(false, "Cannot execute ", func, "() without MPS backend."); // Initialize the MPS library state public native void initMPS(); - public native @Cast("bool") boolean hasMPS(); - + public native @Cast("bool") boolean isOnMacOS13orNewer(@Cast("unsigned") int minor/*=0*/); public native @Cast("bool") boolean isOnMacOS13orNewer(); - public native @Const @ByRef Generator getDefaultMPSGenerator(); - public native Allocator getMPSDeviceAllocator(); - public native void deviceSynchronize(); - + public native void commitStream(); + public native Pointer getCommandBuffer(); + public native Pointer getDispatchQueue(); public native void emptyCache(); - public native @Cast("size_t") long getCurrentAllocatedMemory(); - public native @Cast("size_t") long getDriverAllocatedMemory(); - public native void setMemoryFraction(double arg0); + public native void profilerStartTrace(@StdString BytePointer mode, @Cast("bool") boolean waitUntilCompleted); + public native void profilerStartTrace(@StdString String mode, @Cast("bool") boolean waitUntilCompleted); + public native void profilerStopTrace(); + public native @Cast("uint32_t") int acquireEvent(@Cast("bool") boolean enable_timing); + public native void releaseEvent(@Cast("uint32_t") int event_id); + public native void recordEvent(@Cast("uint32_t") int event_id); + public native void waitForEvent(@Cast("uint32_t") int event_id); + public native void synchronizeEvent(@Cast("uint32_t") int event_id); + public native @Cast("bool") boolean queryEvent(@Cast("uint32_t") int event_id); + public native double elapsedTimeOfEvents(@Cast("uint32_t") int start_event_id, @Cast("uint32_t") int end_event_id); + +// #undef FAIL_MPSHOOKS_FUNC } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java deleted file mode 100644 index d3e34d907d8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java +++ 
/dev/null @@ -1,41 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Name("torch::jit::Named") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedIValue extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public NamedIValue() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedIValue(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public NamedIValue(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public NamedIValue position(long position) { - return (NamedIValue)super.position(position); - } - @Override public NamedIValue getPointer(long i) { - return new NamedIValue((Pointer)this).offsetAddress(i); - } - - public native @StdString BytePointer name(); public native NamedIValue name(BytePointer setter); - public native @ByRef IValue value(); public native NamedIValue value(IValue setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValuePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValuePolicy.java deleted file mode 100644 index 7ef631e5448..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValuePolicy.java +++ /dev/null @@ -1,44 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::detail::NamedPolicy") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedIValuePolicy extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public NamedIValuePolicy() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedIValuePolicy(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public NamedIValuePolicy(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public NamedIValuePolicy position(long position) { - return (NamedIValuePolicy)super.position(position); - } - @Override public NamedIValuePolicy getPointer(long i) { - return new NamedIValuePolicy((Pointer)this).offsetAddress(i); - } - - public static native @ByVal @Cast("torch::jit::detail::NamedPolicy::value_type*") NamedJitModule create( - @StdVector SlotCursor cursors, - @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); - @MemberGetter public static native @Cast("const bool") boolean all_slots(); - public static final boolean all_slots = all_slots(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java deleted file mode 100644 index 09bf1bd7203..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java +++ /dev/null @@ -1,41 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Name("torch::jit::Named") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedTensor extends Pointer { - static { Loader.load(); } - /** Default native 
constructor. */ - public NamedTensor() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedTensor(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NamedTensor(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public NamedTensor position(long position) { - return (NamedTensor)super.position(position); - } - @Override public NamedTensor getPointer(long i) { - return new NamedTensor((Pointer)this).offsetAddress(i); - } - - public native @StdString BytePointer name(); public native NamedTensor name(BytePointer setter); - public native @ByRef Tensor value(); public native NamedTensor value(Tensor setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorPolicy.java deleted file mode 100644 index ce27ce3456b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorPolicy.java +++ /dev/null @@ -1,44 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::detail::NamedPolicy") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedTensorPolicy extends Pointer { - static { Loader.load(); } - /** Default native 
constructor. */ - public NamedTensorPolicy() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedTensorPolicy(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NamedTensorPolicy(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public NamedTensorPolicy position(long position) { - return (NamedTensorPolicy)super.position(position); - } - @Override public NamedTensorPolicy getPointer(long i) { - return new NamedTensorPolicy((Pointer)this).offsetAddress(i); - } - - public static native @ByVal @Cast("torch::jit::detail::NamedPolicy::value_type*") NamedJitModule create( - @StdVector SlotCursor cursors, - @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); - @MemberGetter public static native @Cast("const bool") boolean all_slots(); - public static final boolean all_slots = all_slots(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java index 7f879309617..ec020741932 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java @@ -28,65 +28,65 @@ public NestedTensorImpl( @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, @ByVal DispatchKeySet key_set, @Const @ByVal TypeMeta data_type, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets) { super((Pointer)null); allocate(storage, key_set, data_type, nested_size_tensor, nested_stride_tensor, offsets); } + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets) { 
super((Pointer)null); allocate(storage, key_set, data_type, nested_sizes, nested_strides, storage_offsets); } private native void allocate( @Cast({"", "c10::Storage&&"}) @StdMove Storage storage, @ByVal DispatchKeySet key_set, @Const @ByVal TypeMeta data_type, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets); + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets); public NestedTensorImpl( @ByVal Tensor buffer, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets) { super((Pointer)null); allocate(buffer, nested_size_tensor, nested_stride_tensor, offsets); } + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets) { super((Pointer)null); allocate(buffer, nested_sizes, nested_strides, storage_offsets); } private native void allocate( @ByVal Tensor buffer, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets); - // assume contiguous, `nested_stride_tensor` and `offsets` - // can be infered from `nested_size_tensor` - public NestedTensorImpl(@ByVal Tensor buffer, @ByVal Tensor nested_size_tensor) { super((Pointer)null); allocate(buffer, nested_size_tensor); } - private native void allocate(@ByVal Tensor buffer, @ByVal Tensor nested_size_tensor); + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets); + // assume contiguous, `nested_strides` and `offsets` + // can be infered from `nested_sizes` + public NestedTensorImpl(@ByVal Tensor buffer, @ByVal Tensor nested_sizes) { super((Pointer)null); allocate(buffer, nested_sizes); } + private native void allocate(@ByVal Tensor buffer, @ByVal Tensor nested_sizes); // This constructor is used creating view tensors from nested tensors public NestedTensorImpl( TensorImpl.ImplType 
impl_type, @Const @ByRef Tensor base_tensor, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets) { super((Pointer)null); allocate(impl_type, base_tensor, nested_size_tensor, nested_stride_tensor, offsets); } + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets) { super((Pointer)null); allocate(impl_type, base_tensor, nested_sizes, nested_strides, storage_offsets); } private native void allocate( TensorImpl.ImplType impl_type, @Const @ByRef Tensor base_tensor, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets); + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets); public NestedTensorImpl( @Cast("c10::TensorImpl::ImplType") int impl_type, @Const @ByRef Tensor base_tensor, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets) { super((Pointer)null); allocate(impl_type, base_tensor, nested_size_tensor, nested_stride_tensor, offsets); } + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets) { super((Pointer)null); allocate(impl_type, base_tensor, nested_sizes, nested_strides, storage_offsets); } private native void allocate( @Cast("c10::TensorImpl::ImplType") int impl_type, @Const @ByRef Tensor base_tensor, - @ByVal Tensor nested_size_tensor, - @ByVal Tensor nested_stride_tensor, - @Cast("std::vector*") @ByRef(true) LongVector offsets); + @ByVal Tensor nested_sizes, + @ByVal Tensor nested_strides, + @ByVal Tensor storage_offsets); // TODO: don't expose private implementation details like this; in // particular, resizing this tensor will mess up our dim() and // callers cannot fix it. 
- public native @Const @ByRef Tensor get_nested_size_tensor(); + public native @Const @ByRef Tensor get_nested_sizes(); // TODO: don't expose private implementation details like this - public native @Const @ByRef Tensor get_nested_stride_tensor(); - public native @Cast("const std::vector*") @ByRef LongVector get_storage_offsets(); + public native @Const @ByRef Tensor get_nested_strides(); + public native @Const @ByRef Tensor get_storage_offsets(); // Returns nullopt if the ith dimension is irregular. The ith dimension // of a NestedTensor is regular if the unbound tensors match in // size at the (i-1)th dimension. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java index 194657a7a85..91e918358d8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java @@ -115,6 +115,8 @@ public class Node extends Pointer { public native @Cast("uint32_t") @NoException(true) int num_inputs(); + // Danger: not thread safe, caller must protect with lock + /** * Note: Function Streams * A function's stream (for a given device type) is the stream of the first @@ -277,6 +279,8 @@ public native void add_retains_grad_hook( public native @ByRef @NoException(true) FunctionPreHookVector tensor_pre_hooks(); + public native @UniquePtr @NoException(true) PostAccumulateGradHook tensor_post_acc_grad_hooks(); + // Customization Points for Subclasses @@ -305,4 +309,19 @@ public native void add_retains_grad_hook( * will probably depend on saved_variable_list being mutable. * NOTE: this value matters only if is_traceable() returns false. */ public native @Cast("bool") boolean passes_state_transparently(); + + // see [Note: Compiled Autograd] + // Used by compiled autograd to + // 1) Extract tensors/symint args + // 2) Collect node information for specialization and caching + // Implementations in subclasses should call args.collect() with all node + // attrs. 
These functions are only called durring backward. + public native void compiled_args(@ByRef CompiledNodeArgs args); + + // Used by compiled autograd to call apply() with different saved tensors + // Implementations should call saved.before() on all attrs, then apply(), then + // saved.after() on all attrs in the same order. + public native @Cast({"", "std::vector"}) @StdMove TensorVector apply_with_saved( + @Cast({"", "std::vector"}) @StdMove TensorVector inputs, + @ByRef SwapSavedVariables saved); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java index 7fd6d89ac87..3e9fc97826a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java @@ -38,6 +38,10 @@ public class OperandInfo extends Pointer { public OperandInfo(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned t) { super((Pointer)null); allocate(t); } private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned t); + /** The data pointer. This may be different from tensor->data_ptr() if the + * iterator is split. */ + public native Pointer data(); public native OperandInfo data(Pointer setter); + /** Stride after broadcasting. The stride is in bytes, not number of elements. */ public native @ByRef @Cast("at::OperandInfo::StrideVector*") SymDimVector stride_bytes(); public native OperandInfo stride_bytes(SymDimVector setter); @@ -60,10 +64,6 @@ public class OperandInfo extends Pointer { public native @Cast("bool") boolean is_type_defined(); public native @ByVal TensorOptions options(); - /** The data pointer. This may be different from tensor->data_ptr() if the - * iterator is split. 
*/ - public native Pointer data(); public native OperandInfo data(Pointer setter); - public native @Cast("bool") boolean is_output(); public native OperandInfo is_output(boolean setter); public native @Cast("bool") boolean will_resize(); public native OperandInfo will_resize(boolean setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java deleted file mode 100644 index ecfe9fdf858..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java +++ /dev/null @@ -1,29 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OperationCreator extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public OperationCreator(Pointer p) { super(p); } - protected OperationCreator() { allocate(); } - private native void allocate(); - public native @ByVal Operation call(@Const JitNode arg0); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java index 4b714bbc142..b73c4d5258f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java @@ -58,6 +58,8 @@ public class OperatorHandle extends Pointer { public native @ByVal TagArrayRef getTags(); + public native void setReportErrorCallback_(@UniquePtr SafePyObject callback); + public native @Cast("bool") boolean hasTag(Tag tag); public native @Cast("bool") boolean hasTag(@Cast("at::Tag") int tag); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java deleted file mode 100644 index eb12b5e8e1e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OperatorOptional extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public OperatorOptional(Pointer p) { super(p); } - public OperatorOptional(Operator value) { this(); put(value); } - public OperatorOptional() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef OperatorOptional put(@ByRef OperatorOptional x); - - public native boolean has_value(); - public native void reset(); - public native @Name("value") @ByRef Operator get(); - @ValueSetter public native OperatorOptional put(@ByRef Operator value); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java deleted file mode 100644 index 2f4f89bbea6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java +++ /dev/null @@ -1,90 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OperatorOptionalVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OperatorOptionalVector(Pointer p) { super(p); } - public OperatorOptionalVector(OperatorOptional value) { this(1); put(0, value); } - public OperatorOptionalVector(OperatorOptional ... 
array) { this(array.length); put(array); } - public OperatorOptionalVector() { allocate(); } - public OperatorOptionalVector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef OperatorOptionalVector put(@ByRef OperatorOptionalVector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - public OperatorOptional front() { return get(0); } - public OperatorOptional back() { return get(size() - 1); } - @Index(function = "at") public native @ByRef OperatorOptional get(@Cast("size_t") long i); - public native OperatorOptionalVector put(@Cast("size_t") long i, OperatorOptional value); - - public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef OperatorOptional value); - public native @ByVal Iterator erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const OperatorOptional get(); - } - - public OperatorOptional[] get() { - OperatorOptional[] array = new OperatorOptional[size() < Integer.MAX_VALUE ? 
(int)size() : Integer.MAX_VALUE]; - for (int i = 0; i < array.length; i++) { - array[i] = get(i); - } - return array; - } - @Override public String toString() { - return java.util.Arrays.toString(get()); - } - - public OperatorOptional pop_back() { - long size = size(); - OperatorOptional value = get(size - 1); - resize(size - 1); - return value; - } - public OperatorOptionalVector push_back(OperatorOptional value) { - long size = size(); - resize(size + 1); - return put(size, value); - } - public OperatorOptionalVector put(OperatorOptional value) { - if (size() != 1) { resize(1); } - return put(0, value); - } - public OperatorOptionalVector put(OperatorOptional ... array) { - if (size() != array.length) { resize(array.length); } - for (int i = 0; i < array.length; i++) { - put(i, array[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java index bbf5459a7e3..3f6e69c540e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java @@ -34,19 +34,19 @@ public class PlacementDeleteContext extends Pointer { public PlacementDeleteContext(Pointer p) { super(p); } public native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr_(); public native PlacementDeleteContext data_ptr_(DataPtr setter); - public native PlacementDtor placement_dtor_(); public native PlacementDeleteContext placement_dtor_(PlacementDtor setter); + public native PlacementConsumer placement_dtor_(); public native PlacementDeleteContext placement_dtor_(PlacementConsumer setter); public native @Cast("size_t") long size_(); public native PlacementDeleteContext size_(long setter); public PlacementDeleteContext( @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr, - PlacementDtor placement_dtor, + PlacementConsumer placement_dtor, @Cast("size_t") long size) { 
super((Pointer)null); allocate(data_ptr, placement_dtor, size); } private native void allocate( @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr, - PlacementDtor placement_dtor, + PlacementConsumer placement_dtor, @Cast("size_t") long size); public static native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr makeDataPtr( @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr, - PlacementDtor placement_dtor, + PlacementConsumer placement_dtor, @Cast("size_t") long size, @ByVal Device device); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java deleted file mode 100644 index f85f2cffd25..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java +++ /dev/null @@ -1,29 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PlacementDtor extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public PlacementDtor(Pointer p) { super(p); } - protected PlacementDtor() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0, @Cast("size_t") long arg1); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java index 949391fc2ee..6f48746b9c5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java @@ -129,6 +129,9 @@ public class PyInterpreterVTable extends Pointer { // Invoke the Python boxed fallback dispatch to go back into Python public native void dispatch(@Const @ByRef OperatorHandle op, IValueVector stack); + public native void reportErrorCallback(@Cast("PyObject*") Pointer callback, DispatchKey key); + public native void reportErrorCallback(@Cast("PyObject*") Pointer callback, @Cast("c10::DispatchKey") short key); + // This is only invoked in the multipy/torchdeploy situation from // pythonOpRegistrationTrampoline; this lets us get to the Python // interpreter to actually find the appropriate Python op registration diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java index a601a940b5a..902a24daea9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java @@ -18,76 +18,6 @@ import static org.bytedeco.pytorch.global.torch.*; -// PyTorch containers are a special zip archive with the following layout -// archive_name.zip contains: -// archive_name/ -// version # a file with a single decimal number written in ascii, -// # used to establish the version of the archive format -// model.json # overall model description, this is a json output of -// # ModelDef from torch.proto -// # the following names are by convention only, model.json will -// # 
refer to these files by full names -// tensors/ -// 0 # flat storage for tensor data, meta-data about shapes, etc. is -// # in model.json -// 1 -// ... -// # code entries will only exist for modules that have methods attached -// code/ -// archive_name.py # serialized torch script code (python syntax, using -// PythonPrint) archive_name_my_submodule.py # submodules have separate -// files -// -// The PyTorchStreamWriter also ensures additional useful properties for these -// files -// 1. All files are stored uncompressed. -// 2. All files in the archive are aligned to 64 byte boundaries such that -// it is possible to mmap the entire file and get an aligned pointer to -// tensor data. -// 3. We universally write in ZIP64 format for consistency. - -// The PyTorchStreamReader also provides additional properties: -// 1. It can read zip files that are created with common -// zip tools. This means that even though our writer doesn't compress files, -// the reader can still read files that were compressed. -// 2. It provides a getRecordOffset function which returns the offset into the -// raw file where file data lives. If the file was written with -// PyTorchStreamWriter it is guaranteed to be 64 byte aligned. - -// PyTorchReader/Writer handle checking the version number on the archive format -// and ensure that all files are written to a archive_name directory so they -// unzip cleanly. 
- -// When developing this format we want to pay particular attention to the -// following use cases: -// -// -- Reading -- -// 1) Reading with full random access -// a) Reading with file api's such as fread() -// b) mmaping the file and jumping around the mapped region -// 2) Reading with 1-pass sequential access -// -> A reader will need to build up a data structure of parsed structures -// as it reads -// -// -- Writing -- -// 1) Writing with full random access -// 2) Writing with 1-pass sequential access -// -> We must take care not to require updating values that have already -// been written. We place the variable-length index at the end and do -// not put any indicies into the header to fulfill this constraint. - -// The model.json, which contains all the metadata information, -// should be written as the last file. One reason is that the size of tensor -// data is usually stable. As long as the shape and type of the tensor do not -// change, the size of the data won't change. On the other sied, the size of the -// serialized model is likely to change, so we store it as the last record, and -// we don't need to move previous records when updating the model data. - -// The zip format is sufficiently flexible to handle the above use-case. -// it puts its central directory at the end of the archive and we write -// model.json as the last file when writing after we have accumulated all -// other information. 
- @Namespace("caffe2::serialize") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class PyTorchStreamReader extends Pointer { static { Loader.load(); } @@ -104,12 +34,30 @@ public class PyTorchStreamReader extends Pointer { // return dataptr, size public native @ByVal T_DataPtrSizeT_T getRecord(@StdString BytePointer name); public native @ByVal T_DataPtrSizeT_T getRecord(@StdString String name); + // inplace memory writing + public native @Cast("size_t") long getRecord(@StdString BytePointer name, Pointer dst, @Cast("size_t") long n); + public native @Cast("size_t") long getRecord(@StdString String name, Pointer dst, @Cast("size_t") long n); + public native @Cast("size_t") long getRecord( + @StdString BytePointer name, + Pointer dst, + @Cast("size_t") long n, + @Cast("size_t") long chunk_size, + Pointer buf, + @Const @ByRef MemCopyFunction memcpy_func); + public native @Cast("size_t") long getRecord( + @StdString String name, + Pointer dst, + @Cast("size_t") long n, + @Cast("size_t") long chunk_size, + Pointer buf, + @Const @ByRef MemCopyFunction memcpy_func); public native @Cast("size_t") long getRecordOffset(@StdString BytePointer name); public native @Cast("size_t") long getRecordOffset(@StdString String name); public native @Cast("bool") boolean hasRecord(@StdString BytePointer name); public native @Cast("bool") boolean hasRecord(@StdString String name); public native @ByVal StringVector getAllRecords(); public native @Cast("uint64_t") long version(); + public native @StdString BytePointer serializationId(); public native void setShouldLoadDebugSymbol(@Cast("bool") boolean should_load_debug_symbol); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java index 21af3c7c18f..81e0bbbb085 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java @@ -39,6 +39,8 @@ public 
class SafePyObject extends Pointer { // Steals a reference to data public SafePyObject(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter) { super((Pointer)null); allocate(data, pyinterpreter); } private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter); + public SafePyObject(@ByRef(true) SafePyObject other) { super((Pointer)null); allocate(other); } + private native void allocate(@ByRef(true) SafePyObject other); // In principle this could be copyable if we add an incref to PyInterpreter // but for now it's easier to just disallow it. @@ -47,4 +49,7 @@ public class SafePyObject extends Pointer { public native @ByRef PyInterpreter pyinterpreter(); public native @Cast("PyObject*") Pointer ptr(@Const PyInterpreter arg0); + + // stop tracking the current object, and return it + public native @Cast("PyObject*") Pointer release(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java index 3d5babb2615..dd95c5fd443 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java @@ -78,4 +78,6 @@ private native void allocate( public native void register_hooks(@UniquePtr SavedVariableHooks hooks); public native void reset_data(); + + public native @Cast("bool") boolean has_hooks(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java index 2c53b21e50a..e3097f6dfe4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java @@ -41,21 +41,34 @@ public class Scalar extends Pointer { // Scalar(type vv) : Scalar(vv, true) {} public Scalar(@Cast("uint8_t") byte vv) { super((Pointer)null); allocate(vv); } - private native void allocate(@Cast("uint8_t") byte vv); + private native void allocate(@Cast("uint8_t") byte vv); + public Scalar(short 
vv) { super((Pointer)null); allocate(vv); } - private native void allocate(short vv); + private native void allocate(short vv); + public Scalar(int vv) { super((Pointer)null); allocate(vv); } - private native void allocate(int vv); + private native void allocate(int vv); + public Scalar(@Cast("int64_t") long vv) { super((Pointer)null); allocate(vv); } - private native void allocate(@Cast("int64_t") long vv); + private native void allocate(@Cast("int64_t") long vv); + public Scalar(float vv) { super((Pointer)null); allocate(vv); } - private native void allocate(float vv); + private native void allocate(float vv); + public Scalar(double vv) { super((Pointer)null); allocate(vv); } - private native void allocate(double vv); + private native void allocate(double vv); + public Scalar(@ByVal Half vv) { super((Pointer)null); allocate(vv); } - private native void allocate(@ByVal Half vv); + private native void allocate(@ByVal Half vv); + public Scalar(@ByVal BFloat16 vv) { super((Pointer)null); allocate(vv); } private native void allocate(@ByVal BFloat16 vv); + + public Scalar(@ByVal Float8_e5m2 vv) { super((Pointer)null); allocate(vv); } + private native void allocate(@ByVal Float8_e5m2 vv); + + public Scalar(@ByVal Float8_e4m3fn vv) { super((Pointer)null); allocate(vv); } + private native void allocate(@ByVal Float8_e4m3fn vv); public Scalar(@ByVal FloatComplex vv) { super((Pointer)null); allocate(vv); } private native void allocate(@ByVal FloatComplex vv); public Scalar(@ByVal DoubleComplex vv) { super((Pointer)null); allocate(vv); } @@ -82,6 +95,8 @@ public class Scalar extends Pointer { // TORCH_CHECK(false, "tried to get " #name " out of SymInt") // } else if (Tag::HAS_sd == tag) { // TORCH_CHECK(false, "tried to get " #name " out of SymFloat") +// } else if (Tag::HAS_sb == tag) { +// TORCH_CHECK(false, "tried to get " #name " out of SymBool") // } // TORCH_CHECK(false) // } @@ -100,6 +115,8 @@ public class Scalar extends Pointer { public native @ByVal DoubleComplex 
toComplexDouble(); public native @Cast("bool") boolean toBool(); public native @ByVal BFloat16 toBFloat16(); + public native @ByVal Float8_e5m2 toFloat8_e5m2(); + public native @ByVal Float8_e4m3fn toFloat8_e4m3fn(); // #undef DEFINE_ACCESSOR @@ -107,6 +124,8 @@ public class Scalar extends Pointer { public native @ByVal SymFloat toSymFloat(); + public native @ByVal SymBool toSymBool(); + // also support scalar.to(); // Deleted for unsupported types, but specialized below for supported types @@ -125,6 +144,7 @@ public class Scalar extends Pointer { // you probably don't actually want these; they're mostly for testing public native @Cast("bool") boolean isSymInt(); public native @Cast("bool") boolean isSymFloat(); + public native @Cast("bool") boolean isSymBool(); public native @Cast("bool") boolean isSymbolic(); @@ -146,4 +166,7 @@ public class Scalar extends Pointer { public Scalar(@ByVal SymFloat sd) { super((Pointer)null); allocate(sd); } private native void allocate(@ByVal SymFloat sd); + + public Scalar(@ByVal SymBool sb) { super((Pointer)null); allocate(sb); } + private native void allocate(@ByVal SymBool sb); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java index 09fcaef8459..db55cbd061f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java @@ -231,6 +231,7 @@ public SequentialImpl( public native @Name("push_back") void push_back(@SharedPtr ReflectionPad1dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr ReplicationPad1dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr ConstantPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ZeroPad1dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr AvgPool1dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr 
MaxPool1dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr AdaptiveAvgPool1dImpl module_ptr); @@ -251,6 +252,7 @@ public SequentialImpl( public native @Name("push_back") void push_back(@SharedPtr ReflectionPad3dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr ReplicationPad3dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr ConstantPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ZeroPad3dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr AvgPool3dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr MaxPool3dImpl module_ptr); public native @Name("push_back") void push_back(@SharedPtr AdaptiveAvgPool3dImpl module_ptr); @@ -408,6 +410,8 @@ public SequentialImpl( public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReplicationPad1dImpl module_ptr); public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ConstantPad1dImpl module_ptr); public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConstantPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ZeroPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ZeroPad1dImpl module_ptr); public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AvgPool1dImpl module_ptr); public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AvgPool1dImpl module_ptr); public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxPool1dImpl module_ptr); @@ -448,6 +452,8 @@ public SequentialImpl( public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReplicationPad3dImpl module_ptr); public native @Name("push_back") void push_back(@StdString BytePointer name, 
@SharedPtr ConstantPad3dImpl module_ptr); public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConstantPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ZeroPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ZeroPad3dImpl module_ptr); public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AvgPool3dImpl module_ptr); public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AvgPool3dImpl module_ptr); public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxPool3dImpl module_ptr); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java deleted file mode 100644 index 7f3bd8df017..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Used in torch.package and TorchScript serialization to coordinate -// sharing of storages between models. Also used to create deterministic -// naming for storages. 
-@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SerializationStorageContext extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SerializationStorageContext(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SerializationStorageContext(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SerializationStorageContext position(long position) { - return (SerializationStorageContext)super.position(position); - } - @Override public SerializationStorageContext getPointer(long i) { - return new SerializationStorageContext((Pointer)this).offsetAddress(i); - } - - public SerializationStorageContext() { super((Pointer)null); allocate(); } - private native void allocate(); - - - - public native @Cast("uint64_t") long getOrAddStorage(@Cast({"", "c10::Storage&&"}) @StdMove Storage storage); - - public native @Cast("bool") boolean hasStorage(@Cast({"", "c10::Storage&&"}) @StdMove Storage storage); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java deleted file mode 100644 index 4e1bfe44c2d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java +++ /dev/null @@ -1,90 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static 
org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StackEntryVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StackEntryVector(Pointer p) { super(p); } - public StackEntryVector(StackEntry value) { this(1); put(0, value); } - public StackEntryVector(StackEntry ... array) { this(array.length); put(array); } - public StackEntryVector() { allocate(); } - public StackEntryVector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef StackEntryVector put(@ByRef StackEntryVector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - public StackEntry front() { return get(0); } - public StackEntry back() { return get(size() - 1); } - @Index(function = "at") public native @ByRef StackEntry get(@Cast("size_t") long i); - public native StackEntryVector put(@Cast("size_t") long i, StackEntry value); - - public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef StackEntry value); - public native @ByVal Iterator erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const StackEntry get(); - } - - public StackEntry[] get() { - StackEntry[] array = new StackEntry[size() < 
Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; - for (int i = 0; i < array.length; i++) { - array[i] = get(i); - } - return array; - } - @Override public String toString() { - return java.util.Arrays.toString(get()); - } - - public StackEntry pop_back() { - long size = size(); - StackEntry value = get(size - 1); - resize(size - 1); - return value; - } - public StackEntryVector push_back(StackEntry value) { - long size = size(); - resize(size + 1); - return put(size, value); - } - public StackEntryVector put(StackEntry value) { - if (size() != 1) { resize(1); } - return put(0, value); - } - public StackEntryVector put(StackEntry ... array) { - if (size() != array.length) { resize(array.length); } - for (int i = 0; i < array.length; i++) { - put(i, array[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java index 99dc4da4061..5d3afbd5c6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java @@ -107,9 +107,13 @@ private native void allocate( public native @ByVal SymInt sym_nbytes(); // get() use here is to get const-correctness - public native Pointer data(); + public native @Const Pointer data(); - public native @ByRef DataPtr data_ptr(); + public native Pointer mutable_data(); + + public native @ByRef DataPtr mutable_data_ptr(); + + public native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr(); // Returns the previous data_ptr public native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr set_data_ptr(@Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr); @@ -139,18 +143,10 @@ private native void allocate( public native void UniqueStorageShareExternalPointer( Pointer src, @Cast("size_t") long _capacity, - @Cast("c10::DeleterFnPtr") PointerConsumer d/*=nullptr*/); + PointerConsumer d/*=nullptr*/); public native void UniqueStorageShareExternalPointer( Pointer src, 
@Cast("size_t") long _capacity); - public native void UniqueStorageShareExternalPointer( - Pointer src, - @Cast("size_t") long _capacity, - @Cast("c10::DeleterFnPtr") Pointer d/*=nullptr*/); - public native void UniqueStorageShareExternalPointer( - Pointer src, - @Cast("size_t") long _capacity, - @Cast("c10::DeleterFnPtr") long d/*=nullptr*/); public native void UniqueStorageShareExternalPointer( @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java index 8cf0d823075..248751e099b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java @@ -69,20 +69,19 @@ private native void allocate( public StorageImpl( @ByVal use_byte_size_t arg0, - @ByVal SymInt size_bytes, + @Const @ByRef SymInt size_bytes, Allocator allocator, @Cast("bool") boolean resizable) { super((Pointer)null); allocate(arg0, size_bytes, allocator, resizable); } private native void allocate( @ByVal use_byte_size_t arg0, - @ByVal SymInt size_bytes, + @Const @ByRef SymInt size_bytes, Allocator allocator, @Cast("bool") boolean resizable); - public StorageImpl(@ByRef(true) StorageImpl other) { super((Pointer)null); allocate(other); } - private native void allocate(@ByRef(true) StorageImpl other); + public native void reset(); @@ -102,15 +101,18 @@ private native void allocate( public native @Cast("bool") boolean resizable(); - public native @ByRef DataPtr data_ptr(); + public native @ByRef DataPtr mutable_data_ptr(); + + public native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr(); // Returns the previous data_ptr public native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr set_data_ptr(@Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr); public native void set_data_ptr_noswap(@Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr data_ptr); - // TODO: Return const ptr eventually if 
possible - public native Pointer data(); + public native @Const Pointer data(); + + public native Pointer mutable_data(); public native @ByVal DeviceType device_type(); @@ -132,18 +134,10 @@ private native void allocate( public native void UniqueStorageShareExternalPointer( Pointer src, @Cast("size_t") long size_bytes, - @Cast("c10::DeleterFnPtr") PointerConsumer d/*=nullptr*/); + PointerConsumer d/*=nullptr*/); public native void UniqueStorageShareExternalPointer( Pointer src, @Cast("size_t") long size_bytes); - public native void UniqueStorageShareExternalPointer( - Pointer src, - @Cast("size_t") long size_bytes, - @Cast("c10::DeleterFnPtr") Pointer d/*=nullptr*/); - public native void UniqueStorageShareExternalPointer( - Pointer src, - @Cast("size_t") long size_bytes, - @Cast("c10::DeleterFnPtr") long d/*=nullptr*/); /** * Can only be called when use_count is 1 diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java index 673ab545a48..f3b23d04c77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java @@ -25,7 +25,7 @@ public class StorageType extends Type { public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); public native @StdString BytePointer str(); - public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") @Cast("c10::TypePrinter*") Pointer printer); + public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); public native @StdString BytePointer annotation_str_impl(); @MemberGetter public static native TypeKind Kind(); // global singleton diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java index 7a1bfd09fef..eaeeb283705 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java @@ -26,7 +26,7 @@ public class StringType extends Type { public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); public native @StdString BytePointer str(); - public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") @Cast("c10::TypePrinter*") Pointer printer); + public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); public native @StdString BytePointer annotation_str_impl(); @MemberGetter public static native TypeKind Kind(); // global singleton diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java index b9f449dfc08..63005c0cc03 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java @@ -44,8 +44,12 @@ public class SymBool extends Pointer { + // Only valid if is_heap_allocated() public native @ByVal SymNode toSymNodeImpl(); + // Guaranteed to return a SymNode, wrapping using base if necessary + public native @ByVal SymNode wrap_node(@Const @ByRef SymNode base); + public native @Cast("bool") boolean expect_bool(); public native @ByVal SymBool sym_and(@Const @ByRef SymBool arg0); @@ -61,8 +65,14 @@ public class SymBool extends Pointer { // bool, so it's not so common to have to call this public native @Cast("bool") boolean guard_bool(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native @Cast("bool") boolean guard_bool(String file, @Cast("int64_t") long line); + public native @Cast("bool") boolean expect_true(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); + public native @Cast("bool") boolean expect_true(String file, @Cast("int64_t") long line); - public native @Cast("bool") boolean is_symbolic(); + public native @Cast("bool") boolean has_hint(); public native @Cast("bool") boolean 
as_bool_unchecked(); + + public native @ByVal BoolOptional maybe_as_bool(); + + public native @Cast("bool") boolean is_heap_allocated(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java index 29ba5c4e98f..54f17b63f71 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java @@ -36,8 +36,12 @@ public class SymFloat extends Pointer { + // Only valid if is_symbolic() public native @ByVal SymNode toSymNodeImpl(); + // Guaranteed to return a SymNode, wrapping using base if necessary + public native @ByVal SymNode wrap_node(@Const @ByRef SymNode base); + public native double expect_float(); public native @ByVal @Name("operator +") SymFloat add(@Const @ByRef SymFloat arg0); @@ -45,6 +49,23 @@ public class SymFloat extends Pointer { public native @ByVal @Name("operator *") SymFloat multiply(@Const @ByRef SymFloat arg0); public native @ByVal @Name("operator /") SymFloat divide(@Const @ByRef SymFloat arg0); + public native @ByVal SymBool sym_eq(@Const @ByRef SymFloat arg0); + public native @ByVal SymBool sym_ne(@Const @ByRef SymFloat arg0); + public native @ByVal SymBool sym_lt(@Const @ByRef SymFloat arg0); + public native @ByVal SymBool sym_le(@Const @ByRef SymFloat arg0); + public native @ByVal SymBool sym_gt(@Const @ByRef SymFloat arg0); + public native @ByVal SymBool sym_ge(@Const @ByRef SymFloat arg0); + + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymFloat o); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymFloat o); + public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymFloat o); + public native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef SymFloat o); + public native @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef SymFloat o); + public native @Cast("bool") 
@Name("operator >=") boolean greaterThanEquals(@Const @ByRef SymFloat o); + + public native @ByVal SymFloat min(@Const @ByRef SymFloat sci); + public native @ByVal SymFloat max(@Const @ByRef SymFloat sci); + // Need guidance on where to put this code public native @ByVal SymFloat sqrt(); @@ -59,6 +80,8 @@ public class SymFloat extends Pointer { public native double guard_float(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native double guard_float(String file, @Cast("int64_t") long line); + public native @Cast("bool") boolean has_hint(); + // N.B. It's important to keep this definition in the header // as we expect if checks to be folded for mobile builds // where `is_symbolic` is always false diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java index 6229f558c0a..81248472b77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java @@ -25,7 +25,7 @@ public class SymFloatType extends Type { public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); public native @StdString BytePointer str(); - public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") @Cast("c10::TypePrinter*") Pointer printer); + public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); public native @StdString BytePointer annotation_str_impl(); @MemberGetter public static native TypeKind Kind(); // global singleton diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java index 93892d57206..0f7392b7f8b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java @@ -71,15 +71,17 @@ public enum Unchecked { public native @ByRef @Name("operator =") SymInt 
put(@Const @ByRef SymInt s); - public native @ByVal SymInt clone(); - public native SymNodeImpl toSymNodeImplUnowned(); public native void release_(); - public native @ByVal SymNode toSymNodeImpl(); + // Only valid if is_heap_allocated() + public native @ByVal SymNode toSymNode(); + + // Guaranteed to return a SymNode, wrapping using base if necessary + public native @ByVal SymNode wrap_node(@Const @ByRef SymNode base); // Require the int to be non-symbolic, and if it is symbolic raise an // error. This is safe to use for C++ code that doesn't work for symbolic @@ -87,6 +89,11 @@ public enum Unchecked { // try to trigger the path in C++ you'll appropriately get an error public native @Cast("int64_t") long expect_int(); + // Test if we have a hint for this int (e.g., guard_int would work). + // Most of the time this is true; it is only false when you have + // an unbacked SymInt. + public native @Cast("bool") boolean has_hint(); + // Insert a guard for the int to be its concrete value, and then return // that value. This operation always works, even if the int is symbolic, // so long as we know what the underlying value is (e.g., this won't work @@ -99,10 +106,13 @@ public enum Unchecked { public native @Cast("int64_t") long guard_int(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native @Cast("int64_t") long guard_int(String file, @Cast("int64_t") long line); + // Distinguish actual symbolic values from constants stored on the heap + public native @Cast("bool") boolean is_symbolic(); + // N.B. 
It's important to keep this definition in the header // as we expect if checks to be folded for mobile builds - // where `is_symbolic` is always false and optimize dead code paths - public native @Cast("bool") boolean is_symbolic(); + // where `is_heap_allocated` is always false and optimize dead code paths + public native @Cast("bool") boolean is_heap_allocated(); public native @ByVal @Name("operator +") SymInt add(@Const @ByRef SymInt sci); public native @ByVal @Name("operator -") SymInt subtract(@Const @ByRef SymInt sci); @@ -113,6 +123,8 @@ public enum Unchecked { public native @Name("operator +=") void addPut(@Const @ByRef SymInt sci); public native @Name("operator /=") void dividePut(@Const @ByRef SymInt sci); + public native @ByVal SymInt clone(); + public native @ByVal SymBool sym_eq(@Const @ByRef SymInt arg0); public native @ByVal SymBool sym_ne(@Const @ByRef SymInt arg0); public native @ByVal SymBool sym_lt(@Const @ByRef SymInt arg0); @@ -130,21 +142,22 @@ public enum Unchecked { public native @ByVal SymInt min(@Const @ByRef SymInt sci); public native @ByVal SymInt max(@Const @ByRef SymInt sci); - public native @ByVal @Name("operator *") SymInt multiply(@Cast("int64_t") long sci); - public native @Cast("bool") @Name("operator <") boolean lessThan(@Cast("int64_t") long sci); - public native @Cast("bool") @Name("operator ==") boolean equals(@Cast("int64_t") long sci); - public native @Cast("bool") @Name("operator !=") boolean notEquals(@Cast("int64_t") long sci); - public native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Cast("int64_t") long sci); - public native @Cast("bool") @Name("operator >") boolean greaterThan(@Cast("int64_t") long sci); - public native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Cast("int64_t") long sci); - public native @ByVal @Name("operator c10::SymFloat") SymFloat asSymFloat(); + // Don't use this. 
Prefer maybe_as_int instead public native @Cast("int64_t") long as_int_unchecked(); - // Return whether the integer is representable as a SymInt. + public native @ByVal LongOptional maybe_as_int(); + + // Return whether the integer is directly coercible to a SymInt + // without requiring heap allocation. You don't need to use this + // to check if you can pass an integer to SymInt; this is guaranteed + // to work (it just might heap allocate!) public static native @Cast("bool") boolean check_range(@Cast("int64_t") long i); - // Return the min represetable integer as a SymInt + // Return the min representable integer as a SymInt without + // heap allocation. For quantities that count bytes (or larger), + // this is still much larger than you need, so you may consider + // using this as a more efficient version of MIN_INT public static native @Cast("const int64_t") long min_representable_int(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java index 6d40c4ea877..ef9e3f2f765 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java @@ -25,7 +25,7 @@ public class SymIntType extends Type { public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); public native @StdString BytePointer str(); - public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") @Cast("c10::TypePrinter*") Pointer printer); + public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); public native @StdString BytePointer annotation_str_impl(); @MemberGetter public static native TypeKind Kind(); // global singleton diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java index 38eecb05c4b..ea262456d1e 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java @@ -18,6 +18,10 @@ import static org.bytedeco.pytorch.global.torch.*; +// When you add a method, you also need to edit +// torch/csrc/jit/python/init.cpp +// torch/csrc/utils/python_symnode.h +// c10/core/ConstantSymNodeImpl.h @Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SymNodeImpl extends Pointer { static { Loader.load(); } @@ -63,6 +67,21 @@ public class SymNodeImpl extends Pointer { public native @ByVal SymNode sym_and(@Const @ByRef SymNode other); public native @ByVal SymNode sym_not(); // NB: self is ignored here, only the arguments are used + public native @ByVal SymNode is_contiguous( + @ByVal SymNodeArrayRef sizes, + @ByVal SymNodeArrayRef strides); + public native @ByVal SymNode is_channels_last_contiguous_2d( + @ByVal SymNodeArrayRef sizes, + @ByVal SymNodeArrayRef strides); + public native @ByVal SymNode is_channels_last_contiguous_3d( + @ByVal SymNodeArrayRef sizes, + @ByVal SymNodeArrayRef strides); + public native @ByVal SymNode is_channels_last_strides_2d( + @ByVal SymNodeArrayRef sizes, + @ByVal SymNodeArrayRef strides); + public native @ByVal SymNode is_channels_last_strides_3d( + @ByVal SymNodeArrayRef sizes, + @ByVal SymNodeArrayRef strides); public native @ByVal SymNode is_non_overlapping_and_dense( @ByVal SymNodeArrayRef sizes, @ByVal SymNodeArrayRef strides); @@ -77,8 +96,15 @@ public class SymNodeImpl extends Pointer { public native @Cast("bool") boolean guard_bool(String file, @Cast("int64_t") long line); public native double guard_float(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native double guard_float(String file, @Cast("int64_t") long line); + public native @Cast("bool") boolean expect_true(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); + public native @Cast("bool") boolean expect_true(String file, 
@Cast("int64_t") long line); public native @Cast("int64_t") long int_(); public native @Cast("bool") boolean bool_(); + public native @Cast("bool") boolean has_hint(); public native @StdString BytePointer str(); + public native @ByVal LongOptional singleton_int(); + public native @ByVal LongOptional constant_int(); + public native @ByVal BoolOptional constant_bool(); + public native @ByVal LongOptional maybe_as_int(); public native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 45ff765bc91..102a9d2cd32 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -669,8 +669,10 @@ private native void allocate( public native @ByVal Tensor flatten(@ByVal DimnameArrayRef dims, @ByVal Dimname out_dim); public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal LongArrayRef sizes); public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); + public native @ByVal Tensor unflatten_symint(@Cast("int64_t") long dim, @ByVal SymIntArrayRef sizes); public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal LongArrayRef sizes, @ByVal DimnameArrayRef names); public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal DimnameArrayRef names); + public native @ByVal Tensor unflatten_symint(@ByVal Dimname dim, @ByVal SymIntArrayRef sizes, @ByVal DimnameArrayRef names); public native @ByRef Tensor fill_(@Const @ByRef Scalar value); public native @ByRef Tensor fill_(@Const @ByRef Tensor value); public native @ByVal Tensor floor(); @@ -972,6 +974,7 @@ private native void allocate( public native @ByVal Tensor nansum(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor sum_to_size(@ByVal LongArrayRef size); public native @ByVal Tensor sum_to_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); + public native @ByVal Tensor sum_to_size_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor sqrt(); public native @ByRef Tensor sqrt_(); public native @ByVal Tensor square(); @@ -981,12 +984,12 @@ private native void allocate( public native @ByVal Tensor std(@ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); public native @ByVal Tensor std(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor std(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); - public native @ByVal Tensor std(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor std(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor std(); - public native @ByVal Tensor std(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor std(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor std(@ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor std(@ByVal DimnameArrayRef dim, @Cast("bool") boolean 
unbiased); - public native @ByVal Tensor std(@ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor std(@ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor std(@ByVal DimnameArrayRef dim); public native @ByVal Tensor prod(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor prod(); @@ -1002,6 +1005,7 @@ private native void allocate( public native @ByRef Tensor tanh_(); public native @ByVal Tensor tile(@ByVal LongArrayRef dims); public native @ByVal Tensor tile(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + public native @ByVal Tensor tile_symint(@ByVal SymIntArrayRef dims); public native @ByVal Tensor transpose(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1); public native @ByVal Tensor transpose(@ByVal Dimname dim0, @ByVal Dimname dim1); public native @ByRef Tensor transpose_(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1); @@ -1013,12 +1017,15 @@ private native void allocate( public native @ByVal Tensor roll(@ByVal LongArrayRef shifts); public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
shifts); + public native @ByVal Tensor roll_symint(@ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); + public native @ByVal Tensor roll_symint(@ByVal SymIntArrayRef shifts); + public native @ByVal Tensor roll_symint(@ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") LongArrayRef dims); public native @ByVal Tensor rot90(); public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); public native @ByVal Tensor _nested_tensor_size(); public native @ByVal Tensor _nested_tensor_strides(); - public native @ByVal @Cast("std::vector*") LongVector _nested_tensor_offsets(); + public native @ByVal Tensor _nested_tensor_storage_offsets(); public native @ByVal Tensor trunc(); public native @ByRef Tensor trunc_(); public native @ByVal Tensor fix(); @@ -1031,12 +1038,12 @@ private native void allocate( public native @ByVal Tensor var(@ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); public native @ByVal Tensor var(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor var(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); - public native @ByVal Tensor var(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor var(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = 
"c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor var(); - public native @ByVal Tensor var(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor var(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor var(@ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor var(@ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); - public native @ByVal Tensor var(@ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor var(@ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor var(@ByVal DimnameArrayRef dim); public native @ByVal Tensor view_as(@Const @ByRef Tensor other); public native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Tensor other); @@ -1090,9 +1097,11 @@ private native void allocate( public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); public native @ByVal Tensor sparse_mask(@Const 
@ByRef Tensor mask); - public native @ByVal Tensor to_dense(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); + public native @ByVal Tensor _sparse_mask_projection(@Const @ByRef Tensor mask, @Cast("bool") boolean accumulate_matches/*=false*/); + public native @ByVal Tensor _sparse_mask_projection(@Const @ByRef Tensor mask); + public native @ByVal Tensor to_dense(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional masked_grad); public native @ByVal Tensor to_dense(); - public native @ByVal Tensor _to_dense(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); + public native @ByVal Tensor _to_dense(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional masked_grad); public native @ByVal Tensor _to_dense(); public native @Cast("int64_t") long sparse_dim(); public native @Cast("int64_t") long _dimI(); @@ -1114,21 +1123,37 @@ private native void allocate( public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(); public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@ByVal Dimname dim); public native @ByVal Tensor to_sparse(@Cast("int64_t") long sparse_dim); + public native @ByVal Tensor _to_sparse(@Cast("int64_t") long sparse_dim); public native @ByVal Tensor to_sparse(@ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse(); public native @ByVal Tensor to_sparse(@ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = 
"c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse(@ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse(); + public native @ByVal Tensor _to_sparse(@ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_csr(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_csr(); + public native @ByVal Tensor _to_sparse_csr(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_csr(); public native @ByVal Tensor to_sparse_csc(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_csc(); + public native @ByVal Tensor _to_sparse_csc(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_csc(); public native @ByVal Tensor to_sparse_bsr(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_bsr(@ByVal LongArrayRef blocksize); public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
blocksize); + public native @ByVal Tensor _to_sparse_bsr(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_bsr(@ByVal LongArrayRef blocksize); + public native @ByVal Tensor _to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); public native @ByVal Tensor to_sparse_bsc(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_bsc(@ByVal LongArrayRef blocksize); public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); + public native @ByVal Tensor _to_sparse_bsc(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_bsc(@ByVal LongArrayRef blocksize); + public native @ByVal Tensor _to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
blocksize); public native @ByVal Tensor to_mkldnn(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor to_mkldnn(); public native @ByVal Tensor dequantize(); @@ -1355,6 +1380,8 @@ private native void allocate( public native @ByVal Tensor index_select(@ByVal Dimname dim, @Const @ByRef Tensor index); public native @ByVal Tensor masked_select(@Const @ByRef Tensor mask); public native @ByVal Tensor nonzero(); + public native @ByVal Tensor nonzero_static(@Cast("int64_t") long size, @Cast("int64_t") long fill_value/*=-1*/); + public native @ByVal Tensor nonzero_static(@Cast("int64_t") long size); public native @Cast({"", "std::vector"}) @StdMove TensorVector nonzero_numpy(); public native @ByVal Tensor argwhere(); public native @ByVal Tensor gather(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); @@ -1471,6 +1498,8 @@ private native void allocate( public native @ByVal Tensor argsort(@ByVal Dimname dim); public native @ByVal T_TensorTensor_T topk(@Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); public native @ByVal T_TensorTensor_T topk(@Cast("int64_t") long k); + public native @ByVal T_TensorTensor_T topk_symint(@ByVal SymInt k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); + public native @ByVal T_TensorTensor_T topk_symint(@ByVal SymInt k); public native @ByVal Tensor all(); public native @ByVal Tensor any(); public native @ByVal Tensor renorm(@Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java index ebbd2266dc2..ff968a7d753 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java @@ -181,6 +181,23 @@ private native void allocate( public native @Cast({"", "c10::Storage&&"}) @StdMove Storage storage(); public native @Cast("bool") boolean is_alias_of(@Const @ByRef TensorBase other); + // Move the storage backend to shm based + // to enable memory sharing across processes. + // + // NB1: the ideal behavior of this API still requires further discussion + // but for now we are inclined to keep it consistent with existing THP behavior + // https://github.com/pytorch/pytorch/blob/4dca9bde0552afc67b5b74f4a0696fe6055709c4/torch/storage.py#L196-L212 + // so we don't assert on anything here and rely on caller knowing + // what it's doing. + // + // NB2: this currently provides Linux fd based shm support only + // to simplify the storage lifetime management logic in ATen + // and similarly for now we are not adding support for file system based + // shm support like in THP due to additional GC manager support needed + // to prevent leaks. + // As such, calling this from non supported systems (e.g. Windows) would fail. + public native void share_memory_(); + public native @Cast("bool") boolean _is_zerotensor(); public native void _set_zero(@Cast("bool") boolean _zero); @@ -228,6 +245,9 @@ private native void allocate( /** Returns if a {@code Tensor} has XLA backend. */ public native @Cast("bool") boolean is_xla(); + /** Returns if a {@code Tensor} has MTIA backend. */ + public native @Cast("bool") boolean is_mtia(); + /** Returns if a {@code Tensor} has HPU backend. */ public native @Cast("bool") boolean is_hpu(); @@ -289,18 +309,30 @@ private native void allocate( * TensorOptions.h. */ public native @ByVal TensorOptions options(); + public native @Const Pointer const_data_ptr(); + + public native Pointer mutable_data_ptr(); + + // TODO(#97856) Make this return a const pointer. 
This currently + // returns a non-const pointer because of the large + // number of clients that we still want to audit before + // migrating to mutable_data_ptr(). public native Pointer data_ptr(); + // Legacy interface during the migration to indicate that a callsite + // has not been audited for mutability. + // + // Do not add new uses of this, use const_data_ptr() if possible, + // mutable_data_ptr() otherwise. + // + // TODO(#97856) Make this return a const pointer. This is currently + // const because of the vast number of clients that + // rely on this. public native @Name("data_ptr") BytePointer data_ptr_char(); - public native @Name("data_ptr") ShortPointer data_ptr_short(); - public native @Name("data_ptr") IntPointer data_ptr_int(); - public native @Cast("int64_t*") @Name("data_ptr") LongPointer data_ptr_long(); - public native @Name("data_ptr") FloatPointer data_ptr_float(); - public native @Name("data_ptr") DoublePointer data_ptr_double(); // Purposely not defined here to avoid inlining diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java index 346a18a4343..2ece11ab2ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java @@ -62,4 +62,9 @@ public class TensorGeometry extends Pointer { public native @ByVal SymInt sym_numel(); public native @ByVal TensorGeometry transpose(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1); + + public native @ByRef SymIntVector mutable_sizes(); + public native @ByRef SymIntVector mutable_strides(); + public native @ByRef SymInt mutable_storage_offset(); + public native void recompute(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java index ffed5f6f1d5..48e58e18111 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java @@ -429,6 +429,8 @@ public enum SizesStridesPolicy { public native @Cast("bool") boolean is_xla(); + public native @Cast("bool") boolean is_mtia(); + public native @Cast("bool") boolean is_hpu(); public native @Cast("bool") boolean is_lazy(); @@ -633,39 +635,57 @@ public native void _set_fw_grad( * for you; this class is available from 'Tensor'. */ + /** + * Return a mutable typed data pointer to the actual data which this + * tensor refers to. This checks that the requested type (from the + * template parameter) matches the internal type of the tensor. + * + * It is invalid to call data() on a dtype-uninitialized tensor, even if + * the size is 0. + * + * WARNING: If a tensor is not contiguous, you MUST use strides when + * performing index calculations to determine the location of elements in + * the tensor. We recommend using 'TensorAccessor' to handle this computation + * for you; this class is available from 'Tensor'. + */ + // Shared implementation of data_dtype_initialized() and + // mutable_data_dtype_initialized(). + /** * More efficient helper for Tensor::data_ptr(). Like data(), but * does not do a type check. Unlike the untemplated data(), does * check has_storage() and storage_initialized(). */ + // Shared implementation of mutable_data_ptr_impl() and the future + // mutable_data_ptr_impl(). + public native @Const Pointer data(); /** * Return a void* data pointer to the actual data which this tensor refers to. * - * It is invalid to call data() on a dtype-uninitialized tensor, even if the - * size is 0. + * It is invalid to call mutable_data() on a dtype-uninitialized + * tensor, even if the size is 0. * * WARNING: The data pointed to by this tensor may not contiguous; do NOT * assume that itemsize() * numel() is sufficient to compute the bytes that * can be validly read from this tensor. */ - public native Pointer data(); - - /** - * Like data(), but performs no checks. 
You are responsible for ensuring - * that all invariants required by data() are upheld here. - */ - - /** - * Returns the TypeMeta of a tensor, which describes what data type - * it is (e.g., int, float, ...) - */ + public native Pointer mutable_data(); public native @Const @ByVal TypeMeta dtype(); /** * Return the size of a single element of this tensor in bytes. */ public native @Cast("size_t") long itemsize(); + + public native void set_backend_meta(@ByVal BackendMetaRef backend_meta); + + public native BackendMeta get_backend_meta(); + + public native @ByVal BackendMetaRef get_backend_meta_intrusive_ptr(); + + public native void release_storage_and_set_meta_custom_data_ptr_error_msg_( + @ByVal StringOptional s); /** * True if a tensor has no elements (e.g., numel() == 0). */ @@ -949,7 +969,7 @@ public native void ShareExternalPointer( * If the existing data does not match the desired type, it will be deleted * and a new storage will be created. */ - public native Pointer raw_mutable_data(@Const @ByVal TypeMeta meta); + public native Pointer raw_mutable_data(@Const @ByRef TypeMeta meta); /** * Returns a typed pointer of the underlying storage. 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java index 6cd3bc9ee7a..a6cef830105 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java @@ -61,7 +61,9 @@ public class TensorIndex extends Pointer { public TensorIndex(String str) { super((Pointer)null); allocate(str); } private native void allocate(String str); - // Case 3: Integer value + // Case 3: (Sym) Integer value + public TensorIndex(@ByVal SymInt integer) { super((Pointer)null); allocate(integer); } + private native void allocate(@ByVal SymInt integer); public TensorIndex(@Cast("int64_t") long integer) { super((Pointer)null); allocate(integer); } private native void allocate(@Cast("int64_t") long integer); public TensorIndex(int integer) { super((Pointer)null); allocate(integer); } @@ -83,7 +85,7 @@ public class TensorIndex extends Pointer { public native @Cast("bool") boolean is_integer(); - public native @Cast("int64_t") long integer(); + public native @ByVal SymInt integer(); public native @Cast("bool") boolean is_boolean(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java index 66ff0985cdd..cd04ea89ae2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java @@ -112,6 +112,11 @@ public class TensorIteratorBase extends MetaBase { /** Returns the dimension with the largest extent: (size[dim]-1) * stride[dim] */ public native int get_dim_to_split(); + + /** Return scalar value from original_tensor_base if it is defined. When + * common_dtype is Half, casting scalar input to common_dtype might overflow. + * If the scalar is aleady given in the type of Half, then return scalar + * value from tensor_base. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java index e005e974334..1f6d0af4008 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java @@ -41,18 +41,18 @@ public class TensorMaker extends Pointer { public native @ByRef @NoException(true) TensorMaker storage_offset(@ByVal LongOptional value); - public native @ByRef @NoException(true) TensorMaker deleter(@ByVal PointerConsumer value); - public native @ByRef @NoException(true) TensorMaker deleter(@ByVal @Cast("void(*)(void*)") Pointer value); - public native @ByRef @NoException(true) TensorMaker deleter(@ByVal @Cast("void(*)(void*)") long value); + public native @ByRef @NoException(true) TensorMaker deleter(PointerConsumer value); - public native @ByRef @NoException(true) TensorMaker context(Pointer value, @Cast("at::TensorMaker::ContextDeleter") PointerConsumer deleter/*=nullptr*/); + public native @ByRef @NoException(true) TensorMaker context(Pointer value, PointerConsumer deleter/*=nullptr*/); public native @ByRef @NoException(true) TensorMaker context(Pointer value); - public native @ByRef @NoException(true) TensorMaker context(Pointer value, @Cast("at::TensorMaker::ContextDeleter") Pointer deleter/*=nullptr*/); - public native @ByRef @NoException(true) TensorMaker context(Pointer value, @Cast("at::TensorMaker::ContextDeleter") long deleter/*=nullptr*/); public native @ByRef @NoException(true) TensorMaker target_device(@ByVal DeviceOptional value); public native @ByRef @NoException(true) TensorMaker options(@ByVal TensorOptions value); + public native @ByRef @NoException(true) TensorMaker resizeable_storage(); + + public native @ByRef @NoException(true) TensorMaker allocator(Allocator allocator); + public native @ByVal Tensor make_tensor(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java index 8ca34de54a6..f9d6700c6a8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java @@ -78,7 +78,7 @@ * {@code torch.device} object (e.g., "cuda:1" can be passed to everywhere a * {@code torch.device("cuda:1")} is accepted). To support the syntax * {@code at::empty({10}, {kCUDA, 1})} and {@code tensor.to(kCUDA)}, we need to make sure - * that {@code TensorOptions} is implicitly constructible with any argments that a + * that {@code TensorOptions} is implicitly constructible with any arguments that a * {@code Device} can constructed from. So we have, * * /* implicit * / TensorOptions(T&& device) : TensorOptions() { @@ -93,7 +93,7 @@ * * * But this will be problematic. Consider this: {@code TensorOptions({kCUDA, 1})}. - * Compiler will compain about ambiguity between the copy constructor and the + * Compiler will complain about ambiguity between the copy constructor and the * {@code Device} constructor because {@code {kCUDA, 1}} can be converted to both a * {@code TensorOption} and a {@code Device}. 
* diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java deleted file mode 100644 index cb3422040f8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java +++ /dev/null @@ -1,51 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVector,4>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TreeList extends TreeRefSmallVectorImpl { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TreeList(Pointer p) { super(p); } - - public TreeList() { super((Pointer)null); allocate(); } - private native void allocate(); - - public TreeList(@Cast("size_t") long Size, @Const @ByRef(nullValue = "c10::intrusive_ptr()") TreeRef Value) { super((Pointer)null); allocate(Size, Value); } - private native void allocate(@Cast("size_t") long Size, @Const @ByRef(nullValue = "c10::intrusive_ptr()") TreeRef Value); - public TreeList(@Cast("size_t") long Size) { super((Pointer)null); allocate(Size); } - private native void allocate(@Cast("size_t") long Size); - - // note: The enable_if restricts Container to types that have a .begin() and - // .end() that return valid input iterators. 
- - public TreeList(@Const @ByRef TreeList RHS) { super((Pointer)null); allocate(RHS); } - private native void allocate(@Const @ByRef TreeList RHS); - - public native @ByRef @Name("operator =") TreeList put(@Const @ByRef TreeList RHS); - - // note: The enable_if restricts Container to types that have a .begin() and - // .end() that return valid input iterators. - - - - - - // note: The enable_if restricts Container to types that have a .begin() and - // .end() that return valid input iterators. -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorBase.java deleted file mode 100644 index 443b5c97060..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorBase.java +++ /dev/null @@ -1,29 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVectorTemplateBase >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TreeRefSmallVectorBase extends TreeRefSmallVectorCommon { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public TreeRefSmallVectorBase(Pointer p) { super(p); } - - public native void push_back(@Const @ByRef TreeRef Elt); - - public native void pop_back(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java deleted file mode 100644 index 5a11aad8b86..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java +++ /dev/null @@ -1,49 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVectorTemplateCommon >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TreeRefSmallVectorCommon extends IntSizedSmallVectorBase { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TreeRefSmallVectorCommon(Pointer p) { super(p); } - - - // forward iterator creation methods. - public native @ByVal @Cast("c10::SmallVectorTemplateCommon >::iterator*") TreeRef begin(); - public native @ByVal @Cast("c10::SmallVectorTemplateCommon >::iterator*") TreeRef end(); - - // reverse iterator creation methods. - - public native long size_in_bytes(); - public native long max_size(); - - public native @Cast("size_t") long capacity_in_bytes(); - - /** Return a pointer to the vector's buffer, even if empty(). 
*/ - public native @ByVal @Cast("c10::SmallVectorTemplateCommon >::pointer*") TreeRef data(); - /** Return a pointer to the vector's buffer, even if empty(). */ - - // SmallVector::at is NOT from LLVM. - public native @ByVal TreeRef at(long idx); - public native @Name("operator []") @ByVal TreeRef get(long idx); - - public native @ByVal TreeRef front(); - - public native @ByVal TreeRef back(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java deleted file mode 100644 index 3c54a576b99..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java +++ /dev/null @@ -1,71 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVectorImpl >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TreeRefSmallVectorImpl extends TreeRefSmallVectorBase { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TreeRefSmallVectorImpl(Pointer p) { super(p); } - - - - public native void clear(); - public native void resize(long N); - - /** Like resize, but \ref T is POD, the new values won't be initialized. 
*/ - public native void resize_for_overwrite(long N); - - public native void resize(long N, @ByVal TreeRef NV); - - public native void reserve(long N); - - public native void pop_back_n(long NumItems); - - public native @ByVal TreeRef pop_back_val(); - - public native void swap(@ByRef TreeRefSmallVectorImpl RHS); - - /** Add the specified range to the end of the SmallVector. */ - - /** Append \p NumInputs copies of \p Elt to the end. */ - public native void append(long NumInputs, @ByVal TreeRef Elt); - - public native void append(@Const @ByRef TreeRefSmallVectorImpl RHS); - - public native void assign(long NumElts, @ByVal TreeRef Elt); - - // FIXME: Consider assigning over existing elements, rather than clearing & - // re-initializing them - for all assign(...) variants. - - public native void assign(@Const @ByRef TreeRefSmallVectorImpl RHS); - - public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef erase(@ByVal @Cast("c10::SmallVectorImpl >::const_iterator*") TreeRef CI); - - public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef erase(@ByVal @Cast("c10::SmallVectorImpl >::const_iterator*") TreeRef CS, @ByVal @Cast("c10::SmallVectorImpl >::const_iterator*") TreeRef CE); - public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef insert(@ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef I, @ByRef(true) TreeRef Elt); - - public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef insert(@ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef I, long NumToInsert, @ByVal TreeRef Elt); - - public native @ByRef @Name("operator =") TreeRefSmallVectorImpl put(@Const @ByRef TreeRefSmallVectorImpl RHS); - - public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TreeRefSmallVectorImpl RHS); - public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TreeRefSmallVectorImpl RHS); - - public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef 
TreeRefSmallVectorImpl RHS); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java index 2f378a7991e..e11a6db1e77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java @@ -106,7 +106,7 @@ public class Type extends Pointer { // // Takes a custom printer that users can pass in to customize the output of // this method. - public native @StdString BytePointer annotation_str(@ByVal @Cast("c10::TypePrinter*") Pointer printer); + public native @StdString BytePointer annotation_str(@ByVal TypePrinter printer); public native @StdString BytePointer annotation_str(); // Returns a human readable string that includes additional information like diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java index 6b35f9ace60..f97112cfeed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java @@ -77,26 +77,26 @@ public class TypeMeta extends Pointer { /** * Returns the new function pointer for individual items. */ - public native @Cast("caffe2::TypeMeta::New*") @NoException(true) TypeMetaData.New newFn(); + public native @NoException(true) PointerSupplier newFn(); /** * Returns the placement new function pointer for individual items. */ - public native @Cast("caffe2::TypeMeta::PlacementNew*") @NoException(true) TypeMetaData.PlacementNew placementNew(); + public native @NoException(true) PlacementConsumer placementNew(); /** * Returns the typed copy function pointer for individual iterms. */ - public native @Cast("caffe2::TypeMeta::Copy*") @NoException(true) TypeMetaData.Copy copy(); + public native @NoException(true) PlacementCopier copy(); /** * Returns the destructor function pointer for individual items. 
*/ - public native @Cast("caffe2::TypeMeta::PlacementDelete*") @NoException(true) TypeMetaData.PlacementDelete placementDelete(); - public native @Cast("caffe2::TypeMeta::Delete*") @NoException(true) TypeMetaData.Delete deleteFn(); + public native @NoException(true) PlacementConsumer placementDelete(); + public native @NoException(true) PointerConsumer deleteFn(); /** * Returns a printable name for the type. */ public native @StringView @NoException(true) BytePointer name(); - private static native @Namespace @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); + private static native @Namespace @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByRef TypeMeta lhs, @Const @ByRef TypeMeta rhs); public boolean equals(TypeMeta rhs) { return equals(this, rhs); } // Below are static functions that can be called by passing a specific type. @@ -114,4 +114,15 @@ public class TypeMeta extends Pointer { * convert TypeMeta handles to ScalarType enum values */ public native ScalarType toScalarType(); +// #ifdef __CUDACC__ + // NOTE [ TypeIdentifier::Get nvcc/clang discrepancy] + // nvcc and clang do not produce identical results for + // TypeIdentifier::Get, because TypeIdentifier::Get relies on + // __PRETTY_FUNCTION__ and they don't agree on the canonical names + // of types (e.g., nvcc normalizes to `short unsigned int`, but clang + // calls it `unsigned short`). Hide the implementation of this function + // from nvcc so that we always use clang (or whatever host C++ compiler) + // for TypeIdentifier::Get. 
+// #else + // specializations return indexes into typeMetaDataInstances() } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java index 905f7993d70..6f7a2d409f3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java @@ -36,45 +36,35 @@ public class TypeMetaData extends Pointer { return new TypeMetaData((Pointer)this).offsetAddress(i); } - public static class New extends FunctionPointer { - static { Loader.load(); } + @Opaque public static class New extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public New() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public New(Pointer p) { super(p); } - protected New() { allocate(); } - private native void allocate(); - public native Pointer call(); + public New(Pointer p) { super(p); } } - public static class PlacementNew extends FunctionPointer { - static { Loader.load(); } + @Opaque public static class PlacementNew extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public PlacementNew() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PlacementNew(Pointer p) { super(p); } - protected PlacementNew() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0, @Cast("size_t") long arg1); + public PlacementNew(Pointer p) { super(p); } } - public static class Copy extends FunctionPointer { - static { Loader.load(); } + @Opaque public static class Copy extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public Copy() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Copy(Pointer p) { super(p); } - protected Copy() { allocate(); } - private native void allocate(); - public native void call(@Const Pointer arg0, Pointer arg1, @Cast("size_t") long arg2); + public Copy(Pointer p) { super(p); } } - public static class PlacementDelete extends FunctionPointer { - static { Loader.load(); } + @Opaque public static class PlacementDelete extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public PlacementDelete() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PlacementDelete(Pointer p) { super(p); } - protected PlacementDelete() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0, @Cast("size_t") long arg1); + public PlacementDelete(Pointer p) { super(p); } } - public static class Delete extends FunctionPointer { - static { Loader.load(); } + @Opaque public static class Delete extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public Delete() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Delete(Pointer p) { super(p); } - protected Delete() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0); + public Delete(Pointer p) { super(p); } } public TypeMetaData() { super((Pointer)null); allocate(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java index 50e7b49129f..e341f365f66 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java @@ -58,12 +58,8 @@ public class UniqueVoidPtr extends Pointer { private native void allocate(); public UniqueVoidPtr(Pointer data) { super((Pointer)null); allocate(data); } private native void allocate(Pointer data); - public UniqueVoidPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter) { super((Pointer)null); allocate(data, ctx, ctx_deleter); } - private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter); - public UniqueVoidPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter) { super((Pointer)null); allocate(data, ctx, ctx_deleter); } - private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter); - public UniqueVoidPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter) { super((Pointer)null); allocate(data, ctx, ctx_deleter); } - private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter); + public UniqueVoidPtr(Pointer data, Pointer ctx, PointerConsumer ctx_deleter) { super((Pointer)null); allocate(data, ctx, ctx_deleter); } + private native void allocate(Pointer data, Pointer ctx, PointerConsumer ctx_deleter); public native @Name("operator ->") Pointer access(); public native void clear(); public native Pointer get(); @@ -71,14 +67,8 @@ public class UniqueVoidPtr extends Pointer { public 
native Pointer release_context(); public native @Cast("bool") boolean compare_exchange_deleter( - @Cast("c10::DeleterFnPtr") PointerConsumer expected_deleter, - @Cast("c10::DeleterFnPtr") PointerConsumer new_deleter); - public native @Cast("bool") boolean compare_exchange_deleter( - @Cast("c10::DeleterFnPtr") Pointer expected_deleter, - @Cast("c10::DeleterFnPtr") Pointer new_deleter); - public native @Cast("bool") boolean compare_exchange_deleter( - @Cast("c10::DeleterFnPtr") long expected_deleter, - @Cast("c10::DeleterFnPtr") long new_deleter); + PointerConsumer expected_deleter, + PointerConsumer new_deleter); public native @Cast("bool") @Name("operator bool") boolean asBoolean(); - public native @Cast("c10::DeleterFnPtr") PointerConsumer get_deleter(); + public native PointerConsumer get_deleter(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java index 20c5ac4cdbe..200bce275b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java @@ -27,13 +27,5 @@ public class ValueWrap extends Pointer { private native void allocate(Value p); public native void clear(); public native Value elem(); public native ValueWrap elem(Value setter); - public static class Clear_cb_Pointer extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Clear_cb_Pointer(Pointer p) { super(p); } - protected Clear_cb_Pointer() { allocate(); } - private native void allocate(); - public native void call(Pointer arg0); - } - public native Clear_cb_Pointer clear_cb(); public native ValueWrap clear_cb(Clear_cb_Pointer setter); + public native PointerConsumer clear_cb(); public native ValueWrap clear_cb(PointerConsumer setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java index 9267c72101e..080f3487552 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java @@ -43,7 +43,7 @@ public class VariableInfo extends Pointer { public native @ByRef Layout layout(); public native VariableInfo layout(Layout setter); public native @ByRef Device device(); public native VariableInfo device(Device setter); public native ScalarType scalar_type(); public native VariableInfo scalar_type(ScalarType setter); - public native @ByRef @Cast("std::vector*") LongVector size(); public native VariableInfo size(LongVector setter); + public native @ByRef SymIntVector size(); public native VariableInfo size(SymIntVector setter); public native @Cast("bool") boolean requires_grad(); public native VariableInfo requires_grad(boolean setter); public native @Cast("bool") boolean is_empty(); public native VariableInfo is_empty(boolean setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java index 12a0a2cc194..2e281aa3828 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java @@ -73,9 +73,9 @@ public class VariableVersion extends Pointer { // doesn't allocate the intrusive_ptr. 
// Example use cases are: // - Inference tensors don't track version counter, so they'll just always - // have disbaled VariableVersion. + // have disabled VariableVersion. // - In SavedVariable class we override version_counter_ inside its - // construtor + // constructor // so that we can use the cheap constructor there. public enum Disabled { DISABLED(0); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java index 8f1f6d01f8a..90ce1c896ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java @@ -18,40 +18,21 @@ import static org.bytedeco.pytorch.global.torch.*; -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -/** Applies ZeroPad over a 2-D input. - * See https://pytorch.org/docs/master/nn.html#torch.nn.ZeroPad2d to learn - * about the exact behavior of this module. - * - * See the documentation for {@code torch::nn::ZeroPad2dOptions} class to learn what - * constructor arguments are supported for this module. - * - * Example: - *
{@code
- *  ZeroPad2d model(ZeroPad2dOptions({1, 1, 2, 0}));
- *  }
*/ -@Namespace("torch::nn") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ZeroPad2dImpl extends ZeroPad2dImplCloneable { +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Applies ZeroPad over a 2-D input. +@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad2dImpl extends ZeroPad2dImplBase { static { Loader.load(); } + + + public ZeroPad2dImpl(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); + public ZeroPad2dImpl(@Const @ByRef ZeroPad2dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@Const @ByRef ZeroPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dImpl(Pointer p) { super(p); } /** Downcast constructor. */ public ZeroPad2dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); - public ZeroPad2dImpl(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding); - public ZeroPad2dImpl(@Const @ByRef ZeroPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @SharedPtr @Name("std::make_shared") private native void allocate(@Const @ByRef ZeroPad2dOptions options_); - - public native void reset(); - - /** Pretty prints the {@code ZeroPad2d} module into the given {@code stream}. 
*/ - public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); - - public native @ByVal Tensor forward(@Const @ByRef Tensor input); - - /** The options with which this {@code Module} was constructed. */ - public native @ByRef ZeroPad2dOptions options(); public native ZeroPad2dImpl options(ZeroPad2dOptions setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java index b685de576d7..940a490c1a4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java @@ -18,21 +18,13 @@ import static org.bytedeco.pytorch.global.torch.*; -// ============================================================================ - -/** Options for the {@code ZeroPad2d} module. - * - * Example: - *
{@code
- *  ZeroPad2d model(ZeroPad2dOptions({1, 1, 2, 0}));
- *  }
*/ -@Namespace("torch::nn") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::nn::ZeroPadOptions<2>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ZeroPad2dOptions extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dOptions(Pointer p) { super(p); } - public ZeroPad2dOptions(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - private native void allocate(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding); - public native @Cast("torch::ExpandingArray<4>*") @ByRef @NoException(true) LongPointer padding(); + public ZeroPad2dOptions(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); + public native @Cast("torch::ExpandingArray<2*2>*") @ByRef @NoException(true) LongPointer padding(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java deleted file mode 100644 index cf4c7ad5820..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static 
org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_iterator_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class attribute_iterator extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public attribute_iterator(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public attribute_iterator(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public attribute_iterator position(long position) { - return (attribute_iterator)super.position(position); - } - @Override public attribute_iterator getPointer(long i) { - return new attribute_iterator((Pointer)this).offsetAddress(i); - } - - public attribute_iterator( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } - private native void allocate( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module); - // empty cursors_, represents end of iteration - public attribute_iterator() { super((Pointer)null); allocate(); } - private native void allocate(); - public native @ByVal @Name("operator *") IValue multiply(); - public native @ByVal @Name("operator ->") IValue access(); - public native @ByRef @Name("operator ++") attribute_iterator increment(); - public native @ByVal @Name("operator ++") attribute_iterator increment(int arg0); - - private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( - @Const @ByRef attribute_iterator a, - @Const @ByRef attribute_iterator b); - public boolean notEquals(attribute_iterator b) { return notEquals(this, b); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java deleted file mode 100644 
index 53ddd45911f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_list_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class attribute_list extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public attribute_list(Pointer p) { super(p); } - - public native @ByVal attribute_iterator begin(); - public native @ByVal attribute_iterator end(); - public native @Cast("size_t") long size(); - - public attribute_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } - private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java deleted file mode 100644 index a0b5610ea52..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_iterator_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class buffer_iterator extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public buffer_iterator(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public buffer_iterator(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public buffer_iterator position(long position) { - return (buffer_iterator)super.position(position); - } - @Override public buffer_iterator getPointer(long i) { - return new buffer_iterator((Pointer)this).offsetAddress(i); - } - - public buffer_iterator( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } - private native void allocate( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module); - // empty cursors_, represents end of iteration - public buffer_iterator() { super((Pointer)null); allocate(); } - private native void allocate(); - public native @ByVal @Name("operator *") Tensor multiply(); - public native @ByVal @Name("operator ->") Tensor access(); - public native @ByRef @Name("operator ++") buffer_iterator increment(); - public native @ByVal @Name("operator ++") buffer_iterator increment(int arg0); - - private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( - @Const @ByRef buffer_iterator a, - @Const @ByRef buffer_iterator b); - public boolean notEquals(buffer_iterator b) { return notEquals(this, b); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java deleted file mode 100644 index 6e3e6674133..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import 
org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_list_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class buffer_list extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public buffer_list(Pointer p) { super(p); } - - public native @ByVal buffer_iterator begin(); - public native @ByVal buffer_iterator end(); - public native @Cast("size_t") long size(); - - public buffer_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } - private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java index e93dde9c480..25ad53c806e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java @@ -114,7 +114,7 @@ public class CUDAStreamArrayRef extends Pointer { public native @ByVal CUDAStreamArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. 
*/ - public native @Const @ByVal CUDAStreamArrayRef slice(@Cast("size_t") long N); + public native @ByVal CUDAStreamArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java index c80f0a8137b..bd394ed4f0d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java @@ -60,7 +60,7 @@ public class TensorDescriptor extends Pointer { // padding). If 't' is lower-dimensional than 'pad', the remaining // dimensions (on the right) are padded with ones. This doesn't // affect the underlying data layout. This is particularly useful for - // dealing with a pecularity of the CuDNN API, which is that broadcasting in CuDNN is + // dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is // done in two steps: first, the client code is expected to pad out // (the dimensions) input tensors to be the same dimension as the // target broadcast, and then second, CuDNN takes of actually diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 9a755548443..f4c93ef6955 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -182,9 +182,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../MethodOptional.java -// Targeting ../OperatorOptional.java - - // Targeting ../NamedValueOptional.java @@ -209,6 +206,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringViewVectorOptional.java +// Targeting ../PointerPairOptional.java + + // Targeting ../ExampleVectorOptional.java @@ -314,12 +314,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting 
../PropertyVector.java -// Targeting ../InstructionVector.java - - -// Targeting ../CompilationUnitVector.java - - // Targeting ../OptimizerParamGroupVector.java @@ -413,9 +407,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../TensorOptionalVector.java -// Targeting ../OperatorOptionalVector.java - - // Targeting ../SharedFunctionPreVector.java @@ -431,9 +422,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../ResolverVector.java -// Targeting ../StackEntryVector.java - - // Targeting ../ValueVector.java @@ -494,6 +482,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../RecordFunctionHandleIntPair.java +// Targeting ../PointerPair.java + + // Targeting ../SizeTMatchedSchemaPair.java @@ -644,6 +635,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../RecordScopeSet.java +// Targeting ../DeviceTypeSet.java + + // Parsed from torch/csrc/utils/python_stub.h // #pragma once @@ -829,6 +823,115 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif +// Parsed from c10/core/DeviceType.h + +// #pragma once + +// This is directly synchronized with caffe2/proto/caffe2.proto, but +// doesn't require me to figure out how to get Protobuf headers into +// ATen/core (which would require a lot more build system hacking.) +// If you modify me, keep me synchronized with that file. + +// #include + +// #include +// #include + +// These contains all device types that also have a BackendComponent +// and therefore participate in per-backend functionality dispatch keys. 
+// This is most backends except PrivateUse2 and PrivateUse3 +// #define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) +// _(CPU, extra) +// _(CUDA, extra) +// _(HIP, extra) +// _(XLA, extra) +// _(MPS, extra) +// _(IPU, extra) +// _(XPU, extra) +// _(HPU, extra) +// _(VE, extra) +// _(Lazy, extra) +// _(Meta, extra) +// _(MTIA, extra) +// _(PrivateUse1, extra) + +@Namespace("c10") public enum DeviceType { + CPU((byte)(0)), + CUDA((byte)(1)), // CUDA. + MKLDNN((byte)(2)), // Reserved for explicit MKLDNN + OPENGL((byte)(3)), // OpenGL + OPENCL((byte)(4)), // OpenCL + IDEEP((byte)(5)), // IDEEP. + HIP((byte)(6)), // AMD HIP + FPGA((byte)(7)), // FPGA + ORT((byte)(8)), // ONNX Runtime / Microsoft + XLA((byte)(9)), // XLA / TPU + Vulkan((byte)(10)), // Vulkan + Metal((byte)(11)), // Metal + XPU((byte)(12)), // XPU + MPS((byte)(13)), // MPS + Meta((byte)(14)), // Meta (tensors with no data) + HPU((byte)(15)), // HPU / HABANA + VE((byte)(16)), // SX-Aurora / NEC + Lazy((byte)(17)), // Lazy Tensors + IPU((byte)(18)), // Graphcore IPU + MTIA((byte)(19)), // Meta training and inference devices + PrivateUse1((byte)(20)), // PrivateUse1 device + // NB: If you add more devices: + // - Change the implementations of DeviceTypeName and isValidDeviceType + // in DeviceType.cpp + // - Change the number below + COMPILE_TIME_MAX_DEVICE_TYPES((byte)(21)); + + public final byte value; + private DeviceType(byte v) { this.value = v; } + private DeviceType(DeviceType e) { this.value = e.value; } + public DeviceType intern() { for (DeviceType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +@Namespace("c10") @MemberGetter public static native DeviceType kCPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kCUDA(); +@Namespace("c10") @MemberGetter public static native DeviceType kHIP(); +@Namespace("c10") @MemberGetter public static native DeviceType kFPGA(); +@Namespace("c10") @MemberGetter 
public static native DeviceType kORT(); +@Namespace("c10") @MemberGetter public static native DeviceType kXLA(); +@Namespace("c10") @MemberGetter public static native DeviceType kMPS(); +@Namespace("c10") @MemberGetter public static native DeviceType kMeta(); +@Namespace("c10") @MemberGetter public static native DeviceType kVulkan(); +@Namespace("c10") @MemberGetter public static native DeviceType kMetal(); +@Namespace("c10") @MemberGetter public static native DeviceType kXPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kHPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kVE(); +@Namespace("c10") @MemberGetter public static native DeviceType kLazy(); +@Namespace("c10") @MemberGetter public static native DeviceType kIPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kMTIA(); +@Namespace("c10") @MemberGetter public static native DeviceType kPrivateUse1(); + +// define explicit int constant +@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_MAX_DEVICE_TYPES(); + +@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d, @Cast("bool") boolean lower_case/*=false*/); +@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d); +@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d, @Cast("bool") boolean lower_case/*=false*/); +@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d); + +@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(DeviceType d); +@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(@Cast("c10::DeviceType") byte d); + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, DeviceType type); +@Namespace("c10") public static native @Cast("std::ostream*") 
@ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Cast("c10::DeviceType") byte type); + +@Namespace("c10") public static native void register_privateuse1_backend(@StdString BytePointer backend_name); +@Namespace("c10") public static native void register_privateuse1_backend(@StdString String backend_name); +@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(@Cast("bool") boolean lower_case/*=true*/); +@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(); + + // namespace c10 + // namespace std + + + // Parsed from c10/macros/Macros.h // #ifndef C10_MACROS_MACROS_H_ @@ -863,11 +966,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define __ubsan_ignore_undefined__ __attribute__((no_sanitize("undefined"))) // #define __ubsan_ignore_signed_int_overflow__ // __attribute__((no_sanitize("signed-integer-overflow"))) +// #define __ubsan_ignore_pointer_overflow__ +// __attribute__((no_sanitize("pointer-overflow"))) // #define __ubsan_ignore_function__ __attribute__((no_sanitize("function"))) // #else // #define __ubsan_ignore_float_divide_by_zero__ // #define __ubsan_ignore_undefined__ // #define __ubsan_ignore_signed_int_overflow__ +// #define __ubsan_ignore_pointer_overflow__ // #define __ubsan_ignore_function__ // #endif @@ -963,6 +1069,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define C10_UNUSED __attribute__((__unused__)) // #endif //_MSC_VER +// #if !defined(__has_attribute) +// #define __has_attribute(x) 0 +// #endif + // Direct port of LLVM_ATTRIBUTE_USED. 
// #if __has_attribute(used) // #define C10_USED __attribute__((__used__)) @@ -1248,193 +1358,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_MACROS_MACROS_H_ -// Parsed from c10/core/DeviceType.h - -// #pragma once - -// This is directly synchronized with caffe2/proto/caffe2.proto, but -// doesn't require me to figure out how to get Protobuf headers into -// ATen/core (which would require a lot more build system hacking.) -// If you modify me, keep me synchronized with that file. - -// #include - -// #include -// #include - -// These contains all device types that also have a BackendComponent -// and therefore participate in per-backend functionality dispatch keys. -// This is most backends except PrivateUse2 and PrivateUse3 -// #define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) -// _(CPU, extra) -// _(CUDA, extra) -// _(HIP, extra) -// _(XLA, extra) -// _(MPS, extra) -// _(IPU, extra) -// _(XPU, extra) -// _(HPU, extra) -// _(VE, extra) -// _(Lazy, extra) -// _(Meta, extra) -// _(MTIA, extra) -// _(PrivateUse1, extra) - -@Namespace("c10") public enum DeviceType { - CPU((byte)(0)), - CUDA((byte)(1)), // CUDA. - MKLDNN((byte)(2)), // Reserved for explicit MKLDNN - OPENGL((byte)(3)), // OpenGL - OPENCL((byte)(4)), // OpenCL - IDEEP((byte)(5)), // IDEEP. 
- HIP((byte)(6)), // AMD HIP - FPGA((byte)(7)), // FPGA - ORT((byte)(8)), // ONNX Runtime / Microsoft - XLA((byte)(9)), // XLA / TPU - Vulkan((byte)(10)), // Vulkan - Metal((byte)(11)), // Metal - XPU((byte)(12)), // XPU - MPS((byte)(13)), // MPS - Meta((byte)(14)), // Meta (tensors with no data) - HPU((byte)(15)), // HPU / HABANA - VE((byte)(16)), // SX-Aurora / NEC - Lazy((byte)(17)), // Lazy Tensors - IPU((byte)(18)), // Graphcore IPU - MTIA((byte)(19)), // Meta training and inference devices - PrivateUse1((byte)(20)), // PrivateUse1 device - // NB: If you add more devices: - // - Change the implementations of DeviceTypeName and isValidDeviceType - // in DeviceType.cpp - // - Change the number below - COMPILE_TIME_MAX_DEVICE_TYPES((byte)(21)); - - public final byte value; - private DeviceType(byte v) { this.value = v; } - private DeviceType(DeviceType e) { this.value = e.value; } - public DeviceType intern() { for (DeviceType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -@Namespace("c10") @MemberGetter public static native DeviceType kCPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kCUDA(); -@Namespace("c10") @MemberGetter public static native DeviceType kHIP(); -@Namespace("c10") @MemberGetter public static native DeviceType kFPGA(); -@Namespace("c10") @MemberGetter public static native DeviceType kORT(); -@Namespace("c10") @MemberGetter public static native DeviceType kXLA(); -@Namespace("c10") @MemberGetter public static native DeviceType kMPS(); -@Namespace("c10") @MemberGetter public static native DeviceType kMeta(); -@Namespace("c10") @MemberGetter public static native DeviceType kVulkan(); -@Namespace("c10") @MemberGetter public static native DeviceType kMetal(); -@Namespace("c10") @MemberGetter public static native DeviceType kXPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kHPU(); -@Namespace("c10") @MemberGetter public 
static native DeviceType kVE(); -@Namespace("c10") @MemberGetter public static native DeviceType kLazy(); -@Namespace("c10") @MemberGetter public static native DeviceType kIPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kMTIA(); -@Namespace("c10") @MemberGetter public static native DeviceType kPrivateUse1(); - -// define explicit int constant -@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_MAX_DEVICE_TYPES(); - -@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d, @Cast("bool") boolean lower_case/*=false*/); -@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d); -@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d, @Cast("bool") boolean lower_case/*=false*/); -@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d); - -@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(DeviceType d); -@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(@Cast("c10::DeviceType") byte d); - -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, DeviceType type); -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Cast("c10::DeviceType") byte type); - -@Namespace("c10") public static native void register_privateuse1_backend(@StdString BytePointer backend_name); -@Namespace("c10") public static native void register_privateuse1_backend(@StdString String backend_name); -@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(@Cast("bool") boolean lower_case/*=true*/); -@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(); - - // namespace c10 - // 
namespace std - - - -// Parsed from c10/util/Deprecated.h - -// #pragma once - -/** - * This file provides portable macros for marking declarations - * as deprecated. You should generally use C10_DEPRECATED, - * except when marking 'using' declarations as deprecated, - * in which case you should use C10_DEFINE_DEPRECATED_USING - * (due to portability concerns). - */ - -// Sample usage: -// -// C10_DEPRECATED void bad_func(); -// struct C10_DEPRECATED BadStruct { -// ... -// }; - -// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses -// the "__declspec(deprecated)" implementation and not the C++14 -// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on -// MSVC, but ran into issues with some older MSVC versions. -// #if (defined(__cplusplus) && __cplusplus >= 201402L) -// #define C10_DEPRECATED [[deprecated]] -// #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] -// #elif defined(__GNUC__) -// #define C10_DEPRECATED __attribute__((deprecated)) -// TODO Is there some way to implement this? -// #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) - -// #elif defined(_MSC_VER) -// #else -// #warning "You need to implement C10_DEPRECATED for this compiler" -// #define C10_DEPRECATED -// #endif - -// Sample usage: -// -// C10_DEFINE_DEPRECATED_USING(BadType, int) -// -// which is the portable version of -// -// using BadType [[deprecated]] = int; - -// technically [[deprecated]] syntax is from c++14 standard, but it works in -// many compilers. 
-// #if defined(__has_cpp_attribute) -// #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName [[deprecated]] = TypeThingy; -// #endif -// #endif - -// #if defined(_MSC_VER) -// #endif - -// #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) -// nvcc has a bug where it doesn't understand __attribute__((deprecated)) -// declarations even when the host compiler supports it. We'll only use this gcc -// attribute when not cuda, and when using a GCC compiler that doesn't support -// the c++14 syntax we checked for above (available in __GNUC__ >= 5) -// #if !defined(__CUDACC__) -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName __attribute__((deprecated)) = TypeThingy; -// #else -// using cuda + gcc < 5, neither deprecated syntax is available so turning off. -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName = TypeThingy; -// #endif -// #endif - -// #if !defined(C10_DEFINE_DEPRECATED_USING) -// #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" -// #define C10_DEFINE_DEPRECATED_USING -// #endif - - // Parsed from c10/util/reverse_iterator.h // #pragma once @@ -1578,14 +1501,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define C10_UTIL_EXCEPTION_H_ // #include -// #include // #include // #include // #include // #include -// #include -// #include // #include // #include @@ -2067,7 +1987,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include +// #include // #include // #include @@ -2094,7 +2014,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include +// #include +// #include // #include // #include @@ -2112,8 +2033,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // use. // WARNING! 
If you add a new backend component to the end of this list, -// make sure you update PrivateUse3Bit. (But you shouldn't: private use -// keys should have higher precedence than all built-in keys) +// make sure you register it before Meta. +// Meta must be at the end so that meta key in tls triggers meta kernels. +// (But you shouldn't: private use keys should have higher precedence than all +// built-in keys) // If you add a new (non-privateuse) backend here, // make sure to add an Autograd fallthrough kernel @@ -2130,11 +2053,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // _(HPU, extra) // _(VE, extra) // _(Lazy, extra) -// _(Meta, extra) // _(MTIA, extra) // _(PrivateUse1, extra) // _(PrivateUse2, extra) // _(PrivateUse3, extra) +// _(Meta, extra) // WARNING! If we add a new per-backend functionality key that has higher // priority than Autograd, then make sure you update EndOfRuntimeBackendKeys @@ -2187,15 +2110,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { HPUBit((byte)(8)), VEBit((byte)(9)), LazyBit((byte)(10)), - MetaBit((byte)(11)), - MTIABit((byte)(12)), - PrivateUse1Bit((byte)(13)), - PrivateUse2Bit((byte)(14)), - PrivateUse3Bit((byte)(15)), + MTIABit((byte)(11)), + PrivateUse1Bit((byte)(12)), + PrivateUse2Bit((byte)(13)), + PrivateUse3Bit((byte)(14)), + MetaBit((byte)(15)), // Define an alias to represent end of backend dispatch keys. // If you add new backend keys after PrivateUse3, please also update it here. - EndOfBackendKeys((byte)(PrivateUse3Bit.value)); + EndOfBackendKeys((byte)(MetaBit.value)); public final byte value; private BackendComponent(byte v) { this.value = v; } @@ -2461,48 +2384,51 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // and inputs are saved for backward in the post-autocast type. 
AutocastCPU((short)(Undefined.value + 27)), AutocastXPU((short)(Undefined.value + 28)), - AutocastHPU((short)(Undefined.value + 29)), - // Naughtily, AutocastCUDA is also being used for XLA. In the terminal state, - // it probably should get its own Autocast key - AutocastCUDA((short)(Undefined.value + 30)), + AutocastIPU((short)(Undefined.value + 29)), + AutocastHPU((short)(Undefined.value + 30)), + AutocastXLA((short)(Undefined.value + 31)), + // AutocastXLA is only being used for TPUs. XLA GPUs continue to use + // AutocastCUDA. + AutocastCUDA((short)(Undefined.value + 32)), + AutocastPrivateUse1((short)(Undefined.value + 33)), // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // There are a number of alternative modes which may want to handle before // autograd; for example, error checking, tracing, profiling or vmap. They // go here. - FuncTorchBatched((short)(Undefined.value + 31)), // See Note [Out-of-tree vmap+grad prototype] - FuncTorchVmapMode((short)(Undefined.value + 32)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchBatched((short)(Undefined.value + 34)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchVmapMode((short)(Undefined.value + 35)), // See Note [Out-of-tree vmap+grad prototype] // This is the dispatch key for BatchedTensorImpl, which is used to implement // batching rules for vmap. - Batched((short)(Undefined.value + 33)), + Batched((short)(Undefined.value + 36)), // When we are inside a vmap, all tensors dispatch on this key. // See Note: [DispatchKey::VmapMode usage] for more details. - VmapMode((short)(Undefined.value + 34)), + VmapMode((short)(Undefined.value + 37)), - FuncTorchGradWrapper((short)(Undefined.value + 35)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchGradWrapper((short)(Undefined.value + 38)), // See Note [Out-of-tree vmap+grad prototype] // Out-of-core key for Deferred Module Initialization in torchdistx. 
// See https://pytorch.org/torchdistx/latest/deferred_init.html - DeferredInit((short)(Undefined.value + 36)), + DeferredInit((short)(Undefined.value + 39)), // Used by Python key logic to know the set of tls on entry to the dispatcher // This kernel assumes it is the top-most non-functorch-related DispatchKey. // If you add a key above, make sure to update the fallback implementation for // this. - PythonTLSSnapshot((short)(Undefined.value + 37)), + PythonTLSSnapshot((short)(Undefined.value + 40)), // This key should be at the very top of the dispatcher - FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 38)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 41)), // See Note [Out-of-tree vmap+grad prototype] // TESTING: This is intended to be a generic testing tensor type id. // Don't use it for anything real; its only acceptable use is within a single // process test. Use it by creating a TensorImpl with this DispatchKey, and // then registering operators to operate on this type id. See // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example. - TESTING_ONLY_GenericWrapper((short)(Undefined.value + 39)), + TESTING_ONLY_GenericWrapper((short)(Undefined.value + 42)), // TESTING: This is intended to be a generic testing tensor type id. // Don't use it for anything real; its only acceptable use is within a ingle @@ -2511,45 +2437,51 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // to operate on this type id. See // aten/src/ATen/core/dispatch/backend_fallback_test.cpp // for a usage example - TESTING_ONLY_GenericMode((short)(Undefined.value + 40)), + TESTING_ONLY_GenericMode((short)(Undefined.value + 43)), + + // This key is used for pre-dispatch tracing in make_fx. + // It has lower priority than the PythonDispatcher key + // because we use the PythonDispatcher to intercept the key from python, + // and avoid having to implement it in C++. 
+ PreDispatch((short)(Undefined.value + 44)), // This is a bypass that allows you to skip running the C++ dispatcher // entirely - PythonDispatcher((short)(Undefined.value + 41)), + PythonDispatcher((short)(Undefined.value + 45)), // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // - EndOfFunctionalityKeys((short)(Undefined.value + 42)), + EndOfFunctionalityKeys((short)(Undefined.value + 46)), - StartOfDenseBackends((short)(Undefined.value + 43)), - CPU((short)(Undefined.value + 44)), + StartOfDenseBackends((short)(Undefined.value + 47)), + CPU((short)(Undefined.value + 48)), - CUDA((short)(Undefined.value + 45)), + CUDA((short)(Undefined.value + 49)), - HIP((short)(Undefined.value + 46)), + HIP((short)(Undefined.value + 50)), - XLA((short)(Undefined.value + 47)), + XLA((short)(Undefined.value + 51)), - MPS((short)(Undefined.value + 48)), + MPS((short)(Undefined.value + 52)), - IPU((short)(Undefined.value + 49)), + IPU((short)(Undefined.value + 53)), - XPU((short)(Undefined.value + 50)), + XPU((short)(Undefined.value + 54)), - HPU((short)(Undefined.value + 51)), + HPU((short)(Undefined.value + 55)), - VE((short)(Undefined.value + 52)), + VE((short)(Undefined.value + 56)), - Lazy((short)(Undefined.value + 53)), + Lazy((short)(Undefined.value + 57)), - Meta((short)(Undefined.value + 54)), + MTIA((short)(Undefined.value + 58)), - MTIA((short)(Undefined.value + 55)), + PrivateUse1((short)(Undefined.value + 59)), - PrivateUse1((short)(Undefined.value + 56)), + PrivateUse2((short)(Undefined.value + 60)), - PrivateUse2((short)(Undefined.value + 57)), + PrivateUse3((short)(Undefined.value + 61)), - PrivateUse3((short)(Undefined.value + 58)), + Meta((short)(Undefined.value + 62)), EndOfDenseBackends((short)(0)), StartOfQuantizedBackends((short)(1)), QuantizedCPU((short)(2)), @@ -2572,109 +2504,109 @@ public class torch extends org.bytedeco.pytorch.presets.torch { QuantizedLazy((short)(11)), - QuantizedMeta((short)(12)), + QuantizedMTIA((short)(12)), 
- QuantizedMTIA((short)(13)), + QuantizedPrivateUse1((short)(13)), - QuantizedPrivateUse1((short)(14)), + QuantizedPrivateUse2((short)(14)), - QuantizedPrivateUse2((short)(15)), + QuantizedPrivateUse3((short)(15)), - QuantizedPrivateUse3((short)(16)), - EndOfQuantizedBackends((short)( QuantizedPrivateUse3.value)), - StartOfSparseBackends((short)( QuantizedPrivateUse3.value + 1)), - SparseCPU((short)( QuantizedPrivateUse3.value + 2)), + QuantizedMeta((short)(16)), + EndOfQuantizedBackends((short)( QuantizedMeta.value)), + StartOfSparseBackends((short)( QuantizedMeta.value + 1)), + SparseCPU((short)( QuantizedMeta.value + 2)), - SparseCUDA((short)( QuantizedPrivateUse3.value + 3)), + SparseCUDA((short)( QuantizedMeta.value + 3)), - SparseHIP((short)( QuantizedPrivateUse3.value + 4)), + SparseHIP((short)( QuantizedMeta.value + 4)), - SparseXLA((short)( QuantizedPrivateUse3.value + 5)), + SparseXLA((short)( QuantizedMeta.value + 5)), - SparseMPS((short)( QuantizedPrivateUse3.value + 6)), + SparseMPS((short)( QuantizedMeta.value + 6)), - SparseIPU((short)( QuantizedPrivateUse3.value + 7)), + SparseIPU((short)( QuantizedMeta.value + 7)), - SparseXPU((short)( QuantizedPrivateUse3.value + 8)), + SparseXPU((short)( QuantizedMeta.value + 8)), - SparseHPU((short)( QuantizedPrivateUse3.value + 9)), + SparseHPU((short)( QuantizedMeta.value + 9)), - SparseVE((short)( QuantizedPrivateUse3.value + 10)), + SparseVE((short)( QuantizedMeta.value + 10)), - SparseLazy((short)( QuantizedPrivateUse3.value + 11)), + SparseLazy((short)( QuantizedMeta.value + 11)), - SparseMeta((short)( QuantizedPrivateUse3.value + 12)), + SparseMTIA((short)( QuantizedMeta.value + 12)), - SparseMTIA((short)( QuantizedPrivateUse3.value + 13)), + SparsePrivateUse1((short)( QuantizedMeta.value + 13)), - SparsePrivateUse1((short)( QuantizedPrivateUse3.value + 14)), + SparsePrivateUse2((short)( QuantizedMeta.value + 14)), - SparsePrivateUse2((short)( QuantizedPrivateUse3.value + 15)), + 
SparsePrivateUse3((short)( QuantizedMeta.value + 15)), - SparsePrivateUse3((short)( QuantizedPrivateUse3.value + 16)), - EndOfSparseBackends((short)( SparsePrivateUse3.value)), - StartOfNestedTensorBackends((short)( SparsePrivateUse3.value + 1)), - NestedTensorCPU((short)( SparsePrivateUse3.value + 2)), + SparseMeta((short)( QuantizedMeta.value + 16)), + EndOfSparseBackends((short)( SparseMeta.value)), + StartOfNestedTensorBackends((short)( SparseMeta.value + 1)), + NestedTensorCPU((short)( SparseMeta.value + 2)), - NestedTensorCUDA((short)( SparsePrivateUse3.value + 3)), + NestedTensorCUDA((short)( SparseMeta.value + 3)), - NestedTensorHIP((short)( SparsePrivateUse3.value + 4)), + NestedTensorHIP((short)( SparseMeta.value + 4)), - NestedTensorXLA((short)( SparsePrivateUse3.value + 5)), + NestedTensorXLA((short)( SparseMeta.value + 5)), - NestedTensorMPS((short)( SparsePrivateUse3.value + 6)), + NestedTensorMPS((short)( SparseMeta.value + 6)), - NestedTensorIPU((short)( SparsePrivateUse3.value + 7)), + NestedTensorIPU((short)( SparseMeta.value + 7)), - NestedTensorXPU((short)( SparsePrivateUse3.value + 8)), + NestedTensorXPU((short)( SparseMeta.value + 8)), - NestedTensorHPU((short)( SparsePrivateUse3.value + 9)), + NestedTensorHPU((short)( SparseMeta.value + 9)), - NestedTensorVE((short)( SparsePrivateUse3.value + 10)), + NestedTensorVE((short)( SparseMeta.value + 10)), - NestedTensorLazy((short)( SparsePrivateUse3.value + 11)), + NestedTensorLazy((short)( SparseMeta.value + 11)), - NestedTensorMeta((short)( SparsePrivateUse3.value + 12)), + NestedTensorMTIA((short)( SparseMeta.value + 12)), - NestedTensorMTIA((short)( SparsePrivateUse3.value + 13)), + NestedTensorPrivateUse1((short)( SparseMeta.value + 13)), - NestedTensorPrivateUse1((short)( SparsePrivateUse3.value + 14)), + NestedTensorPrivateUse2((short)( SparseMeta.value + 14)), - NestedTensorPrivateUse2((short)( SparsePrivateUse3.value + 15)), + NestedTensorPrivateUse3((short)( SparseMeta.value + 15)), - 
NestedTensorPrivateUse3((short)( SparsePrivateUse3.value + 16)), - EndOfNestedTensorBackends((short)( NestedTensorPrivateUse3.value)), - StartOfAutogradFunctionalityBackends((short)( NestedTensorPrivateUse3.value + 1)), - AutogradCPU((short)( NestedTensorPrivateUse3.value + 2)), + NestedTensorMeta((short)( SparseMeta.value + 16)), + EndOfNestedTensorBackends((short)( NestedTensorMeta.value)), + StartOfAutogradFunctionalityBackends((short)( NestedTensorMeta.value + 1)), + AutogradCPU((short)( NestedTensorMeta.value + 2)), - AutogradCUDA((short)( NestedTensorPrivateUse3.value + 3)), + AutogradCUDA((short)( NestedTensorMeta.value + 3)), - AutogradHIP((short)( NestedTensorPrivateUse3.value + 4)), + AutogradHIP((short)( NestedTensorMeta.value + 4)), - AutogradXLA((short)( NestedTensorPrivateUse3.value + 5)), + AutogradXLA((short)( NestedTensorMeta.value + 5)), - AutogradMPS((short)( NestedTensorPrivateUse3.value + 6)), + AutogradMPS((short)( NestedTensorMeta.value + 6)), - AutogradIPU((short)( NestedTensorPrivateUse3.value + 7)), + AutogradIPU((short)( NestedTensorMeta.value + 7)), - AutogradXPU((short)( NestedTensorPrivateUse3.value + 8)), + AutogradXPU((short)( NestedTensorMeta.value + 8)), - AutogradHPU((short)( NestedTensorPrivateUse3.value + 9)), + AutogradHPU((short)( NestedTensorMeta.value + 9)), - AutogradVE((short)( NestedTensorPrivateUse3.value + 10)), + AutogradVE((short)( NestedTensorMeta.value + 10)), - AutogradLazy((short)( NestedTensorPrivateUse3.value + 11)), + AutogradLazy((short)( NestedTensorMeta.value + 11)), - AutogradMeta((short)( NestedTensorPrivateUse3.value + 12)), + AutogradMTIA((short)( NestedTensorMeta.value + 12)), - AutogradMTIA((short)( NestedTensorPrivateUse3.value + 13)), + AutogradPrivateUse1((short)( NestedTensorMeta.value + 13)), - AutogradPrivateUse1((short)( NestedTensorPrivateUse3.value + 14)), + AutogradPrivateUse2((short)( NestedTensorMeta.value + 14)), - AutogradPrivateUse2((short)( NestedTensorPrivateUse3.value + 15)), + 
AutogradPrivateUse3((short)( NestedTensorMeta.value + 15)), - AutogradPrivateUse3((short)( NestedTensorPrivateUse3.value + 16)), - EndOfAutogradFunctionalityBackends((short)( AutogradPrivateUse3.value)), + AutogradMeta((short)( NestedTensorMeta.value + 16)), + EndOfAutogradFunctionalityBackends((short)( AutogradMeta.value)), EndOfRuntimeBackendKeys((short)(EndOfAutogradFunctionalityBackends.value)), @@ -2765,7 +2697,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // [Note: Per-Backend Functionality Dispatch Keys] // Check if a DispatchKey is a per-backend functionality key // Any functionalities that can be customized per-backend should be added here. -// These keys correspond to functionalities that can be customized indivually +// These keys correspond to functionalities that can be customized individually // per backend. While they only take up one bit in the `DispatchKeySet` bitset, // they map to (# backends) slots in the operator table. // Each of these keys also has a separate set of "runtime keys" in the dispatch @@ -2859,85 +2791,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Parsed from c10/util/Array.h -/** - * This file is based on the std::array implementation of libstdc++ at - * https://gcc.gnu.org/onlinedocs/gcc-7.1.0/libstdc++/api/a01056_source.html - * - * Changes: - * - isolate, i.e. remove dependencies on internal libstdc++ stuff - * - use c++17 behavior even in c++11 or c++14 - * - remove std::swappable special case because that doesn't work with MSVC - * - constexpr more things - * - add some features like prepend/tail - * - * If using std::array at runtime, feel free to either keep using std::array or - * use this one - it doesn't really matter. For compile time computations, this - * one here is preferred because std::array in C++11 misses some constexpr - * specifiers, forcing these methods to be called at runtime instead of compile - * time. 
- */ - -// Copyright (C) 2007-2017 Free Software Foundation, Inc. -// -// This file is part of the GNU ISO C++ Library. This library is free -// software; you can redistribute it and/or modify it under the -// terms of the GNU General Public License as published by the -// Free Software Foundation; either version 3, or (at your option) -// any later version. - -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// Under Section 7 of GPL version 3, you are granted additional -// permissions described in the GCC Runtime Library Exception, version -// 3.1, as published by the Free Software Foundation. - -// You should have received a copy of the GNU General Public License and -// a copy of the GCC Runtime Library Exception along with this program; -// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -// . - // #pragma once -// #include -// #include -// #include -// #include +// #include // #include -@Namespace("c10::guts::detail") public static native void __throw_out_of_range(@StdString BytePointer msg); -@Namespace("c10::guts::detail") public static native void __throw_out_of_range(@StdString String msg); - // namespace detail - -// #if defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201606 - -// #endif - -// Array comparisons. - // namespace detail - -// Specialized algorithms. - -/** - * Some added features not available in std::array. - * Only call these at compile time, they're slow if called at runtime. - * Examples: - * tail({2, 3, 4}) == {3, 4} - * prepend(2, {3, 4}) == {2, 3, 4} - */ - // namespace detail - // namespace detail - -/** - * Convert a C array into a std::array. 
- * Example: - * int source[3] = {2, 3, 4}; - * std::array target = to_std_array(source); - */ - // namespace detail - // namespace guts // namespace c10 @@ -2947,7 +2805,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include /** * is_equality_comparable is true_type iff the equality operator is defined @@ -3018,6 +2875,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include /** * Type holding a list of types for compile time type computations @@ -3183,6 +3041,23 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from c10/util/bit_cast.h + +// #pragma once + +// #include +// #include + +// Implementations of std::bit_cast() from C++ 20. +// +// This is a less sketchy version of reinterpret_cast. +// +// See https://en.cppreference.com/w/cpp/numeric/bit_cast for more +// information as well as the source of our implementations. + + // namespace c10 + + // Parsed from c10/core/DispatchKeySet.h // #pragma once @@ -3190,6 +3065,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // Targeting ../FunctionalityOffsetAndMask.java @@ -3256,7 +3132,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet after_func_keyset(); @Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet backend_bitset_mask(); -// keyset correpsonding to functorch keys that have their own dedicated +// keyset corresponding to functorch keys that have their own dedicated // TensorImpl subclass. 
// This keyset has: @@ -3367,22 +3243,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { SparseHIP(11), SparseVE(12), SparseXPU(13), - ORT(14), - XLA(15), - Vulkan(16), - Metal(17), - Meta(18), - QuantizedCPU(19), - QuantizedCUDA(20), - QuantizedXPU(21), - Undefined(22), - MkldnnCPU(23), - MPS(24), - HPU(25), - Lazy(26), - MTIA(27), - PrivateUse1(28), - NumOptions(29); + SparsePrivateUse1(14), + ORT(15), + XLA(16), + Vulkan(17), + Metal(18), + Meta(19), + QuantizedCPU(20), + QuantizedCUDA(21), + QuantizedXPU(22), + QuantizedPrivateUse1(23), + Undefined(24), + MkldnnCPU(25), + MPS(26), + HPU(27), + Lazy(28), + MTIA(29), + PrivateUse1(30), + NumOptions(31); public final int value; private Backend(int v) { this.value = v; } @@ -3446,6 +3324,84 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from c10/util/Deprecated.h + +// #pragma once + +/** + * This file provides portable macros for marking declarations + * as deprecated. You should generally use C10_DEPRECATED, + * except when marking 'using' declarations as deprecated, + * in which case you should use C10_DEFINE_DEPRECATED_USING + * (due to portability concerns). + */ + +// Sample usage: +// +// C10_DEPRECATED void bad_func(); +// struct C10_DEPRECATED BadStruct { +// ... +// }; + +// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses +// the "__declspec(deprecated)" implementation and not the C++14 +// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on +// MSVC, but ran into issues with some older MSVC versions. +// #if (defined(__cplusplus) && __cplusplus >= 201402L) +// #define C10_DEPRECATED [[deprecated]] +// #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] +// #elif defined(__GNUC__) +// #define C10_DEPRECATED __attribute__((deprecated)) +// TODO Is there some way to implement this? 
+// #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) + +// #elif defined(_MSC_VER) +// #else +// #warning "You need to implement C10_DEPRECATED for this compiler" +// #define C10_DEPRECATED +// #endif + +// Sample usage: +// +// C10_DEFINE_DEPRECATED_USING(BadType, int) +// +// which is the portable version of +// +// using BadType [[deprecated]] = int; + +// technically [[deprecated]] syntax is from c++14 standard, but it works in +// many compilers. +// #if defined(__has_cpp_attribute) +// #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName [[deprecated]] = TypeThingy; +// #endif +// #endif + +// #if defined(_MSC_VER) +// #endif + +// #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) +// nvcc has a bug where it doesn't understand __attribute__((deprecated)) +// declarations even when the host compiler supports it. We'll only use this gcc +// attribute when not cuda, and when using a GCC compiler that doesn't support +// the c++14 syntax we checked for above (available in __GNUC__ >= 5) +// #if !defined(__CUDACC__) +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName __attribute__((deprecated)) = TypeThingy; +// #else +// using cuda + gcc < 5, neither deprecated syntax is available so turning off. 
+// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName = TypeThingy; +// #endif +// #endif + +// #if !defined(C10_DEFINE_DEPRECATED_USING) +// #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" +// #define C10_DEFINE_DEPRECATED_USING +// #endif + + // Parsed from c10/util/AlignOf.h //===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// @@ -3576,9 +3532,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../NodeSmallVectorCommon.java -// Targeting ../TreeRefSmallVectorCommon.java - - // Targeting ../SymIntSmallVectorBase.java @@ -3588,9 +3541,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../NodeSmallVectorBase.java -// Targeting ../TreeRefSmallVectorBase.java - - // Define this out-of-line to dissuade the C++ compiler from inlining it. @@ -3614,9 +3564,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../NodeSmallVectorImpl.java -// Targeting ../TreeRefSmallVectorImpl.java - - @@ -3649,9 +3596,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SmallNodeVector.java -// Targeting ../TreeList.java - - /** Given a range of type R, iterate the entire range and return a * SmallVector with elements of the vector. 
This is useful, for example, @@ -4046,6 +3990,137 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from c10/util/BFloat16-inl.h + +// #pragma once + +// #include +// #include + +// #include + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif + +// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +// #endif + +/** Constructors */ + + +/** Implicit conversions */ + + +// #if defined(__CUDACC__) && !defined(USE_ROCM) +// #endif + +// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +// #endif + +// CUDA intrinsics + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + +/** Arithmetic */ + +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Const @ByRef BFloat16 a); + +@Namespace("c10") public static native @ByRef @Name("operator +=") BFloat16 addPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByRef @Name("operator -=") BFloat16 subtractPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByRef @Name("operator *=") BFloat16 multiplyPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByRef @Name("operator /=") BFloat16 dividePut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByRef @Name("operator |") BFloat16 or(@ByRef BFloat16 a, @Const 
@ByRef BFloat16 b); + +@Namespace("c10") public static native @ByRef @Name("operator ^") BFloat16 xor(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +@Namespace("c10") public static native @ByRef @Name("operator &") BFloat16 and(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); + +/** Arithmetic with floats */ + +@Namespace("c10") public static native @Name("operator +") float add(@ByVal BFloat16 a, float b); +@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal BFloat16 a, float b); +@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal BFloat16 a, float b); +@Namespace("c10") public static native @Name("operator /") float divide(@ByVal BFloat16 a, float b); + +@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator /") float divide(float a, @ByVal BFloat16 b); + +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer addPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef 
@Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const @ByRef BFloat16 b); + +/** Arithmetic with doubles */ + +@Namespace("c10") public static native @Name("operator +") double add(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator /") double divide(@ByVal BFloat16 a, double b); + +@Namespace("c10") public static native @Name("operator +") double add(double a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator -") double subtract(double a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator /") double divide(double a, @ByVal BFloat16 b); + +/** Arithmetic with ints */ + +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@ByVal BFloat16 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@ByVal BFloat16 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@ByVal 
BFloat16 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@ByVal BFloat16 a, int b); + +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(int a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(int a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(int a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(int a, @ByVal BFloat16 b); + +//// Arithmetic with int64_t + +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@ByVal BFloat16 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@ByVal BFloat16 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@ByVal BFloat16 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@ByVal BFloat16 a, @Cast("int64_t") long b); + +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@Cast("int64_t") long a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Cast("int64_t") long a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@Cast("int64_t") long a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@Cast("int64_t") long a, @ByVal BFloat16 b); + +// Overloading < and > operators, because std::max and std::min use them. 
+ +@Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@ByRef BFloat16 lhs, @ByRef BFloat16 rhs); + +@Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@ByRef BFloat16 lhs, @ByRef BFloat16 rhs); + + // namespace c10 + + // namespace std + + + // Parsed from c10/util/BFloat16.h // #pragma once @@ -4077,128 +4152,204 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // IWYU pragma: keep -// Parsed from c10/util/BFloat16-inl.h +// Parsed from c10/util/TypeSafeSignMath.h // #pragma once // #include // #include +// #include +// #if C10_CLANG_HAS_WARNING("-Wstring-conversion") +// #endif // #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") // #endif -// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) -// #endif +/** Returns false since we cannot have x < 0 if x is unsigned. */ -/** Constructors */ +/** Returns true if a signed variable x < 0 */ + +/** Returns true if x < 0 + * NOTE: Will fail on an unsigned custom type + * For the most part it's possible to fix this if + * the custom type has a constexpr constructor. + * However, notably, c10::Half does not :-( */ +/** Returns the sign of an unsigned variable x as 0, 1 */ -/** Implicit conversions */ +/** Returns the sign of a signed variable x as -1, 0, 1 */ +/** Returns the sign of x as -1, 0, 1 + * NOTE: Will fail on an unsigned custom type + * For the most part it's possible to fix this if + * the custom type has a constexpr constructor. 
+ * However, notably, c10::Half does not :-( */ -// #if defined(__CUDACC__) && !defined(USE_ROCM) +/** Returns true if a and b are not both negative */ + +// Suppress sign compare warning when compiling with GCC +// as later does not account for short-circuit rule before +// raising the warning, see https://godbolt.org/z/Tr3Msnz99 +// #ifdef __GNUC__ +// #pragma GCC diagnostic push +// #pragma GCC diagnostic ignored "-Wsign-compare" // #endif -// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +/** Returns true if x is greater than the greatest value of the type Limit */ + +// #ifdef __GNUC__ +// #pragma GCC diagnostic pop // #endif -// CUDA intrinsics +/** Returns true if x < lowest(Limit). Standard comparison */ -// #if defined(__CUDACC__) || defined(__HIPCC__) +/** Returns false since all the limit is signed and therefore includes + * negative values but x cannot be negative because it is unsigned */ + +/** Returns true if x < 0, where 0 is constructed from T. + * Limit is not signed, so its lower value is zero */ + +/** Returns false sign both types are unsigned */ + +/** Returns true if x is less than the lowest value of type T + * NOTE: Will fail on an unsigned custom type + * For the most part it's possible to fix this if + * the custom type has a constexpr constructor. 
+ * However, notably, c10::Half does not : */ + + // namespace c10 + + + +// Parsed from c10/util/floating_point_utils.h + +// #pragma once + +// #include + +@Namespace("c10::detail") public static native float fp32_from_bits(@Cast("uint32_t") int w); + +@Namespace("c10::detail") public static native @Cast("uint32_t") int fp32_to_bits(float f); + + // namespace c10::detail + + +// Parsed from c10/util/Float8_e4m3fn-inl.h + +// #pragma once + +// #include +// #include +// #include + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") // #endif -/** Arithmetic */ +/** Constructors */ -@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); +/** Implicit conversions */ -@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Const @ByRef BFloat16 a); -@Namespace("c10") public static native @ByRef @Name("operator +=") BFloat16 addPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); +/** Special values helper */ -@Namespace("c10") public static native @ByRef @Name("operator -=") BFloat16 subtractPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator *=") BFloat16 multiplyPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator /=") BFloat16 dividePut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); +/** Arithmetic */ -@Namespace("c10") public static native @ByRef @Name("operator |") BFloat16 or(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); 
+@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e4m3fn add(@Const @ByRef Float8_e4m3fn a, @Const @ByRef Float8_e4m3fn b); -@Namespace("c10") public static native @ByRef @Name("operator ^") BFloat16 xor(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e4m3fn subtract(@Const @ByRef Float8_e4m3fn a, @Const @ByRef Float8_e4m3fn b); -@Namespace("c10") public static native @ByRef @Name("operator &") BFloat16 and(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e4m3fn multiply(@Const @ByRef Float8_e4m3fn a, @Const @ByRef Float8_e4m3fn b); -/** Arithmetic with floats */ +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e4m3fn divide( + @Const @ByRef Float8_e4m3fn a, + @Const @ByRef Float8_e4m3fn b); -@Namespace("c10") public static native @Name("operator +") float add(@ByVal BFloat16 a, float b); -@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal BFloat16 a, float b); -@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal BFloat16 a, float b); -@Namespace("c10") public static native @Name("operator /") float divide(@ByVal BFloat16 a, float b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e4m3fn subtract(@Const @ByRef Float8_e4m3fn a); -@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal BFloat16 b); -@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal BFloat16 b); -@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal BFloat16 b); -@Namespace("c10") public static native @Name("operator /") float divide(float a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator +=") Float8_e4m3fn addPut( + @ByRef Float8_e4m3fn a, + @Const @ByRef Float8_e4m3fn b); 
-@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer addPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") Float8_e4m3fn subtractPut( + @ByRef Float8_e4m3fn a, + @Const @ByRef Float8_e4m3fn b); + +@Namespace("c10") public static native @ByRef @Name("operator *=") Float8_e4m3fn multiplyPut( + @ByRef Float8_e4m3fn a, + @Const @ByRef Float8_e4m3fn b); + +@Namespace("c10") public static 
native @ByRef @Name("operator /=") Float8_e4m3fn dividePut( + @ByRef Float8_e4m3fn a, + @Const @ByRef Float8_e4m3fn b); + +/** Arithmetic with floats */ + +@Namespace("c10") public static native @Name("operator +") float add(@ByVal Float8_e4m3fn a, float b); +@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal Float8_e4m3fn a, float b); +@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal Float8_e4m3fn a, float b); +@Namespace("c10") public static native @Name("operator /") float divide(@ByVal Float8_e4m3fn a, float b); + +@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @Name("operator /") float divide(float a, @ByVal Float8_e4m3fn b); + +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer addPut(@ByRef FloatBuffer a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef 
Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef Float8_e4m3fn b); +@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const @ByRef Float8_e4m3fn b); /** Arithmetic with doubles */ -@Namespace("c10") public static native @Name("operator +") double add(@ByVal BFloat16 a, double b); -@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal BFloat16 a, double b); -@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal BFloat16 a, double b); -@Namespace("c10") public static native @Name("operator /") double divide(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator +") double add(@ByVal Float8_e4m3fn a, double b); +@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal Float8_e4m3fn a, double b); +@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal Float8_e4m3fn a, double b); +@Namespace("c10") public static native @Name("operator /") double divide(@ByVal Float8_e4m3fn a, double b); -@Namespace("c10") public static native @Name("operator +") double add(double a, @ByVal BFloat16 b); -@Namespace("c10") public static native @Name("operator -") double subtract(double a, @ByVal BFloat16 b); -@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal BFloat16 b); -@Namespace("c10") public static native @Name("operator /") double 
divide(double a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator +") double add(double a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @Name("operator -") double subtract(double a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @Name("operator /") double divide(double a, @ByVal Float8_e4m3fn b); /** Arithmetic with ints */ -@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@ByVal BFloat16 a, int b); -@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@ByVal BFloat16 a, int b); -@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@ByVal BFloat16 a, int b); -@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@ByVal BFloat16 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e4m3fn add(@ByVal Float8_e4m3fn a, int b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e4m3fn subtract(@ByVal Float8_e4m3fn a, int b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e4m3fn multiply(@ByVal Float8_e4m3fn a, int b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e4m3fn divide(@ByVal Float8_e4m3fn a, int b); -@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(int a, @ByVal BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(int a, @ByVal BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(int a, @ByVal BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(int a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e4m3fn add(int a, @ByVal Float8_e4m3fn b); 
+@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e4m3fn subtract(int a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e4m3fn multiply(int a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e4m3fn divide(int a, @ByVal Float8_e4m3fn b); //// Arithmetic with int64_t -@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@ByVal BFloat16 a, @Cast("int64_t") long b); -@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@ByVal BFloat16 a, @Cast("int64_t") long b); -@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@ByVal BFloat16 a, @Cast("int64_t") long b); -@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@ByVal BFloat16 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e4m3fn add(@ByVal Float8_e4m3fn a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e4m3fn subtract(@ByVal Float8_e4m3fn a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e4m3fn multiply(@ByVal Float8_e4m3fn a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e4m3fn divide(@ByVal Float8_e4m3fn a, @Cast("int64_t") long b); -@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@Cast("int64_t") long a, @ByVal BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Cast("int64_t") long a, @ByVal BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@Cast("int64_t") long a, @ByVal BFloat16 b); -@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@Cast("int64_t") long a, @ByVal BFloat16 b); - -// Overloading < and > 
operators, because std::max and std::min use them. +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e4m3fn add(@Cast("int64_t") long a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e4m3fn subtract(@Cast("int64_t") long a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e4m3fn multiply(@Cast("int64_t") long a, @ByVal Float8_e4m3fn b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e4m3fn divide(@Cast("int64_t") long a, @ByVal Float8_e4m3fn b); -@Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@ByRef BFloat16 lhs, @ByRef BFloat16 rhs); - -@Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@ByRef BFloat16 lhs, @ByRef BFloat16 rhs); +/** NOTE: we do not define comparisons directly and instead rely on the implicit + * conversion from c10::Float8_e4m3fn to float. */ // namespace c10 @@ -4206,73 +4357,81 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from c10/util/TypeSafeSignMath.h +// Parsed from c10/util/Float8_e4m3fn.h + +/// // #pragma once +/** Defines the Float8_e4m3fn type (8-bit floating-point) including conversions + * to standard C types and basic arithmetic operations. Note that arithmetic + * operations are implemented by converting to floating point and + * performing the operation in float32. 
+ * Binary configuration: + * s eeee mmm + * 1 sign bit + * 4 exponent bits + * 3 mantissa bits + * bias = 7 + * + * Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf + * and inspired by Half implementation from pytorch/c10/util/Half.h */ + // #include -// #include +// #include +// #include +// #include // #include -// #if C10_CLANG_HAS_WARNING("-Wstring-conversion") -// #endif -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #if defined(__cplusplus) && (__cplusplus >= 201103L) +// #include +// #include +// #elif !defined(__OPENCL_VERSION__) +// #include +// #include // #endif -/** Returns false since we cannot have x < 0 if x is unsigned. */ - -/** Returns true if a signed variable x < 0 */ - -/** Returns true if x < 0 - * NOTE: Will fail on an unsigned custom type - * For the most part it's possible to fix this if - * the custom type has a constexpr constructor. - * However, notably, c10::Half does not :-( */ - -/** Returns the sign of an unsigned variable x as 0, 1 */ - -/** Returns the sign of a signed variable x as -1, 0, 1 */ - -/** Returns the sign of x as -1, 0, 1 - * NOTE: Will fail on an unsigned custom type - * For the most part it's possible to fix this if - * the custom type has a constexpr constructor. 
- * However, notably, c10::Half does not :-( */ +// #ifdef _MSC_VER +// #include +// #endif -/** Returns true if a and b are not both negative */ +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Suppress sign compare warning when compiling with GCC -// as later does not account for short-circuit rule before -// raising the warning, see https://godbolt.org/z/Tr3Msnz99 -// #ifdef __GNUC__ -// #pragma GCC diagnostic push -// #pragma GCC diagnostic ignored "-Wsign-compare" -// #endif +// #include // operator typeid -/** Returns true if x is greater than the greatest value of the type Limit */ +/* + * Convert a 8-bit floating-point number in fp8 E4M3FN format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +@Namespace("c10::detail") public static native float fp8e4m3fn_to_fp32_value(@Cast("uint8_t") byte input); -// #ifdef __GNUC__ -// #pragma GCC diagnostic pop -// #endif +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E4M3FN format, in bit representation. + */ +@Namespace("c10::detail") public static native @Cast("uint8_t") byte fp8e4m3fn_from_fp32_value(float f); -/** Returns true if x < lowest(Limit). Standard comparison */ -/** Returns false since all the limit is signed and therefore includes - * negative values but x cannot be negative because it is unsigned */ +// Targeting ../Float8_e4m3fn.java -/** Returns true if x < 0, where 0 is constructed from T. - * Limit is not signed, so its lower value is zero */ -/** Returns false sign both types are unsigned */ -/** Returns true if x is less than the lowest value of type T - * NOTE: Will fail on an unsigned custom type - * For the most part it's possible to fix this if - * the custom type has a constexpr constructor. 
- * However, notably, c10::Half does not : */ +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Float8_e4m3fn value); // namespace c10 +// #include // IWYU pragma: keep // Parsed from c10/util/complex_math.h @@ -4409,6 +4568,23 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace std +// Parsed from c10/util/complex_utils.h + +// #if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) +// #error +// "c10/util/complex_utils.h is not meant to be individually included. Include c10/util/complex.h instead." +// #endif + +// #include + +// Extract double from std::complex; is identity otherwise +// TODO: Write in more idiomatic C++17 + + // namespace c10 + + // namespace std + + // Parsed from c10/util/Half.h // #pragma once @@ -4426,6 +4602,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #if defined(__cplusplus) && (__cplusplus >= 201103L) @@ -4464,19 +4641,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // for SYCL 2020 // #endif -// Standard check for compiling CUDA with clang -// #if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__) -// #define C10_DEVICE_HOST_FUNCTION __device__ __host__ -// #else -// #define C10_DEVICE_HOST_FUNCTION -// #endif - // #include // operator typeid -@Namespace("c10::detail") public static native float fp32_from_bits(@Cast("uint32_t") int w); - -@Namespace("c10::detail") public static native @Cast("uint32_t") int fp32_to_bits(float f); - /* * Convert a 16-bit floating-point number in IEEE half-precision format, in bit * representation, to a 32-bit floating-point number in IEEE single-precision @@ -4530,10 +4696,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // The overflow checks may involve float to int conversion which may // trigger precision 
loss warning. Re-enable the warning once the code // is fixed. See T58053069. -// #ifdef __clang__ -// #pragma GCC diagnostic push -// #pragma GCC diagnostic ignored "-Wunknown-warning-option" -// #pragma GCC diagnostic ignored "-Wimplicit-int-float-conversion" +// #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") // #endif // bool can be converted to any type. @@ -4543,10 +4706,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // skip isnan and isinf check for integral types -// #ifdef __clang__ -// #pragma GCC diagnostic pop -// #endif - // #ifdef _MSC_VER // #pragma warning(pop) // #endif @@ -4558,11 +4717,96 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // IWYU pragma: keep +// Parsed from c10/util/complex.h + +// #pragma once + +// #include + +// #include + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +// #endif +// #if C10_CLANG_HAS_WARNING("-Wfloat-conversion") +// #endif +// Targeting ../DoubleComplex.java + + +// Targeting ../FloatComplex.java + + + + + + + + + + + + // namespace complex_literals + +// Define operators between integral scalars and c10::complex. std::complex does +// not support this when T is a floating-point number. This is useful because it +// saves a lot of "static_cast" when operate a complex and an integer. This +// makes the code both less verbose and potentially more efficient. 
+// #define COMPLEX_INTEGER_OP_TEMPLATE_CONDITION +// typename std::enable_if_t< +// std::is_floating_point::value && std::is_integral::value, +// int> = 0 + +// #undef COMPLEX_INTEGER_OP_TEMPLATE_CONDITION + + // namespace c10 + +// std functions +// +// The implementation of these functions also follow the design of C++20 + +// #if defined(USE_ROCM) +// #else +// #define ROCm_Bug(x) x +// #endif + +// #undef ROCm_Bug + +// For std::conj, there are other versions of it: +// constexpr std::complex conj( float z ); +// template< class DoubleOrInteger > +// constexpr std::complex conj( DoubleOrInteger z ); +// constexpr std::complex conj( long double z ); +// These are not implemented +// TODO(@zasdfgbnm): implement them as c10::conj + +// Thrust does not have complex --> complex version of thrust::proj, +// so this function is not implemented at c10 right now. +// TODO(@zasdfgbnm): implement it by ourselves + +// There is no c10 version of std::polar, because std::polar always +// returns std::complex. Use c10::polar instead; + + // namespace std + + // namespace c10 + +// #define C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H +// math functions are included in a separate file +// #include // IWYU pragma: keep +// utilities for complex types +// #include // IWYU pragma: keep +// #undef C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H + + // Parsed from c10/util/Half-inl.h // #pragma once // #include +// #include + // #include // #include @@ -4694,104 +4938,207 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from c10/util/complex_utils.h +// Parsed from c10/util/Float8_e5m2-inl.h -// #if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) -// #error -// "c10/util/complex_utils.h is not meant to be individually included. Include c10/util/complex.h instead." 
-// #endif +// #pragma once +// #include +// #include // #include -// Extract double from std::complex; is identity otherwise -// TODO: Write in more idiomatic C++17 +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif - // namespace c10 +public static final int EXP_WIDTH_FP8 = 5; +public static final int MAN_WIDTH_FP8 = 2; +public static final int EXP_BIAS_FP8 = 15; - // namespace std +/** Constructors */ -// Parsed from c10/util/complex.h -// #pragma once +/** Implicit conversions */ -// #include -// #include -// #if defined(__CUDACC__) || defined(__HIPCC__) -// #endif +/** Special values helpers */ -// #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") -// #endif -// #if C10_CLANG_HAS_WARNING("-Wfloat-conversion") -// #endif -// Targeting ../DoubleComplex.java -// Targeting ../FloatComplex.java +/** Arithmetic */ +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e5m2 add(@Const @ByRef Float8_e5m2 a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e5m2 subtract(@Const @ByRef Float8_e5m2 a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e5m2 multiply(@Const @ByRef Float8_e5m2 a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e5m2 divide( + @Const @ByRef Float8_e5m2 a, + @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e5m2 subtract(@Const @ByRef Float8_e5m2 a); +@Namespace("c10") public static native @ByRef @Name("operator +=") Float8_e5m2 addPut( + @ByRef Float8_e5m2 a, + @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") Float8_e5m2 subtractPut( + @ByRef Float8_e5m2 a, + @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") Float8_e5m2 multiplyPut( + @ByRef Float8_e5m2 a, + @Const @ByRef Float8_e5m2 
b); - // namespace complex_literals +@Namespace("c10") public static native @ByRef @Name("operator /=") Float8_e5m2 dividePut( + @ByRef Float8_e5m2 a, + @Const @ByRef Float8_e5m2 b); -// Define operators between integral scalars and c10::complex. std::complex does -// not support this when T is a floating-point number. This is useful because it -// saves a lot of "static_cast" when operate a complex and an integer. This -// makes the code both less verbose and potentially more efficient. -// #define COMPLEX_INTEGER_OP_TEMPLATE_CONDITION -// typename std::enable_if_t< -// std::is_floating_point::value && std::is_integral::value, -// int> = 0 +/** Arithmetic with floats */ -// #undef COMPLEX_INTEGER_OP_TEMPLATE_CONDITION +@Namespace("c10") public static native @Name("operator +") float add(@ByVal Float8_e5m2 a, float b); +@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal Float8_e5m2 a, float b); +@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal Float8_e5m2 a, float b); +@Namespace("c10") public static native @Name("operator /") float divide(@ByVal Float8_e5m2 a, float b); + +@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @Name("operator /") float divide(float a, @ByVal Float8_e5m2 b); + +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer addPut(@ByRef FloatBuffer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef Float8_e5m2 b); 
+@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef Float8_e5m2 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const @ByRef Float8_e5m2 b); - // namespace c10 +/** Arithmetic with doubles */ -// std functions -// -// The implementation of these functions also follow the design of C++20 +@Namespace("c10") public static native @Name("operator +") double add(@ByVal Float8_e5m2 a, double b); +@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal Float8_e5m2 a, double b); +@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal Float8_e5m2 a, double b); +@Namespace("c10") public static native @Name("operator /") double divide(@ByVal Float8_e5m2 a, double b); -// #if defined(USE_ROCM) -// #else -// #define ROCm_Bug(x) x -// #endif +@Namespace("c10") public static native @Name("operator +") double add(double 
a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @Name("operator -") double subtract(double a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @Name("operator /") double divide(double a, @ByVal Float8_e5m2 b); -// #undef ROCm_Bug +/** Arithmetic with ints */ -// For std::conj, there are other versions of it: -// constexpr std::complex conj( float z ); -// template< class DoubleOrInteger > -// constexpr std::complex conj( DoubleOrInteger z ); -// constexpr std::complex conj( long double z ); -// These are not implemented -// TODO(@zasdfgbnm): implement them as c10::conj +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e5m2 add(@ByVal Float8_e5m2 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e5m2 subtract(@ByVal Float8_e5m2 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e5m2 multiply(@ByVal Float8_e5m2 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e5m2 divide(@ByVal Float8_e5m2 a, int b); -// Thrust does not have complex --> complex version of thrust::proj, -// so this function is not implemented at c10 right now. -// TODO(@zasdfgbnm): implement it by ourselves +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e5m2 add(int a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e5m2 subtract(int a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e5m2 multiply(int a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e5m2 divide(int a, @ByVal Float8_e5m2 b); -// There is no c10 version of std::polar, because std::polar always -// returns std::complex. 
Use c10::polar instead; +//// Arithmetic with int64_t + +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e5m2 add(@ByVal Float8_e5m2 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e5m2 subtract(@ByVal Float8_e5m2 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e5m2 multiply(@ByVal Float8_e5m2 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e5m2 divide(@ByVal Float8_e5m2 a, @Cast("int64_t") long b); + +@Namespace("c10") public static native @ByVal @Name("operator +") Float8_e5m2 add(@Cast("int64_t") long a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator -") Float8_e5m2 subtract(@Cast("int64_t") long a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator *") Float8_e5m2 multiply(@Cast("int64_t") long a, @ByVal Float8_e5m2 b); +@Namespace("c10") public static native @ByVal @Name("operator /") Float8_e5m2 divide(@Cast("int64_t") long a, @ByVal Float8_e5m2 b); + +/** NOTE: we do not define comparisons directly and instead rely on the implicit + * conversion from c10::Float8_e5m2 to float. */ + + // namespace c10 // namespace std + + +// Parsed from c10/util/Float8_e5m2.h + + +/// +// #pragma once + +/** Defines the Float8_e5m2 type (8-bit floating-point) including conversions + * to standard C types and basic arithmetic operations. Note that arithmetic + * operations are implemented by converting to floating point and + * performing the operation in float32. 
+ * Binary configuration: + * s eeeee mm + * 1 sign bit + * 5 exponent bits + * 2 mantissa bits + * bias = 15 + * + * Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf + * and inspired by Half implementation from pytorch/c10/util/Half.h */ + +// #include + +/* + * Convert a 8-bit floating-point number in fp8 E5M2 format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +@Namespace("c10::detail") public static native float fp8e5m2_to_fp32_value(@Cast("uint8_t") byte input); + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 8-bit floating-point number in fp8 E5M2 format, in bit representation. + */ +@Namespace("c10::detail") public static native @Cast("uint8_t") byte fp8e5m2_from_fp32_value(float f); + + +// Targeting ../Float8_e5m2.java + + + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Float8_e5m2 value); + // namespace c10 -// #define C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H -// math functions are included in a separate file -// #include // IWYU pragma: keep -// utilities for complex types -// #include // IWYU pragma: keep -// #undef C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H +// #include // IWYU pragma: keep + + +// Parsed from c10/util/bits.h + +// #pragma once +// #include + +// #include +// Targeting ../bits1x8.java + + +// Targeting ../bits2x4.java + + +// Targeting ../bits4x2.java + + +// Targeting ../bits8.java + + +// Targeting ../bits16.java + + + + // namespace c10 // Parsed from c10/util/qint32.h @@ -4864,8 +5211,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include +// #include // #include +// #include +// #include // #include +// #include // #include // #include // #include @@ 
-4905,6 +5256,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // _(at::BFloat16, BFloat16) /* 15 */ // _(c10::quint4x2, QUInt4x2) /* 16 */ // _(c10::quint2x4, QUInt2x4) /* 17 */ +// _(c10::bits1x8, Bits1x8) /* 18 */ +// _(c10::bits2x4, Bits2x4) /* 19 */ +// _(c10::bits4x2, Bits4x2) /* 20 */ +// _(c10::bits8, Bits8) /* 21 */ +// _(c10::bits16, Bits16) /* 22 */ +// _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ +// _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ // If you want to support ComplexHalf for real, add ComplexHalf // into this macro (and change the name). But beware: convert() @@ -4922,6 +5280,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // _(c10::complex, ComplexDouble) // _(bool, Bool) // _(at::BFloat16, BFloat16) +// _(at::Float8_e5m2, Float8_e5m2) +// _(at::Float8_e4m3fn, Float8_e4m3fn) // #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) // _(uint8_t, Byte) @@ -4937,6 +5297,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // _(c10::complex, ComplexDouble) // _(bool, Bool) // _(at::BFloat16, BFloat16) +// _(at::Float8_e5m2, Float8_e5m2) +// _(at::Float8_e4m3fn, Float8_e4m3fn) @Namespace("c10") public enum ScalarType { Byte((byte)(0)), /* 0 */ @@ -4956,9 +5318,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { QInt32((byte)(14)), /* 14 */ BFloat16((byte)(15)), /* 15 */ QUInt4x2((byte)(16)), /* 16 */ - QUInt2x4((byte)(17)), - Undefined((byte)(18)), - NumOptions((byte)(19)); + QUInt2x4((byte)(17)), /* 17 */ + Bits1x8((byte)(18)), /* 18 */ + Bits2x4((byte)(19)), /* 19 */ + Bits4x2((byte)(20)), /* 20 */ + Bits8((byte)(21)), /* 21 */ + Bits16((byte)(22)), /* 22 */ + Float8_e5m2((byte)(23)), /* 23 */ + Float8_e4m3fn((byte)(24)), + Undefined((byte)(25)), + NumOptions((byte)(26)); public final byte value; private ScalarType(byte v) { this.value = v; } @@ -4984,7 +5353,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // /* 
https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ // /* TODO: remove once the bug is fixed. */ // static type t; -// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ +// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ /* 18 */ /* 19 */ /* 20 */ /* 21 */ /* 22 */ /* 23 */ /* 24 */ // #undef SPECIALIZE_ScalarTypeToCPPType @@ -4995,7 +5364,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // struct CppTypeToScalarType // : std:: // integral_constant { -// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ +// }; /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* 10 */ /* 11 */ /* 12 */ /* 13 */ /* 14 */ /* 15 */ /* 16 */ /* 17 */ /* 18 */ /* 19 */ /* 20 */ /* 21 */ /* 22 */ /* 23 */ /* 24 */ // #undef SPECIALIZE_CppTypeToScalarType @@ -5060,6 +5429,53 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // ::c10::ScalarType::SCALARTYPE3>::t), // SCALARTYPE3) +// #define AT_FORALL_SCALAR_TYPES_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, _) +// _(uint8_t, Byte) +// _(int8_t, Char) +// _(int16_t, Short) +// _(int, Int) +// _(int64_t, Long) +// _(float, Float) +// _(double, Double) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE1>::t), +// SCALARTYPE1) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE2>::t), +// SCALARTYPE2) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE3>::t), +// SCALARTYPE3) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE4>::t), +// SCALARTYPE4) + +// #define AT_FORALL_SCALAR_TYPES_AND5( +// SCALARTYPE1, 
SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, _) +// _(uint8_t, Byte) +// _(int8_t, Char) +// _(int16_t, Short) +// _(int, Int) +// _(int64_t, Long) +// _(float, Float) +// _(double, Double) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE1>::t), +// SCALARTYPE1) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE2>::t), +// SCALARTYPE2) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE3>::t), +// SCALARTYPE3) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE4>::t), +// SCALARTYPE4) +// _(decltype(::c10::impl::ScalarTypeToCPPType< +// ::c10::ScalarType::SCALARTYPE5>::t), +// SCALARTYPE5) + // #define AT_FORALL_QINT_TYPES(_) // _(c10::qint8, QInt8) // _(c10::quint8, QUInt8) @@ -5092,6 +5508,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") @MemberGetter public static native ScalarType kBFloat16(); /* 15 */ @Namespace("c10") @MemberGetter public static native ScalarType kQUInt4x2(); /* 16 */ @Namespace("c10") @MemberGetter public static native ScalarType kQUInt2x4(); /* 17 */ + @Namespace("c10") @MemberGetter public static native ScalarType kBits1x8(); /* 18 */ + @Namespace("c10") @MemberGetter public static native ScalarType kBits2x4(); /* 19 */ + @Namespace("c10") @MemberGetter public static native ScalarType kBits4x2(); /* 20 */ + @Namespace("c10") @MemberGetter public static native ScalarType kBits8(); /* 21 */ + @Namespace("c10") @MemberGetter public static native ScalarType kBits16(); /* 22 */ + @Namespace("c10") @MemberGetter public static native ScalarType kFloat8_e5m2(); /* 23 */ + @Namespace("c10") @MemberGetter public static native ScalarType kFloat8_e4m3fn(); /* 24 */ // #undef DEFINE_CONSTANT @Namespace("c10") public static native @Cast("const char*") BytePointer toString(ScalarType t); @@ -5104,10 +5527,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { 
@Namespace("c10") public static native @Cast("bool") boolean isFloatingType(ScalarType t); +@Namespace("c10") public static native @Cast("bool") boolean isFloat8Type(ScalarType t); +@Namespace("c10") public static native @Cast("bool") boolean isReducedFloatingType(ScalarType t); + @Namespace("c10") public static native @Cast("bool") boolean isComplexType(ScalarType t); @Namespace("c10") public static native @Cast("bool") boolean isQIntType(ScalarType t); +@Namespace("c10") public static native @Cast("bool") boolean isBitsType(ScalarType t); + @Namespace("c10") public static native ScalarType toQIntType(ScalarType t); @Namespace("c10") public static native ScalarType toUnderlying(ScalarType t); @@ -5130,66 +5558,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("std::ostream*") @ByRef Pointer stream, ScalarType scalar_type); -// #define AT_FORAUTOCAST_SCALAR_TYPES(_) -// _(half, Half) /* 0 */ -// _(bfloat16, BFloat16) /* 1 */ - - // namespace c10 - - -// Parsed from c10/util/ExclusivelyOwned.h - -// #pragma once - -// #include - -// See example implementation in TensorBase.h and TensorBody.h. -// Synopsis: -// -// repr_type -- type to use to store an owned T in ExclusivelyOwned. -// -// pointer_type -- pointer-esque type to return from -// ExclusivelyOwned's get() and operator*() methods. -// -// const_pointer_type -- similar to pointer_type, used for the const methods. -// -// static repr_type nullRepr() -- return a null instance of repr_type. -// -// template -// static repr_type createInPlace(Args&&... args) -- used by the in-place -// ExclusivelyOwned constructor. -// -// static repr_type moveToRepr(T&& x) -- move the given x into an -// instance of repr_type. used by the ExclusivelyOwned(T&&) -// constructor. -// -// static void destroyOwned(repr_type x) -- free memory for a -// known-exclusively-owned instance of x. Replaces calling repr_type's -// destructor. 
Being able to implement this more efficiently than -// repr_type's destructor is the main reason to use ExclusivelyOwned -// for a type. -// -// static T take(repr_type&) -- move out of the given repr_type into an owned T. -// -// static pointer_type getImpl(const repr_type&) -- return a pointer -// to the given repr_type. May take repr_type by value if that is more -// efficient. - -/** ExclusivelyOwned is a smart-pointer-like wrapper around an - * exclusively-owned instance of some type T that normally has - * mandatory reference counting (currently just Tensor). If you have - * an isolated piece of code that knows that it has sole ownership of - * an object of one of these types (i.e., because you created it - * directly or using a factory function) and that object will not - * escape from that isolated piece of code, then moving the object - * into an ExclusivelyOwned will avoid an atomic reference count - * decrement at destruction time. - * - * If you directly create the Tensor in the first - * place, you can use the in_place constructor of ExclusivelyOwned to - * additionally avoid doing any stores to initialize the refcount & - * weakcount. 
*/ - // namespace c10 @@ -5227,11 +5595,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once -// #include +// #include // #include // #include +// #include // #include -// #include // Targeting ../SymNodeImpl.java @@ -5239,7 +5607,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/SymFloat.h +// Parsed from c10/core/SymBool.h // #pragma once @@ -5247,30 +5615,36 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// Targeting ../SymBool.java -// #include -// #include -// Targeting ../SymFloat.java +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymBool s); + +// #define TORCH_SYM_CHECK(cond, ...) +// TORCH_CHECK((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) +// #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) +// TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymFloat s); // namespace c10 -// Parsed from c10/core/SymBool.h +// Parsed from c10/core/SymFloat.h // #pragma once +// #include // #include // #include // #include // #include -// Targeting ../SymBool.java +// #include +// Targeting ../SymFloat.java -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymBool s); + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymFloat s); // namespace c10 @@ -5282,25 +5656,133 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // 
#include +// #include // Targeting ../SymInt.java /** Sum of a list of SymInt; accumulates into the c10::SymInt expression */ -@Namespace("c10") public static native @ByVal @Name("operator +") SymInt add(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @ByVal @Name("operator -") SymInt subtract(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @ByVal @Name("operator *") SymInt multiply(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @ByVal @Name("operator /") SymInt divide(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @ByVal @Name("operator %") SymInt mod(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@Cast("int64_t") long a, @Const @ByRef SymInt b); -@Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); +// #define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy) +// C10_API RetTy operator%(const SymInt& a, scalar_t b); +// C10_API RetTy operator%(scalar_t a, const SymInt& b); + +// #define DECLARE_SYMINT_OP(scalar_t, RetTy) +// C10_API RetTy operator+(const SymInt& a, scalar_t b); +// C10_API RetTy operator-(const SymInt& a, scalar_t b); +// C10_API RetTy 
operator*(const SymInt& a, scalar_t b); +// C10_API RetTy operator/(const SymInt& a, scalar_t b); +// C10_API RetTy operator+(scalar_t a, const SymInt& b); +// C10_API RetTy operator-(scalar_t a, const SymInt& b); +// C10_API RetTy operator*(scalar_t a, const SymInt& b); +// C10_API RetTy operator/(scalar_t a, const SymInt& b); +// C10_API bool operator==(const SymInt& a, scalar_t b); +// C10_API bool operator!=(const SymInt& a, scalar_t b); +// C10_API bool operator<(const SymInt& a, scalar_t b); +// C10_API bool operator<=(const SymInt& a, scalar_t b); +// C10_API bool operator>(const SymInt& a, scalar_t b); +// C10_API bool operator>=(const SymInt& a, scalar_t b); +// C10_API bool operator==(scalar_t a, const SymInt& b); +// C10_API bool operator!=(scalar_t a, const SymInt& b); +// C10_API bool operator<(scalar_t a, const SymInt& b); +// C10_API bool operator<=(scalar_t a, const SymInt& b); +// C10_API bool operator>(scalar_t a, const SymInt& b); +// C10_API bool operator>=(scalar_t a, const SymInt& b); + +@Namespace("c10") public static native @ByVal @Name("operator %") SymInt mod(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @ByVal @Name("operator %") SymInt mod(@Cast("int64_t") long a, @Const @ByRef SymInt b); +@Namespace("c10") public static native @ByVal @Name("operator %") SymInt mod(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @ByVal @Name("operator %") SymInt mod(int a, @Const @ByRef SymInt b); +@Namespace("c10") public static native @ByVal @Name("operator +") SymInt add(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymInt subtract(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymInt multiply(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymInt divide(@Const @ByRef 
SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @ByVal @Name("operator +") SymInt add(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymInt subtract(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymInt multiply(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymInt divide(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef SymInt a, @Cast("int64_t") long b); + @Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Cast("int64_t") long a, @Const 
@ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@Cast("int64_t") long a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); +@Namespace("c10") public static native @ByVal @Name("operator +") SymInt add(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymInt subtract(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymInt multiply(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymInt divide(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @ByVal @Name("operator +") SymInt add(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymInt subtract(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymInt multiply(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymInt divide(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef SymInt a, int b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef SymInt a, int b); + 
@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(int a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(int a, @Const @ByRef SymInt b); // make sure constants work +@Namespace("c10") public static native @ByVal @Name("operator +") SymFloat add(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymFloat subtract(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymFloat multiply(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymFloat divide(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @ByVal @Name("operator +") SymFloat add(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymFloat subtract(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymFloat multiply(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymFloat divide(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymInt a, double b); 
+ @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef SymInt a, double b); + @Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(double a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(double a, @Const @ByRef SymInt b); +@Namespace("c10") public static native @ByVal @Name("operator +") SymFloat add(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymFloat subtract(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymFloat multiply(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymFloat divide(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @ByVal @Name("operator +") SymFloat add(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator -") SymFloat subtract(float a, @Const 
@ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator *") SymFloat multiply(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @ByVal @Name("operator /") SymFloat divide(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef SymInt a, float b); + @Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(float a, @Const @ByRef SymInt b); + @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(float a, @Const @ByRef SymInt b); // just for completeness + +// On OSX size_t is different than uint64_t so we have to +// define it separately +// #if defined(__APPLE__) +// #endif + +// #undef DECLARE_SYMINT_OP 
@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymInt s); @Namespace("c10") public static native @ByVal @Name("operator -") SymInt subtract(@Const @ByRef SymInt s); @@ -5312,6 +5794,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include // #include +// #include +// #include // #include // #include @@ -5349,10 +5833,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once -// #include // #include // #include -// #include // #include // #include @@ -5391,32 +5873,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { + + // #undef DEFINE_TO // namespace c10 -// Parsed from c10/util/Backtrace.h - -// #ifndef C10_UTIL_BACKTRACE_H_ -// #define C10_UTIL_BACKTRACE_H_ - -// #include -// #include -// #include - -// #include -@Namespace("c10") public static native @StdString BytePointer get_backtrace( - @Cast("size_t") long frames_to_skip/*=0*/, - @Cast("size_t") long maximum_number_of_frames/*=64*/, - @Cast("bool") boolean skip_python_frames/*=true*/); -@Namespace("c10") public static native @StdString BytePointer get_backtrace(); - // namespace c10 - -// #endif // C10_UTIL_BACKTRACE_H_ - - // Parsed from c10/util/IdWrapper.h // #pragma once @@ -5456,28 +5920,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // } -// Parsed from c10/util/Type.h - -// #ifndef C10_UTIL_TYPE_H_ -// #define C10_UTIL_TYPE_H_ - -// #include -// #include -// #include - -// #include - -/** Utility to demangle a C++ symbol name. */ -@Namespace("c10") public static native @StdString BytePointer demangle(@Cast("const char*") BytePointer name); -@Namespace("c10") public static native @StdString String demangle(String name); - -/** Returns the printable name of the type. 
*/ - - // namespace c10 - -// #endif // C10_UTIL_TYPE_H_ - - // Parsed from c10/util/ConstexprCrc.h // #pragma once @@ -5587,59 +6029,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from c10/util/flat_hash_map.h - -// Taken from -// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp -// with fixes applied: -// - https://github.com/skarupke/flat_hash_map/pull/25 -// - https://github.com/skarupke/flat_hash_map/pull/26 -// - replace size_t with uint64_t to fix it for 32bit -// - add "GCC diagnostic" pragma to ignore -Wshadow -// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems -// to have issues with it otherwise -// - fix compiler warnings in operator templated_iterator - -// Copyright Malte Skarupke 2017. -// Distributed under the Boost Software License, Version 1.0. -// (See http://www.boost.org/LICENSE_1_0.txt) - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") -// #endif - -// #ifdef _MSC_VER -// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ -// #else -// #define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline)) -// #endif -@Namespace("ska::detailv3") @MemberGetter public static native byte min_lookups(); -public static final byte min_lookups = min_lookups(); - -@Namespace("ska::detailv3") public static native byte log2(@Cast("uint64_t") long value); - -@Namespace("ska::detailv3") public static native @Cast("uint64_t") long next_power_of_two(@Cast("uint64_t") long i); - -// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t -// (it takes CWG1558 into account and also works for older compilers) - // namespace detailv3 - - // end namespace ska - - - // Parsed from c10/util/irange.h // Copyright 2004-present Facebook. All Rights Reserved. 
@@ -5647,6 +6036,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include +// #include // #include // #include @@ -5671,30 +6061,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include -// #include // #include // #include // #include // #include -// #include -// #include // #include -// #ifdef __GXX_RTTI -// #include -// #endif - -// #include // #include -// #include -// #include // #include // #include -// #include // #include // #include -// #include // #include // #include @@ -5803,14 +6180,21 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /* 15 */ /* 16 */ /* 17 */ + /* 18 */ + /* 19 */ + /* 20 */ + /* 21 */ + /* 22 */ + /* 23 */ + /* 24 */ // #undef DEFINE_SCALAR_METADATA_INSTANCE -@Namespace("caffe2") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); -@Namespace("caffe2") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); +@Namespace("caffe2") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByRef TypeMeta lhs, @Const @ByRef TypeMeta rhs); +@Namespace("caffe2") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByRef TypeMeta lhs, @Const @ByRef TypeMeta rhs); @Namespace("caffe2") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( @Cast("std::ostream*") @ByRef Pointer stream, @@ -5854,33 +6238,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // return index; // } -// #define CAFFE_DEFINE_KNOWN_TYPE(T) +// #define CAFFE_DEFINE_KNOWN_TYPE(T, ident) // template uint16_t TypeMeta::addTypeMetaData(); +// namespace detail { +// EXPORT_IF_NOT_GCC const uint16_t ident##_metadata_index = +// 
TypeMeta::addTypeMetaData(); +// } // namespace detail // Unlike CAFFE_KNOWN_TYPE, CAFFE_DECLARE_KNOWN_TYPE avoids a function // call to access _typeMetaData in the common case. -// #ifdef __CUDACC__ -// nvcc needs its own specialization that doesn't use -// C10_ALWAYS_INLINE so that it doesn't need to see a definition for -// _addTypeMeta. See NOTE [ TypeIdentifier::Get nvcc/clang discrepancy -// ]. -// #define CAFFE_DECLARE_KNOWN_TYPE(T) -// extern template uint16_t TypeMeta::addTypeMetaData(); -// template <> -// EXPORT_IF_NOT_GCC inline uint16_t TypeMeta::_typeMetaData() noexcept { -// static const uint16_t index = addTypeMetaData(); -// return index; -// } -// #else -// #define CAFFE_DECLARE_KNOWN_TYPE(T) +// #define CAFFE_DECLARE_KNOWN_TYPE(T, ident) // extern template uint16_t TypeMeta::addTypeMetaData(); +// namespace detail { +// extern C10_API const uint16_t ident##_metadata_index; +// } /* namespace detail */ // template <> // EXPORT_IF_NOT_GCC C10_ALWAYS_INLINE uint16_t // TypeMeta::_typeMetaData() noexcept { -// static const uint16_t index = addTypeMetaData(); -// return index; +// return detail::ident##_metadata_index; // } -// #endif // #define CAFFE_KNOWN_TYPE_NOEXPORT(T) // template <> @@ -5890,26 +6266,48 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // } + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short std_string_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short uint16_t_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short char_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short std_unique_ptr_std_mutex_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const 
uint16_t") short std_unique_ptr_std_atomic_bool_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short std_vector_int32_t_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short std_vector_int64_t_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short std_vector_unsigned_long_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short bool_ptr_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short char_ptr_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short int_ptr_metadata_index(); + /* namespace detail */ // For some of the compilers, long is defined separately from int32_t and @@ -5924,13 +6322,21 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace detail + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short detail_guard_long_unique_long_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short detail_guard_long_unique_std_vector_long_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short float_ptr_metadata_index(); + /* namespace detail */ + @Namespace("caffe2::detail") @MemberGetter public static native @Cast("const uint16_t") short at_Half_metadata_index(); + /* namespace detail */ // namespace caffe2 @@ -6022,9 +6428,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// Targeting 
../DeleterFnPtr.java - - // Does not delete anything @Namespace("c10::detail") public static native void deleteNothing(Pointer arg0); @@ -6138,286 +6541,565 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("size_t") long total_allocated, @Cast("size_t") long total_reserved, @ByVal Device device); +// Targeting ../GatheredContext.java + + // namespace c10 -// Parsed from c10/core/StorageImpl.h +// Parsed from c10/core/impl/HermeticPyObjectTLS.h // #pragma once -// #include -// #include -// #include +// #include +// #include +// Targeting ../HermeticPyObjectTLS.java -// #include -// Targeting ../StorageImpl.java + // namespace impl // namespace c10 -// Parsed from c10/core/Storage.h +// Parsed from c10/core/SymIntArrayRef.h // #pragma once -// #include -// Targeting ../Storage.java - +// #include +// #include +// #include +// #include +@Namespace("c10") public static native @ByVal LongArrayRef asIntArrayRefUnchecked(@ByVal SymIntArrayRef ar); - // namespace c10 +// TODO: a SymIntArrayRef containing a heap allocated large negative integer +// can actually technically be converted to an IntArrayRef... but not with +// the non-owning API we have here. We can't reinterpet cast; we have to +// allocate another buffer and write the integers into it. If you need it, +// we can do it. But I don't think you need it. 
+@Namespace("c10") public static native @ByVal LongArrayRefOptional asIntArrayRefSlowOpt( + @ByVal SymIntArrayRef ar); -// Parsed from c10/core/CopyBytes.h -// #pragma once -// #include -// Targeting ../CopyBytesFunction.java +// #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__) +// Prefer using a more semantic constructor, like +// fromIntArrayRefKnownNonNegative +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); -// #define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) -// namespace { -// static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( -// g_copy_function)(from, to, __VA_ARGS__); -// } +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); -/* - * WARNING: Implementations for this function are currently registered from - * ATen and caffe2, not yet from c10. Don't use this if not either ATen - * or caffe2 is present as well. - * We can't move them yet, because the CUDA implementations aren't unified yet - * between ATen and caffe2. - * We're planning to move the implementations into c10/backend/xxx - * to make c10 self contained again. 
- */ -@Namespace("c10") public static native void CopyBytes( - @Cast("size_t") long nbytes, - @Const Pointer src, - @ByVal Device src_device, - Pointer dst, - @ByVal Device dst_device, - @Cast("bool") boolean async); // namespace c10 -// Parsed from c10/core/AutogradState.h +// Parsed from c10/util/python_stub.h // #pragma once -// #include -// Targeting ../AutogradState.java - +// Parsed from c10/core/impl/PyInterpreter.h - // namespace c10 +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Parsed from c10/core/GradMode.h +// Forward declarations + // namespace c10 -// #pragma once + // namespace torch -// #include -// #include -// Targeting ../GradMode.java +// Actual implementation +// Targeting ../PyInterpreterVTable.java -// Targeting ../AutoGradMode.java +// Targeting ../PyInterpreter.java -// Targeting ../NoGradGuard.java +// PyInterpreterStatus describes what the state of its interpreter tag +// is, relative to the thread currently holding the GIL. +@Namespace("c10::impl") public enum PyInterpreterStatus { + // We just allocated the Tensor, it hasn't escaped to other threads, + // we know that it definitely hasn't been tagged to be associated + // with an interpreter. + DEFINITELY_UNINITIALIZED(0), + // We queried the interpreter field and it looked uninitialized. But + // another thread may have raced with us to tag it with some other + // interpreter id. So we will have to do a CEX to make sure we can + // actually nab it. + MAYBE_UNINITIALIZED(1), + // We queried the interpreter field and it was tagged to belong to us. + // This means we have sole write access (as we hold the GIL for this + // interpreter) + TAGGED_BY_US(2), + // Someone else tagged this. We can't use this TensorImpl from Python. 
+ TAGGED_BY_OTHER(3); -// Targeting ../AutoFwGradMode.java + public final int value; + private PyInterpreterStatus(int v) { this.value = v; } + private PyInterpreterStatus(PyInterpreterStatus e) { this.value = e.value; } + public PyInterpreterStatus intern() { for (PyInterpreterStatus e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + // namespace impl + // namespace c10 - // namespace c10 +// Parsed from c10/core/impl/PyObjectSlot.h +// #pragma once -// Parsed from c10/util/Registry.h +// #include +// #include +// #include +// #include -// #ifndef C10_UTIL_REGISTRY_H_ -// #define C10_UTIL_REGISTRY_H_ +// #include -/** - * Simple registry implementation that uses static variables to - * register object creators during program initialization time. - */ + // namespace impl + // namespace c10 -// NB: This Registry works poorly when you have other namespaces. -// Make all macro invocations from inside the at namespace. 
-// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from c10/core/StorageImpl.h -// #include -// #include +// #pragma once -@Namespace("c10") public static native @StdString BytePointer KeyStrRepr(@StdString BytePointer key); -@Namespace("c10") public static native @StdString String KeyStrRepr(@StdString String key); +// #include +// #include +// #include -@Namespace("c10") public enum RegistryPriority { - REGISTRY_FALLBACK(1), - REGISTRY_DEFAULT(2), - REGISTRY_PREFERRED(3); +// #include +// Targeting ../StorageImpl.java - public final int value; - private RegistryPriority(int v) { this.value = v; } - private RegistryPriority(RegistryPriority e) { this.value = e.value; } - public RegistryPriority intern() { for (RegistryPriority e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -/** - * \brief A template class that allows one to register classes by keys. - * - * The keys are usually a std::string specifying the name, but can be anything - * that can be used in a std::map. - * - * You should most likely not use the Registry class explicitly, but use the - * helper macros below to declare specific registries as well as registering - * objects. - */ -/** - * C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function - * declaration, as well as creating a convenient typename for its corresponding - * registerer. - */ -// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE -// as import and DEFINE as export, because these registry macros will be used -// in downstream shared libraries as well, and one cannot use *_API - the API -// macro will be defined on a per-shared-library basis. 
Semantically, when one -// declares a typed registry it is always going to be IMPORT, and when one -// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE), -// the instantiation unit is always going to be exported. -// -// The only unique condition is when in the same file one does DECLARE and -// DEFINE - in Windows compilers, this generates a warning that dllimport and -// dllexport are mixed, but the warning is fine and linker will be properly -// exporting the symbol. Same thing happens in the gflags flag declaration and -// definition caes. -// #define C10_DECLARE_TYPED_REGISTRY( -// RegistryName, SrcType, ObjectType, PtrType, ...) -// C10_IMPORT ::c10::Registry, ##__VA_ARGS__>* -// RegistryName(); -// typedef ::c10::Registerer, ##__VA_ARGS__> -// Registerer##RegistryName +// Declare StorageImpl create function pointer types. -// #define C10_DEFINE_TYPED_REGISTRY( -// RegistryName, SrcType, ObjectType, PtrType, ...) -// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* -// RegistryName() { -// static ::c10::Registry, ##__VA_ARGS__>* -// registry = new ::c10:: -// Registry, ##__VA_ARGS__>(); -// return registry; -// } +@Namespace("c10") public static native void SetStorageImplCreate(DeviceType t, @Cast("c10::StorageImplCreateHelper") StorageImplPtr fptr); +@Namespace("c10") public static native void SetStorageImplCreate(@Cast("c10::DeviceType") byte t, @Cast("c10::StorageImplCreateHelper") StorageImplPtr fptr); -// #define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( -// RegistryName, SrcType, ObjectType, PtrType, ...) 
-// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* -// RegistryName() { -// static ::c10::Registry, ##__VA_ARGS__>* -// registry = -// new ::c10::Registry, ##__VA_ARGS__>( -// false); -// return registry; -// } +@Namespace("c10") public static native @Cast("c10::StorageImplCreateHelper") StorageImplPtr GetStorageImplCreate(DeviceType t); +@Namespace("c10") public static native @Cast("c10::StorageImplCreateHelper") StorageImplPtr GetStorageImplCreate(@Cast("c10::DeviceType") byte t); -// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated -// creator with comma in its templated arguments. -// #define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) -// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, RegistryName(), ##__VA_ARGS__); + // namespace c10 -// #define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( -// RegistryName, key, priority, ...) -// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, priority, RegistryName(), ##__VA_ARGS__); -// #define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...) -// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, -// RegistryName(), -// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, -// ::c10::demangle_type<__VA_ARGS__>()); +// Parsed from c10/core/Storage.h -// #define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( -// RegistryName, key, priority, ...) -// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, -// priority, -// RegistryName(), -// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, -// ::c10::demangle_type<__VA_ARGS__>()); +// #pragma once -// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use -// std::string as the key type, because that is the most commonly used cases. -// #define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) 
-// C10_DECLARE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) +// #include +// Targeting ../Storage.java -// #define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) -// #define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( -// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) -// #define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) -// C10_DECLARE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + // namespace c10 -// #define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) -// #define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( -// RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( -// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) +// Parsed from c10/core/AutogradState.h -// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string -// as the key -// type, because that is the most commonly used cases. -// #define C10_REGISTER_CREATOR(RegistryName, key, ...) -// C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__) +// #pragma once -// #define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) -// C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( -// RegistryName, #key, priority, __VA_ARGS__) +// #include +// Targeting ../AutogradState.java -// #define C10_REGISTER_CLASS(RegistryName, key, ...) -// C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__) -// #define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) 
-// C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( -// RegistryName, #key, priority, __VA_ARGS__) // namespace c10 -// #endif // C10_UTIL_REGISTRY_H_ +// Parsed from c10/core/impl/LocalDispatchKeySet.h -// Parsed from c10/util/Flags.h +// #pragma once -// #ifndef C10_UTIL_FLAGS_H_ -// #define C10_UTIL_FLAGS_H_ +// #include +// #include -/* Commandline flags support for C10. - * +// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) +// +// This manages two thread-local DispatchKeySets: +// +// - The included type set, which adds a tensor type for consideration +// in dispatch. (For example, you might add Profiling to +// the included type set to turn on profiling on all tensor operations.) +// +// - The excluded type set, which disqualifies a tensor type from dispatch. +// (For example, after redispatching on variable, we disqualify +// Autograd so we don't attempt to handle variable again.) +// (Exclusion wins over inclusion.) +// +// NB: Originally, I implemented the excluded type set as storing the inverted +// set, but TLS is defined to be zero-initialized, so this doesn't actually work +// (if it's inverted, you want the set to be -1 initialized). +// Targeting ../PODLocalDispatchKeySet.java + + +// Targeting ../LocalDispatchKeySet.java + + + +// thread_local variables cannot be C10_API on Windows. +// Inlining this seems to break AutoDispatchBelowAutograd on Android. +// #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +@Namespace("c10::impl") public static native @ByVal LocalDispatchKeySet tls_local_dispatch_key_set(); +// #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) + +// #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) + +// Internal, use ThreadLocalStateGuard + +// Targeting ../IncludeDispatchKeyGuard.java + + +// Targeting ../ForceDispatchKeyGuard.java + + + +// Non-RAII API for manipulating the thread-local dispatch state. +// Please prefer the RAII API. 
The non-RAII API may be useful when +// the included/excluded state of a given DispatchKey must span +// many calls from the Python to the C++, so you cannot conveniently +// use an RAII guard. +// +// Example use case: a Python context manager that includes a certain +// DispatchKey, to ensure ops running under the context manager dispatch +// through that DispatchKey's registered overrides. +// +// The non-RAII API is less efficient than the RAII guards because both the +// getter and setter will do a tls_getaddr lookup (the RAII struct only needs +// one!) + +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(DispatchKey x); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(@Cast("c10::DispatchKey") short x); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(DispatchKey x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(DispatchKey x); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(@Cast("c10::DispatchKey") short x); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(DispatchKey x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_excluded(@ByVal DispatchKeySet ks); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_included(@ByVal DispatchKeySet ks); + + // namespace impl + // namespace c10 + + +// Parsed from c10/core/InferenceMode.h + +// #pragma once + 
+// #include +// #include +// #include +// Targeting ../InferenceMode.java + + + // namespace c10 + + +// Parsed from c10/core/WrapDimMinimal.h + +// #pragma once + +// #include +// This template can only be specialized at int64_t and c10::SymInt; +// you'll get linker errors otherwise + // namespace detail + +@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); +@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr); + +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr); + + // namespace c10 + + +// Parsed from c10/core/impl/SizesAndStrides.h + +// #pragma once + +// #include +// #include + +// #include +// #include +// #include + +public static final int C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE = 5; +// Targeting ../SizesAndStrides.java + + + + // namespace impl + // namespace c10 + + +// Parsed from c10/util/DimVector.h + +// #pragma once + +// #include +// #include +// #include +// #include + +@Namespace("c10") @MemberGetter public static native @Cast("const size_t") long kDimVectorStaticSize(); + +/** A container for sizes or strides */ + + // namespace c10 + + +// Parsed from c10/util/Type.h + +// #ifndef C10_UTIL_TYPE_H_ +// #define C10_UTIL_TYPE_H_ + +// #include +// #include +// #ifdef __GXX_RTTI +// #include +// #endif // __GXX_RTTI + +// #include + +/** Utility to demangle a C++ symbol name. 
*/ +@Namespace("c10") public static native @StdString BytePointer demangle(@Cast("const char*") BytePointer name); +@Namespace("c10") public static native @StdString String demangle(String name); + +/** Returns the printable name of the type. */ + + // namespace c10 + +// #endif // C10_UTIL_TYPE_H_ + + +// Parsed from c10/util/Registry.h + +// #ifndef C10_UTIL_REGISTRY_H_ +// #define C10_UTIL_REGISTRY_H_ + +/** + * Simple registry implementation that uses static variables to + * register object creators during program initialization time. + */ + +// NB: This Registry works poorly when you have other namespaces. +// Make all macro invocations from inside the at namespace. + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include + +@Namespace("c10") public static native @StdString BytePointer KeyStrRepr(@StdString BytePointer key); +@Namespace("c10") public static native @StdString String KeyStrRepr(@StdString String key); + +@Namespace("c10") public enum RegistryPriority { + REGISTRY_FALLBACK(1), + REGISTRY_DEFAULT(2), + REGISTRY_PREFERRED(3); + + public final int value; + private RegistryPriority(int v) { this.value = v; } + private RegistryPriority(RegistryPriority e) { this.value = e.value; } + public RegistryPriority intern() { for (RegistryPriority e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +/** + * \brief A template class that allows one to register classes by keys. + * + * The keys are usually a std::string specifying the name, but can be anything + * that can be used in a std::map. + * + * You should most likely not use the Registry class explicitly, but use the + * helper macros below to declare specific registries as well as registering + * objects. 
+ */ + +/** + * C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function + * declaration, as well as creating a convenient typename for its corresponding + * registerer. + */ +// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE +// as import and DEFINE as export, because these registry macros will be used +// in downstream shared libraries as well, and one cannot use *_API - the API +// macro will be defined on a per-shared-library basis. Semantically, when one +// declares a typed registry it is always going to be IMPORT, and when one +// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE), +// the instantiation unit is always going to be exported. +// +// The only unique condition is when in the same file one does DECLARE and +// DEFINE - in Windows compilers, this generates a warning that dllimport and +// dllexport are mixed, but the warning is fine and linker will be properly +// exporting the symbol. Same thing happens in the gflags flag declaration and +// definition caes. +// #define C10_DECLARE_TYPED_REGISTRY( +// RegistryName, SrcType, ObjectType, PtrType, ...) +// C10_API ::c10::Registry, ##__VA_ARGS__>* +// RegistryName(); +// typedef ::c10::Registerer, ##__VA_ARGS__> +// Registerer##RegistryName + +// #define TORCH_DECLARE_TYPED_REGISTRY( +// RegistryName, SrcType, ObjectType, PtrType, ...) +// TORCH_API ::c10::Registry, ##__VA_ARGS__>* +// RegistryName(); +// typedef ::c10::Registerer, ##__VA_ARGS__> +// Registerer##RegistryName + +// #define C10_DEFINE_TYPED_REGISTRY( +// RegistryName, SrcType, ObjectType, PtrType, ...) +// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* +// RegistryName() { +// static ::c10::Registry, ##__VA_ARGS__>* +// registry = new ::c10:: +// Registry, ##__VA_ARGS__>(); +// return registry; +// } + +// #define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( +// RegistryName, SrcType, ObjectType, PtrType, ...) 
+// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* +// RegistryName() { +// static ::c10::Registry, ##__VA_ARGS__>* +// registry = +// new ::c10::Registry, ##__VA_ARGS__>( +// false); +// return registry; +// } + +// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated +// creator with comma in its templated arguments. +// #define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, RegistryName(), ##__VA_ARGS__); + +// #define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( +// RegistryName, key, priority, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, priority, RegistryName(), ##__VA_ARGS__); + +// #define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, +// RegistryName(), +// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, +// ::c10::demangle_type<__VA_ARGS__>()); + +// #define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( +// RegistryName, key, priority, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, +// priority, +// RegistryName(), +// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, +// ::c10::demangle_type<__VA_ARGS__>()); + +// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use +// std::string as the key type, because that is the most commonly used cases. +// #define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) +// C10_DECLARE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +// #define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) +// TORCH_DECLARE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +// #define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) 
+// C10_DEFINE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +// #define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) +// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( +// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +// #define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) +// C10_DECLARE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +// #define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) +// TORCH_DECLARE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +// #define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) +// C10_DEFINE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +// #define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( +// RegistryName, ObjectType, ...) +// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( +// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string +// as the key +// type, because that is the most commonly used cases. +// #define C10_REGISTER_CREATOR(RegistryName, key, ...) +// C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__) + +// #define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) +// C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( +// RegistryName, #key, priority, __VA_ARGS__) + +// #define C10_REGISTER_CLASS(RegistryName, key, ...) +// C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__) + +// #define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) 
+// C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( +// RegistryName, #key, priority, __VA_ARGS__) + + // namespace c10 + +// #endif // C10_UTIL_REGISTRY_H_ + + +// Parsed from c10/util/Flags.h + +// #ifndef C10_UTIL_FLAGS_H_ +// #define C10_UTIL_FLAGS_H_ + +/* Commandline flags support for C10. + * * This is a portable commandline flags tool for c10, so we can optionally * choose to use gflags or a lightweight custom implementation if gflags is * not possible on a certain platform. If you have gflags installed, set the @@ -6497,7 +7179,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // export on Windows platform (with dllexport) but not on linux/mac (with // default visibility). As a result, to ensure that we are always exporting // global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we -// are building C10 as a shared libray. +// are building C10 as a shared library. // This has to be done after the inclusion of gflags, because some early // versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the // macros, so we need to do definition after gflags is done. @@ -6524,7 +7206,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // (3) Gflags has a design issue that does not properly expose the global flags, // if one builds the library with -fvisibility=hidden. The current gflags (as of // Aug 2018) only deals with the Windows case using dllexport, and not the Linux -// counterparts. As a result, we will explciitly use C10_EXPORT to export the +// counterparts. As a result, we will explicitly use C10_EXPORT to export the // flags defined in C10. This is done via a global reference, so the flag // itself is not duplicated - under the hood it is the same global gflags flag. 
// #define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) @@ -6600,7 +7282,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // C10_DEFINE_typed_var(std::string, name, default_value, help_str) // DECLARE_typed_var should be used in header files and in the global namespace. -// #define C10_DECLARE_typed_var(type, name) C10_IMPORT extern type FLAGS_##name +// #define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name // #define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name) // #define C10_DECLARE_int32(name) C10_DECLARE_int(name) @@ -6618,841 +7300,120 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_UTIL_FLAGS_H_ -// Parsed from c10/core/impl/LocalDispatchKeySet.h - -// #pragma once - -// #include -// #include -// #include - -// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) -// -// This manages two thread-local DispatchKeySets: -// -// - The included type set, which adds a tensor type for consideration -// in dispatch. (For example, you might add Profiling to -// the included type set to turn on profiling on all tensor operations.) -// -// - The excluded type set, which disqualifies a tensor type from dispatch. -// (For example, after redispatching on variable, we disqualify -// Autograd so we don't attempt to handle variable again.) -// (Exclusion wins over inclusion.) -// -// NB: Originally, I implemented the excluded type set as storing the inverted -// set, but TLS is defined to be zero-initialized, so this doesn't actually work -// (if it's inverted, you want the set to be -1 initialized). -// Targeting ../PODLocalDispatchKeySet.java - - -// Targeting ../LocalDispatchKeySet.java - +// Parsed from c10/util/accumulate.h +// Copyright 2004-present Facebook. All Rights Reserved. -// thread_local variables cannot be C10_API on Windows. -// Inlining this seems to break AutoDispatchBelowAutograd on Android. 
-// #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) -@Namespace("c10::impl") public static native @ByVal LocalDispatchKeySet tls_local_dispatch_key_set(); -// #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +// #pragma once -// #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +// #include -// Internal, use ThreadLocalStateGuard +// #include +// #include +// #include -// Targeting ../IncludeDispatchKeyGuard.java +/** Sum of a list of integers; accumulates into the int64_t datatype */ +/** Sum of integer elements referred to by iterators; accumulates into the + * int64_t datatype */ -// Targeting ../ForceDispatchKeyGuard.java +/** Product of a list of integers; accumulates into the int64_t datatype */ +/** Product of integer elements referred to by iterators; accumulates into the + * int64_t datatype */ +/** Return product of all dimensions starting from k + * Returns 1 if k>=dims.size() */ -// Non-RAII API for manipulating the thread-local dispatch state. -// Please prefer the RAII API. The non-RAII API may be useful when -// the included/excluded state of a given DispatchKey must span -// many calls from the Python to the C++, so you cannot conveniently -// use an RAII guard. -// -// Example use case: a Python context manager that includes a certain -// DispatchKey, to ensure ops running under the context manager dispatch -// through that DispatchKey's registered overrides. -// -// The non-RAII API is less efficient than the RAII guards because both the -// getter and setter will do a tls_getaddr lookup (the RAII struct only needs -// one!) 
+/** Product of all dims up to k (not including dims[k]) + * Throws an error if k>dims.size() */ -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(DispatchKey x); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(@Cast("c10::DispatchKey") short x); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(DispatchKey x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(DispatchKey x); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(@Cast("c10::DispatchKey") short x); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(DispatchKey x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_excluded(@ByVal DispatchKeySet ks); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_included(@ByVal DispatchKeySet ks); +/** Product of all dims between k and l (including dims[k] and excluding + * dims[l]) k and l may be supplied in either order */ - // namespace impl // namespace c10 -// Parsed from c10/core/InferenceMode.h +// Parsed from c10/util/safe_numerics.h // #pragma once - -// #include -// #include -// #include // #include -// Targeting ../InferenceMode.java +// #include +// #include +// #include +// #include + +// GCC has __builtin_mul_overflow from before it supported __has_builtin +// #ifdef _MSC_VER +// #define C10_HAS_BUILTIN_OVERFLOW() (0) +// #include +// #include +// #else 
+// #define C10_HAS_BUILTIN_OVERFLOW() (1) +// #endif + +@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongPointer out); +@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongBuffer out); +@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") long[] out); + +@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongPointer out); +@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongBuffer out); +@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") long[] out); // namespace c10 -// Parsed from c10/core/SymIntArrayRef.h +// Parsed from c10/core/TensorImpl.h // #pragma once -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include // #include +// #include // #include +// #include +// #include +// #include +// #include +// #include -@Namespace("c10") public static native @ByVal LongArrayRef asIntArrayRefUnchecked(@ByVal SymIntArrayRef ar); +// #include +// #include +// #include +// #include +// #include +// #include -@Namespace("c10") public static native @ByVal LongArrayRefOptional asIntArrayRefSlowOpt( - @ByVal SymIntArrayRef ar); +// A global boolean variable to control whether we free memory when a Tensor +// is shrunk to a smaller size. As a result, a Tensor is always going to +// keep the memory allocated for its maximum capacity reshaped to so far. 
+// +// This parameter is respected "upper-case" methods which call Resize() +// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_ +// or ShrinkTo, both of which guarantee to never to free memory. - -// #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__) - -// Prefer using a more semantic constructor, like -// fromIntArrayRefKnownNonNegative -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); - -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); - -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
array_ref); - - // namespace c10 - - -// Parsed from c10/core/DefaultDtype.h - -// #pragma once - -// #include -// #include - // namespace caffe2 -@Namespace("c10") public static native void set_default_dtype(@ByVal TypeMeta dtype); -@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_dtype(); -@Namespace("c10") public static native ScalarType get_default_dtype_as_scalartype(); -@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_complex_dtype(); - // namespace c10 - - -// Parsed from c10/core/TensorOptions.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include - -// #include -// #include -// #include - -@Namespace("c10") public static native DispatchKey computeDispatchKey( - @ByVal ScalarTypeOptional dtype, - @ByVal LayoutOptional layout, - @ByVal DeviceOptional device); - -@Namespace("c10") public static native ScalarType dtype_or_default(@ByVal ScalarTypeOptional dtype); - -@Namespace("c10") public static native @ByVal TypeMeta dtype_or_default( - @ByVal TypeMetaOptional dtype); - -@Namespace("c10") public static native Layout layout_or_default(@ByVal LayoutOptional layout); - -@Namespace("c10") public static native @ByVal Device device_or_default(@ByVal DeviceOptional device); - - -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -@Namespace("c10") public static native @Cast("bool") boolean pinned_memory_or_default(@ByVal BoolOptional pinned_memory); -// Targeting ../TensorOptions.java - - - -// We should aspire to fit in one machine-size word; but a size greater than two -// words is too much. (We are doing terribly on 32-bit archs, where we require -// three machine size words to store tensor options. Eek!) - -/** Convenience function that returns a {@code TensorOptions} object with the {@code dtype} - * set to the given one. 
*/ -@Namespace("c10") public static native @ByVal TensorOptions dtype(@ByVal TypeMeta dtype); - -// legacy function to support ScalarType -@Namespace("c10") public static native @ByVal TensorOptions dtype(ScalarType dtype); - -/** Convenience function that returns a {@code TensorOptions} object with the {@code layout} - * set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions layout(Layout layout); -@Namespace("c10") public static native @ByVal TensorOptions layout(@Cast("c10::Layout") byte layout); - -/** Convenience function that returns a {@code TensorOptions} object with the {@code device} - * set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions device(@ByVal Device device); - -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code device} set to CUDA and the {@code device_index} set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions device_index(short device_index); - -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code requires_grad} set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions requires_grad(@Cast("bool") boolean requires_grad/*=true*/); - -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code memory_format} set to the given one. 
*/ -@Namespace("c10") public static native @ByVal TensorOptions memory_format(MemoryFormat memory_format); -@Namespace("c10") public static native @ByVal TensorOptions memory_format(@Cast("c10::MemoryFormat") byte memory_format); - -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer stream, - @Const @ByRef TensorOptions options); - -@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByVal TensorOptions options); - -// This is intended to be a centralized location by which we can determine -// what an appropriate DispatchKey for a tensor is. - -@Namespace("c10") public static native Layout dispatchKeyToLayout(DispatchKey dispatch_key); -@Namespace("c10") public static native @Cast("c10::Layout") byte dispatchKeyToLayout(@Cast("c10::DispatchKey") short dispatch_key); - -@Namespace("c10") public static native DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key); -@Namespace("c10") public static native @Cast("c10::DeviceType") byte dispatchKeyToDeviceType(@Cast("c10::DispatchKey") short dispatch_key); - -@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key); -@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(@Cast("c10::DispatchKey") short dispatch_key); -@Namespace("c10::detail") public static native @Cast("bool") boolean backend_supports_empty_operator(@Const @ByVal TensorOptions options); - - // namespace detail - - // namespace c10 - - -// Parsed from c10/core/WrapDimMinimal.h - -// #pragma once - -// #include -// #include -// This template can only be specialized at int64_t and c10::SymInt; -// you'll get linker errors otherwise - // namespace detail - -@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr, - @Cast("bool") boolean 
wrap_scalar/*=true*/); -@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr); - -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr, - @Cast("bool") boolean wrap_scalar/*=true*/); -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr); - - // namespace c10 - - -// Parsed from c10/core/impl/HermeticPyObjectTLS.h - -// #pragma once - -// #include -// #include -// Targeting ../HermeticPyObjectTLS.java - - - - // namespace impl - // namespace c10 - - -// Parsed from c10/core/impl/PyInterpreter.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// Forward declarations - // namespace c10 - - // namespace torch - -// Actual implementation -// Targeting ../PyInterpreterVTable.java - - -// Targeting ../PyInterpreter.java - - - -// PyInterpreterStatus describes what the state of its interpreter tag -// is, relative to the thread currently holding the GIL. -@Namespace("c10::impl") public enum PyInterpreterStatus { - // We just allocated the Tensor, it hasn't escaped to other threads, - // we know that it definitely hasn't been tagged to be associated - // with an interpreter. - DEFINITELY_UNINITIALIZED(0), - // We queried the interpreter field and it looked uninitialized. But - // another thread may have raced with us to tag it with some other - // interpreter id. So we will have to do a CEX to make sure we can - // actually nab it. - MAYBE_UNINITIALIZED(1), - // We queried the interpreter field and it was tagged to belong to us. - // This means we have sole write access (as we hold the GIL for this - // interpreter) - TAGGED_BY_US(2), - // Someone else tagged this. We can't use this TensorImpl from Python. 
- TAGGED_BY_OTHER(3); - - public final int value; - private PyInterpreterStatus(int v) { this.value = v; } - private PyInterpreterStatus(PyInterpreterStatus e) { this.value = e.value; } - public PyInterpreterStatus intern() { for (PyInterpreterStatus e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - - // namespace impl - // namespace c10 - - -// Parsed from c10/core/impl/PyObjectSlot.h - -// #pragma once - -// #include -// #include -// #include -// #include - -// #include - - // namespace impl - // namespace c10 - - -// Parsed from c10/core/impl/SizesAndStrides.h - -// #pragma once - -// #include -// #include - -// #include -// #include -// #include - -public static final int C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE = 5; -// Targeting ../SizesAndStrides.java - - - - // namespace impl - // namespace c10 - - -// Parsed from c10/util/DimVector.h - -// #pragma once - -// #include -// #include -// #include -// #include - -@Namespace("c10") @MemberGetter public static native @Cast("const size_t") long kDimVectorStaticSize(); - -/** A container for sizes or strides */ - - // namespace c10 - - -// Parsed from c10/util/Logging.h - -// #ifndef C10_UTIL_LOGGING_H_ -// #define C10_UTIL_LOGGING_H_ - -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include - -// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off -// logging at compile time so no logging message below that level is produced -// at all. The value should be between INT_MIN and CAFFE_FATAL. -// #ifndef CAFFE2_LOG_THRESHOLD -// If we have not defined the compile time log threshold, we keep all the -// log cases. -public static native @MemberGetter int CAFFE2_LOG_THRESHOLD(); -public static final int CAFFE2_LOG_THRESHOLD = CAFFE2_LOG_THRESHOLD(); -// #endif // CAFFE2_LOG_THRESHOLD - -// Below are different implementations for glog and non-glog cases. 
-// #ifdef C10_USE_GLOG -// #include -// #else // !C10_USE_GLOG -// #include -// #endif // C10_USE_GLOG - - - - -// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's -// not available - just short-circuit to the always working one one. -// We define the C10_ name to avoid confusing other files -// #ifdef LOG_EVERY_MS -// #define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms) -// #else -// #define C10_LOG_EVERY_MS(severity, ms) LOG(severity) -// #endif - -// Same for LOG_FIRST_N -// #ifdef LOG_FIRST_N -// #define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n) -// #else -// #define C10_LOG_FIRST_N(severity, n) LOG(severity) -// #endif - -// Same for LOG_EVERY_N -// #ifdef LOG_EVERY_N -// #define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n) -// #else -// #define C10_LOG_EVERY_N(severity, n) LOG(severity) -// #endif - -// Functions that we use for initialization. -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") PointerPointer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") @ByPtrPtr BytePointer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntBuffer argc, @Cast("char**") @ByPtrPtr ByteBuffer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(int[] argc, @Cast("char**") @ByPtrPtr byte[] argv); -@Namespace("c10") public static native void UpdateLoggingLevelsFromFlags(); - -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg); -@Namespace("c10") 
public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @StdString String msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @StdString String msg); - -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3); - -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @StdString String msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @StdString String msg); - -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") 
BytePointer condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3); - -@Namespace("c10") public static native @Cast("const bool") boolean IsUsingGoogleLogging(); - -/** - * A utility to allow one to show log info to stderr after the program starts. - * - * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level - * to smaller than INFO. You are recommended to only use this in a few sparse - * cases, such as when you want to write a tutorial or something. Normally, use - * the commandline flags to set the log level. - */ -@Namespace("c10") public static native void ShowLogInfoToStderr(); - -@Namespace("c10") public static native void SetStackTraceFetcher(@ByVal StringSupplier fetcher); - -// #define CAFFE_ENFORCE(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); -// } -// } while (false) - -// #define CAFFE_ENFORCE_FINITE(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceFiniteNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); -// } -// } while (false) - -// #define CAFFE_ENFORCE_WITH_CALLER(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); -// } -// } while (false) - -// #define CAFFE_THROW(...) 
-// ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__)) - -/** - * Rich logging messages - * - * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that - * capture input argument values and add it to the exception message. E.g. - * {@code CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")} - * would evaluate both foo and bar only once and if the results are not equal - - * include them in the exception message. - * - * Some of the basic checker functions like Equals or Greater are already - * defined below. Other header might define customized checkers by adding - * functions to caffe2::enforce_detail namespace. For example: - * - * namespace caffe2 { namespace enforce_detail { - * inline EnforceFailMessage IsVector(const vector& shape) { - * if (shape.size() == 1) { return EnforceOK(); } - * return c10::str("Shape ", shape, " is not a vector"); - * } - * }} - * - * With further usages like {@code CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))} - * - * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided - * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in - * user-provided input. - */ -// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) -// ::c10::enforce_detail::enforceThatImpl( -// op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__) - -// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) -// ::c10::enforce_detail::enforceThatImpl( -// op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__) - - // namespace enforce_detail - -// #define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) -// CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__) - -// #define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) -// CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_EQ(x, y, ...) 
-// CAFFE_ENFORCE_BINARY_OP(std::equal_to(), ==, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_NE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::not_equal_to(), !=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::less_equal(), <=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LT(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::less(), <, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::greater_equal(), >=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GT(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::greater(), >, x, y, ##__VA_ARGS__) - -// #define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) -// CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( -// cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::equal_to(), ==, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::not_equal_to(), !=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::less_equal(), <=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less(), <, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::greater_equal(), >=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::greater(), >, x, y, ##__VA_ARGS__) - -/** - * Very lightweight logging for the first time API usage. It's beneficial for - * tracking of individual functionality usage in larger applications. - * - * In order to ensure light-weightedness of logging, we utilize static variable - * trick - LogAPIUsage will be invoked only once and further invocations will - * just do an atomic check. 
- * - * Example: - * // Logs caller info with an arbitrary text event, if there is a usage. - * C10_LOG_API_USAGE_ONCE("my_api"); - */ -// #define C10_LOG_API_USAGE_ONCE(...) -// C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = -// ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__); - -// API usage logging capabilities -@Namespace("c10") public static native void SetAPIUsageLogger(@ByVal StringConsumer logger); -@Namespace("c10") public static native void LogAPIUsage(@StdString BytePointer context); -@Namespace("c10") public static native void LogAPIUsage(@StdString String context); -// Targeting ../DDPLoggingData.java - - - -@Namespace("c10") public static native void SetPyTorchDDPUsageLogger( - @ByVal DDPLogger logger); -@Namespace("c10") public static native void LogPyTorchDDPUsage(@Const @ByRef DDPLoggingData ddpData); -// Return value is needed to do the static variable initialization trick -@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString BytePointer context); -@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString String context); - // namespace detail - -// Initializes the c10 logger. -@Namespace("c10") public static native void initLogging(); - - // namespace c10 - -// #endif // C10_UTIL_LOGGING_H_ - - -// Parsed from c10/util/accumulate.h - -// Copyright 2004-present Facebook. All Rights Reserved. 
- -// #pragma once - -// #include - -// #include -// #include -// #include - -/** Sum of a list of integers; accumulates into the int64_t datatype */ - -/** Sum of integer elements referred to by iterators; accumulates into the - * int64_t datatype */ - -/** Product of a list of integers; accumulates into the int64_t datatype */ - -/** Product of integer elements referred to by iterators; accumulates into the - * int64_t datatype */ - -/** Return product of all dimensions starting from k - * Returns 1 if k>=dims.size() */ - -/** Product of all dims up to k (not including dims[k]) - * Throws an error if k>dims.size() */ - -/** Product of all dims between k and l (including dims[k] and excluding - * dims[l]) k and l may be supplied in either order */ - - // namespace c10 - - -// Parsed from c10/util/safe_numerics.h - -// #pragma once -// #include -// #include - -// #include -// #include -// #include - -// GCC has __builtin_mul_overflow from before it supported __has_builtin -// #ifdef _MSC_VER -// #define C10_HAS_BUILTIN_OVERFLOW() (0) -// #include -// #include -// #else -// #define C10_HAS_BUILTIN_OVERFLOW() (1) -// #endif - -@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongPointer out); -@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongBuffer out); -@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") long[] out); - -@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongPointer out); -@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongBuffer out); -@Namespace("c10") public static 
native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") long[] out); - - // namespace c10 - - -// Parsed from c10/core/TensorImpl.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include -// #include -// #include - -// A global boolean variable to control whether we free memory when a Tensor -// is shrunk to a smaller size. As a result, a Tensor is always going to -// keep the memory allocated for its maximum capacity reshaped to so far. -// -// This parameter is respected "upper-case" methods which call Resize() -// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_ -// or ShrinkTo, both of which guarantee to never to free memory. - - -// Since we can have high variance in blob memory allocated across different -// inputs in the same run, we will shrink the blob only if the memory gain -// is larger than this flag in bytes. This only applies to functions which -// respect caffe2_keep_on_shrink. +// Since we can have high variance in blob memory allocated across different +// inputs in the same run, we will shrink the blob only if the memory gain +// is larger than this flag in bytes. This only applies to functions which +// respect caffe2_keep_on_shrink. // #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") // #endif // namespace at - // namespace c10 /** * A utility function to convert vector to vector. 
@@ -7475,9 +7436,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Wrap around axis_index if it is negative, s.t., -1 is the last dim @Namespace("c10") public static native int canonical_axis_index_(int axis_index, int ndims); -// Targeting ../PlacementDtor.java - - // Targeting ../PlacementDeleteContext.java @@ -7498,10 +7456,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../NamedTensorMetaInterface.java +// Targeting ../BackendMeta.java + + +// Targeting ../SymbolicShapeMeta.java + -// For ease of copy pasting -// #if 0 -// #endif // Targeting ../VariableVersion.java @@ -7509,10 +7469,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Forward declaration of TensorImpl needed for forward declaration of // C10_TensorImpl_Size_Check_Dummy_Class -// Forward declaration needed because TensorImpl needs to be friends with -// C10_TensorImpl_Size_Check_Dummy_Class in order to check the size -// of its private fields. - /** * NOTE: Some TensorImpl methods are small and not overridden in the * PyTorch codebase itself, but may theoretically need to be @@ -7650,6 +7606,188 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from c10/util/ExclusivelyOwned.h + +// #pragma once + +// #include + +// See example implementation in TensorBase.h and TensorBody.h. +// Synopsis: +// +// repr_type -- type to use to store an owned T in ExclusivelyOwned. +// +// pointer_type -- pointer-esque type to return from +// ExclusivelyOwned's get() and operator*() methods. +// +// const_pointer_type -- similar to pointer_type, used for the const methods. +// +// static repr_type nullRepr() -- return a null instance of repr_type. +// +// template +// static repr_type createInPlace(Args&&... args) -- used by the in-place +// ExclusivelyOwned constructor. +// +// static repr_type moveToRepr(T&& x) -- move the given x into an +// instance of repr_type. 
used by the ExclusivelyOwned(T&&) +// constructor. +// +// static void destroyOwned(repr_type x) -- free memory for a +// known-exclusively-owned instance of x. Replaces calling repr_type's +// destructor. Being able to implement this more efficiently than +// repr_type's destructor is the main reason to use ExclusivelyOwned +// for a type. +// +// static T take(repr_type&) -- move out of the given repr_type into an owned T. +// +// static pointer_type getImpl(const repr_type&) -- return a pointer +// to the given repr_type. May take repr_type by value if that is more +// efficient. + +/** ExclusivelyOwned is a smart-pointer-like wrapper around an + * exclusively-owned instance of some type T that normally has + * mandatory reference counting (currently just Tensor). If you have + * an isolated piece of code that knows that it has sole ownership of + * an object of one of these types (i.e., because you created it + * directly or using a factory function) and that object will not + * escape from that isolated piece of code, then moving the object + * into an ExclusivelyOwned will avoid an atomic reference count + * decrement at destruction time. + * + * If you directly create the Tensor in the first + * place, you can use the in_place constructor of ExclusivelyOwned to + * additionally avoid doing any stores to initialize the refcount & + * weakcount. 
*/ + + // namespace c10 + + +// Parsed from c10/core/DefaultDtype.h + +// #pragma once + +// #include +// #include + // namespace caffe2 +@Namespace("c10") public static native void set_default_dtype(@ByVal TypeMeta dtype); +@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_dtype(); +@Namespace("c10") public static native ScalarType get_default_dtype_as_scalartype(); +@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_complex_dtype(); + // namespace c10 + + +// Parsed from c10/core/TensorOptions.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include + +// #include +// #include + +@Namespace("c10") public static native DispatchKey computeDispatchKey( + @ByVal ScalarTypeOptional dtype, + @ByVal LayoutOptional layout, + @ByVal DeviceOptional device); + +@Namespace("c10") public static native ScalarType dtype_or_default(@ByVal ScalarTypeOptional dtype); + +@Namespace("c10") public static native @ByVal TypeMeta dtype_or_default( + @ByVal TypeMetaOptional dtype); + +@Namespace("c10") public static native Layout layout_or_default(@ByVal LayoutOptional layout); + +@Namespace("c10") public static native @ByVal Device device_or_default(@ByVal DeviceOptional device); + + +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +@Namespace("c10") public static native @Cast("bool") boolean pinned_memory_or_default(@ByVal BoolOptional pinned_memory); +// Targeting ../TensorOptions.java + + + +// We should aspire to fit in one machine-size word; but a size greater than two +// words is too much. (We are doing terribly on 32-bit archs, where we require +// three machine size words to store tensor options. Eek!) + +/** Convenience function that returns a {@code TensorOptions} object with the {@code dtype} + * set to the given one. 
*/ +@Namespace("c10") public static native @ByVal TensorOptions dtype(@ByVal TypeMeta dtype); + +// legacy function to support ScalarType +@Namespace("c10") public static native @ByVal TensorOptions dtype(ScalarType dtype); + +/** Convenience function that returns a {@code TensorOptions} object with the {@code layout} + * set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions layout(Layout layout); +@Namespace("c10") public static native @ByVal TensorOptions layout(@Cast("c10::Layout") byte layout); + +/** Convenience function that returns a {@code TensorOptions} object with the {@code device} + * set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions device(@ByVal Device device); + +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code device} set to CUDA and the {@code device_index} set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions device_index(short device_index); + +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code requires_grad} set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions requires_grad(@Cast("bool") boolean requires_grad/*=true*/); + +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code memory_format} set to the given one. 
*/ +@Namespace("c10") public static native @ByVal TensorOptions memory_format(MemoryFormat memory_format); +@Namespace("c10") public static native @ByVal TensorOptions memory_format(@Cast("c10::MemoryFormat") byte memory_format); + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @Const @ByRef TensorOptions options); + +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef TensorOptions options); + +// This is intended to be a centralized location by which we can determine +// what an appropriate DispatchKey for a tensor is. + +@Namespace("c10") public static native Layout dispatchKeyToLayout(DispatchKey dispatch_key); +@Namespace("c10") public static native @Cast("c10::Layout") byte dispatchKeyToLayout(@Cast("c10::DispatchKey") short dispatch_key); + +@Namespace("c10") public static native DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key); +@Namespace("c10") public static native @Cast("c10::DeviceType") byte dispatchKeyToDeviceType(@Cast("c10::DispatchKey") short dispatch_key); + +@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key); +@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(@Cast("c10::DispatchKey") short dispatch_key); +@Namespace("c10::detail") public static native @Cast("bool") boolean backend_supports_empty_operator(@Const @ByRef TensorOptions options); + + // namespace detail + + // namespace c10 + + // Parsed from ATen/core/CheckMemoryFormat.h // #include @@ -7730,13 +7868,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once -// #include // #include // #include // #include // #include // #include // #include +// #include // #include // #include @@ -8016,6 +8154,58 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from 
ATen/StorageUtils.h + +// #pragma once + +// #include +// #include +// #include + +// Here we define a series of utils to create/manipulate ATen backed +// c10 storage implementations. + +/** + * Create a new shared memory storage impl managed by file descriptor + * + * @param size size in bytes + */ +@Namespace("at") public static native @ByVal StorageImplPtr new_shm_fd_storage(@Cast("size_t") long size); + +/** + * Copy src to dst + * Caller must guarantee the validness of the storage objects + * during the entire copy process, esp. when it's async. + * + * This can probably live in c10 namespace later if needed, + * but for now keep it in at to keep implementation simple. + * + * @param dst dst tensor + * @param src src tensor + * @param non_blocking (default false) whether this operation blocks caller + */ +@Namespace("at") public static native void storage_copy( + @ByRef Storage dst, + @Cast({"", "c10::Storage&&"}) @StdMove Storage src, + @Cast("bool") boolean non_blocking/*=false*/); +@Namespace("at") public static native void storage_copy( + @ByRef Storage dst, + @Cast({"", "c10::Storage&&"}) @StdMove Storage src); + +/** + * In place change the storage to shm based. + * + * This is only applicable to CPU tensors not already shared. 
+ * Otherwise, it's a no op to mirror the THP tensor behavior: + * https://pytorch.org/docs/stable/generated/torch.Tensor.share_memory_.html + * + * @param t a tensor + */ +@Namespace("at") public static native void share_memory_(@ByRef TensorBase t); + + // namespace at + + // Parsed from ATen/core/TensorBase.h // #pragma once @@ -8026,11 +8216,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include // #include // #include +// #include // #include // #include // #include @@ -8038,8 +8230,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include +// #include // namespace torch::autograd @@ -8115,12 +8307,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include +// #include // #include // #include // #include +// #include // #include +// #include +// #include +// #include +// #include +// #include // #include // #include // #include @@ -8373,6 +8571,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -8541,6 +8740,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -9220,10 +9420,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) -// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) +// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) + +// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) -// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) + +// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] 
sizes, Dimname[] names) -> Tensor(a) + + +// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) // aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) @@ -9847,7 +10053,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -// aten::sum_to_size(Tensor self, int[] size) -> Tensor +// aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor + + +// aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor // aten::sqrt(Tensor self) -> Tensor @@ -9868,13 +10077,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor +// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor // aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor +// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor // aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor @@ -9904,7 +10113,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::tanh_(Tensor(a!) self) -> Tensor(a!) 
-// aten::tile(Tensor self, int[] dims) -> Tensor +// aten::tile(Tensor self, SymInt[] dims) -> Tensor + + +// aten::tile(Tensor self, SymInt[] dims) -> Tensor // aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) @@ -9925,7 +10137,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::flipud(Tensor self) -> Tensor -// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor + + +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor // aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor @@ -9937,7 +10152,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::_nested_tensor_strides(Tensor self) -> Tensor -// aten::_nested_tensor_offsets(Tensor self) -> int[] +// aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor // aten::trunc(Tensor self) -> Tensor @@ -9967,13 +10182,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor +// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor // aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor +// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor // aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a) @@ -10069,10 +10284,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_mask(Tensor self, Tensor mask) -> Tensor -// aten::to_dense(Tensor self, ScalarType? 
dtype=None) -> Tensor +// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor + +// aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor -// aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor + +// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor // aten::sparse_dim(Tensor self) -> int @@ -10132,21 +10350,39 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor +// aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor + + // aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor +// aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor + + // aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor +// aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor + + // aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor +// aten::_to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor + + // aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor +// aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + + // aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor +// aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor + + // aten::to_mkldnn(Tensor self, ScalarType? 
dtype=None) -> Tensor @@ -10669,6 +10905,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::nonzero(Tensor self) -> Tensor +// aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor + + // aten::nonzero_numpy(Tensor self) -> Tensor[] @@ -10912,7 +11151,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor -// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) +// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + + +// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) // aten::all(Tensor self) -> Tensor @@ -11022,6 +11264,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../OptionalTensorRef.java +// Targeting ../TensorRef.java + + @@ -11043,7 +11288,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // #include +// Targeting ../CompiledNodeArgs.java + + +// Targeting ../SwapSavedVariables.java + + + // namespace torch::dynamo::autograd // A hook that's called on gradients // Targeting ../FunctionPreHook.java @@ -11052,6 +11305,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../FunctionPostHook.java +// Targeting ../PostAccumulateGradHook.java + + // namespace autograd // namespace torch @@ -11180,6 +11436,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include +// #include // [ Using ForwardGrad ] // ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner @@ -11413,6 +11670,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -11458,6 +11716,7 
@@ public class torch extends org.bytedeco.pytorch.presets.torch { // _(AnyClassType) // _(SymIntType) // _(SymFloatType) +// _(SymBoolType) // _(UnionType) // _(DynamicType) @@ -11500,8 +11759,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { AnyClassType(35), SymIntType(36), SymFloatType(37), - UnionType(38), - DynamicType(39); + SymBoolType(38), + UnionType(39), + DynamicType(40); public final int value; private TypeKind(int v) { this.value = v; } @@ -11805,15 +12065,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #else // #define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline)) // #endif +@Namespace("ska_ordered::detailv3") @MemberGetter public static native byte min_lookups(); +public static final byte min_lookups = min_lookups(); + +@Namespace("ska_ordered::detailv3") public static native @Cast("uint64_t") long next_power_of_two(@Cast("uint64_t") long i); // Implementation taken from http://en.cppreference.com/w/cpp/types/void_t // (it takes CWG1558 into account and also works for older compilers) - -// Targeting ../prime_number_hash_policy.java - - + // namespace detailv3 // Targeting ../power_of_two_hash_policy.java @@ -11964,6 +12225,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include // #include // #include @@ -12103,6 +12366,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SymFloatType.java +// Targeting ../SymBoolType.java + + // Targeting ../IntType.java @@ -12527,163 +12793,521 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include - // namespace impl - // namespace c10 + // namespace impl + // namespace c10 + + +// Parsed from c10/core/Event.h + +// #pragma once + +// #include +// #include + +/** + * A backend-generic movable, not copyable, not thread-safe event. + * + * The design of this event follows that of CUDA and HIP events. 
These events + * are recorded and waited on by streams and can be rerecorded to, + * each rerecording essentially creating a new version of the event. + * For example, if (in CPU time), stream X is asked to record E, + * stream Y waits on E, and stream X is asked to record E again, then Y will + * wait for X to finish the first call to record and not the second, because + * it's waiting on the first version of event E, not the second. + * Querying an event only returns the status of its most recent version. + * + * Backend-generic events are implemented by this class and + * impl::InlineEvent. In addition to these events there are also + * some backend-specific events, like ATen's CUDAEvent. Each of these + * classes has its own use. + * + * impl::InlineEvent<...> or a backend-specific event should be + * preferred when the backend is known at compile time and known to + * be compiled. Backend-specific events may have additional functionality. + * + * This Event should be used if a particular backend may not be available, + * or the backend required is not known at compile time. + * + * These generic events are built on top of DeviceGuardImpls, analogous + * to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls," + * is no longer entirely accurate, as these classes implement the + * backend-specific logic for a generic backend interface. + * + * See DeviceGuardImplInterface.h for a list of all supported flags. + */ + + // namespace c10 + + +// Parsed from c10/core/impl/InlineStreamGuard.h + +// #pragma once + +// #include +// #include +// #include + +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * InlineStreamGuard is a helper class for implementing StreamGuards. + * See InlineDeviceGuard for guidance on how to use this class. 
+ */ + +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See InlineOptionalDeviceGuard for more guidance on how to use this class. + */ + + // namespace impl + // namespace c10 + + +// Parsed from c10/core/StreamGuard.h + +// #pragma once + +// #include + +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * Use of StreamGuard is HIGHLY discouraged in operator definitions. In + * a single operator, you probably don't know enough about the global + * state of the world to profitably decide how to set streams. Let + * the caller handle this appropriately, and just use the current stream + * in your operator code. + * + * This StreamGuard does NOT have an uninitialized state; it is guaranteed + * to reset the stream and device on exit. If you are in a situation + * where you *might* want to setup a stream guard, see OptionalStreamGuard. + */ + +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See OptionalDeviceGuard for more guidance on how to use this class. + */ + +/** + * A MultiStreamGuard is an RAII class that sets the current streams of a set of + * devices all at once, and resets them to their original values on destruction. + */ + + // namespace c10 + + +// Parsed from c10/util/FunctionRef.h + +//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains some templates that are useful if you are working with the +// STL at all. +// +// No library is required when using these functions. +// +//===----------------------------------------------------------------------===// + +// c10: modified from llvm::function_ref +// c10: added more SFINAE to enable use in overloaded functions + +// #pragma once + +// #include +// #include +// #include + +/** An efficient, type-erasing, non-owning reference to a callable. This is + * intended for use as the type of a function parameter that is not used + * after the function in question returns. + * + * This class does not own the callable, so it is not in general safe to store + * a function_ref. */ + + // namespace c10 + + +// Parsed from c10/util/Logging.h + +// #ifndef C10_UTIL_LOGGING_H_ +// #define C10_UTIL_LOGGING_H_ + +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + +// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off +// logging at compile time so no logging message below that level is produced +// at all. The value should be between INT_MIN and CAFFE_FATAL. +// #ifndef CAFFE2_LOG_THRESHOLD +// If we have not defined the compile time log threshold, we keep all the +// log cases. +public static native @MemberGetter int CAFFE2_LOG_THRESHOLD(); +public static final int CAFFE2_LOG_THRESHOLD = CAFFE2_LOG_THRESHOLD(); +// #endif // CAFFE2_LOG_THRESHOLD + +// Below are different implementations for glog and non-glog cases. +// #ifdef C10_USE_GLOG +// #include +// #else // !C10_USE_GLOG +// #include +// #endif // C10_USE_GLOG + + + + +// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's +// not available - just short-circuit to the always working one one. 
+// We define the C10_ name to avoid confusing other files +// #ifdef LOG_EVERY_MS +// #define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms) +// #else +// #define C10_LOG_EVERY_MS(severity, ms) LOG(severity) +// #endif + +// Same for LOG_FIRST_N +// #ifdef LOG_FIRST_N +// #define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n) +// #else +// #define C10_LOG_FIRST_N(severity, n) LOG(severity) +// #endif + +// Same for LOG_EVERY_N +// #ifdef LOG_EVERY_N +// #define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n) +// #else +// #define C10_LOG_EVERY_N(severity, n) LOG(severity) +// #endif + +// Functions that we use for initialization. +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") PointerPointer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") @ByPtrPtr BytePointer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntBuffer argc, @Cast("char**") @ByPtrPtr ByteBuffer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(int[] argc, @Cast("char**") @ByPtrPtr byte[] argv); +@Namespace("c10") public static native void UpdateLoggingLevelsFromFlags(); + +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @StdString String msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String 
condition, + @StdString String msg); + +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3); + +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @StdString String msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @StdString String msg); + +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer 
condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3); + +@Namespace("c10") public static native @Cast("const bool") boolean IsUsingGoogleLogging(); + +/** + * A utility to allow one to show log info to stderr after the program starts. + * + * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level + * to smaller than INFO. You are recommended to only use this in a few sparse + * cases, such as when you want to write a tutorial or something. Normally, use + * the commandline flags to set the log level. + */ +@Namespace("c10") public static native void ShowLogInfoToStderr(); +@Namespace("c10") public static native void SetStackTraceFetcher(@ByVal StringSupplier fetcher); -// Parsed from c10/core/Event.h +// #define CAFFE_ENFORCE(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); +// } +// } while (false) -// #pragma once +// #define CAFFE_ENFORCE_FINITE(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceFiniteNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); +// } +// } while (false) -// #include -// #include +// #define CAFFE_ENFORCE_WITH_CALLER(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); +// } +// } while (false) + +// #define CAFFE_THROW(...) +// ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__)) /** - * A backend-generic movable, not copyable, not thread-safe event. 
- * - * The design of this event follows that of CUDA and HIP events. These events - * are recorded and waited on by streams and can be rerecorded to, - * each rerecording essentially creating a new version of the event. - * For example, if (in CPU time), stream X is asked to record E, - * stream Y waits on E, and stream X is asked to record E again, then Y will - * wait for X to finish the first call to record and not the second, because - * it's waiting on the first version of event E, not the second. - * Querying an event only returns the status of its most recent version. + * Rich logging messages * - * Backend-generic events are implemented by this class and - * impl::InlineEvent. In addition to these events there are also - * some backend-specific events, like ATen's CUDAEvent. Each of these - * classes has its own use. + * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that + * capture input argument values and add it to the exception message. E.g. + * {@code CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")} + * would evaluate both foo and bar only once and if the results are not equal - + * include them in the exception message. * - * impl::InlineEvent<...> or a backend-specific event should be - * preferred when the backend is known at compile time and known to - * be compiled. Backend-specific events may have additional functionality. + * Some of the basic checker functions like Equals or Greater are already + * defined below. Other header might define customized checkers by adding + * functions to caffe2::enforce_detail namespace. For example: * - * This Event should be used if a particular backend may not be available, - * or the backend required is not known at compile time. 
+ * namespace caffe2 { namespace enforce_detail { + * inline EnforceFailMessage IsVector(const vector& shape) { + * if (shape.size() == 1) { return EnforceOK(); } + * return c10::str("Shape ", shape, " is not a vector"); + * } + * }} * - * These generic events are built on top of DeviceGuardImpls, analogous - * to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls," - * is no longer entirely accurate, as these classes implement the - * backend-specific logic for a generic backend interface. + * With further usages like {@code CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))} * - * See DeviceGuardImplInterface.h for a list of all supported flags. + * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided + * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in + * user-provided input. */ - // namespace c10 - - -// Parsed from c10/core/impl/InlineStreamGuard.h +// GCC7 is getting an internal compiler error on the new +// implementation, so keep the old one (which evaluates the error +// message eagerly and therefore is undesirable for general use +// compared to the new one) around for it. +// #if defined(__GNUG__) && __GNUC__ <= 7 && !defined(__clang__) -// #pragma once +// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__) -// #include -// #include -// #include +// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__) -/** - * A StreamGuard is an RAII class that changes the current device - * to the device corresponding to some stream, and changes the - * default stream on that device to be this stream. - * - * InlineStreamGuard is a helper class for implementing StreamGuards. - * See InlineDeviceGuard for guidance on how to use this class. 
- */ +// #else -/** - * An OptionalStreamGuard is an RAII class that sets a device to some value on - * initialization, and resets the device to its original value on destruction. - * See InlineOptionalDeviceGuard for more guidance on how to use this class. - */ +// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, +// (lhs), +// (rhs), +// __FILE__, +// __LINE__, +// expr, +// nullptr, +// [&](const auto& arg1, const auto& arg2) { +// return ::c10::enforce_detail::enforceFailMsgImpl( +// arg1, arg2, ##__VA_ARGS__); +// }) - // namespace impl - // namespace c10 +// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, +// (lhs), +// (rhs), +// __FILE__, +// __LINE__, +// expr, +// this, +// [&](const auto& arg1, const auto& arg2) { +// return ::c10::enforce_detail::enforceFailMsgImpl( +// arg1, arg2, ##__VA_ARGS__); +// }) +// #endif + // namespace enforce_detail -// Parsed from c10/core/StreamGuard.h +// #define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) +// CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__) -// #pragma once +// #define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) +// CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_EQ(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::equal_to(), ==, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_NE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::not_equal_to(), !=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::less_equal(), <=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LT(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::less(), <, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::greater_equal(), >=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GT(x, y, ...) 
+// CAFFE_ENFORCE_BINARY_OP(std::greater(), >, x, y, ##__VA_ARGS__) -// #include +// #define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) +// CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( +// cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::equal_to(), ==, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::not_equal_to(), !=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::less_equal(), <=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less(), <, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::greater_equal(), >=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::greater(), >, x, y, ##__VA_ARGS__) /** - * A StreamGuard is an RAII class that changes the current device - * to the device corresponding to some stream, and changes the - * default stream on that device to be this stream. + * Very lightweight logging for the first time API usage. It's beneficial for + * tracking of individual functionality usage in larger applications. * - * Use of StreamGuard is HIGHLY discouraged in operator definitions. In - * a single operator, you probably don't know enough about the global - * state of the world to profitably decide how to set streams. Let - * the caller handle this appropriately, and just use the current stream - * in your operator code. + * In order to ensure light-weightedness of logging, we utilize static variable + * trick - LogAPIUsage will be invoked only once and further invocations will + * just do an atomic check. 
* - * This StreamGuard does NOT have an uninitialized state; it is guaranteed - * to reset the stream and device on exit. If you are in a situation - * where you *might* want to setup a stream guard, see OptionalStreamGuard. - */ - -/** - * An OptionalStreamGuard is an RAII class that sets a device to some value on - * initialization, and resets the device to its original value on destruction. - * See OptionalDeviceGuard for more guidance on how to use this class. - */ - -/** - * A MultiStreamGuard is an RAII class that sets the current streams of a set of - * devices all at once, and resets them to their original values on destruction. + * Example: + * // Logs caller info with an arbitrary text event, if there is a usage. + * C10_LOG_API_USAGE_ONCE("my_api"); */ +// #define C10_LOG_API_USAGE_ONCE(...) +// C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = +// ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__); - // namespace c10 - - -// Parsed from c10/util/FunctionRef.h +// API usage logging capabilities +@Namespace("c10") public static native void SetAPIUsageLogger(@ByVal StringConsumer logger); +@Namespace("c10") public static native void LogAPIUsage(@StdString BytePointer context); +@Namespace("c10") public static native void LogAPIUsage(@StdString String context); -//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file contains some templates that are useful if you are working with the -// STL at all. -// -// No library is required when using these functions. 
-// -//===----------------------------------------------------------------------===// +@Namespace("c10") public static native void SetAPIUsageMetadataLogger( + @ByVal MetadataLogger logger); +@Namespace("c10") public static native void LogAPIUsageMetadata( + @StdString BytePointer context, + @Const @ByRef StringStringMap metadata_map); +@Namespace("c10") public static native void LogAPIUsageMetadata( + @StdString String context, + @Const @ByRef StringStringMap metadata_map); +// Targeting ../DDPLoggingData.java -// c10: modified from llvm::function_ref -// c10: added more SFINAE to enable use in overloaded functions -// #pragma once -// #include -// #include -// #include +@Namespace("c10") public static native void SetPyTorchDDPUsageLogger( + @ByVal DDPLogger logger); +@Namespace("c10") public static native void LogPyTorchDDPUsage(@Const @ByRef DDPLoggingData ddpData); +// Return value is needed to do the static variable initialization trick +@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString BytePointer context); +@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString String context); + // namespace detail -/** An efficient, type-erasing, non-owning reference to a callable. This is - * intended for use as the type of a function parameter that is not used - * after the function in question returns. - * - * This class does not own the callable, so it is not in general safe to store - * a function_ref. */ +// Initializes the c10 logger. +@Namespace("c10") public static native void initLogging(); // namespace c10 +// #endif // C10_UTIL_LOGGING_H_ + // Parsed from c10/util/intrusive_ptr.h // #pragma once -// #include // #include -// #include // #include // #include // #include // #include -// #include // Targeting ../class_.java @@ -12721,6 +13345,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // tells us if the object was allocated by us. 
If it wasn't, no // intrusive_ptr for you! +// NOLINTNEXTLINE(cppcoreguidelines-virtual-class-destructor) + // Increment needs to be acquire-release to make use_count() and // unique() reliable. @Namespace("c10::detail") public static native @Cast("size_t") long atomic_refcount_increment(@Cast("std::atomic*") @ByRef LongPointer refcount); @@ -12775,6 +13401,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SymNode.java +// Targeting ../BackendMetaRef.java + + // Targeting ../WeakStorage.java @@ -12846,6 +13475,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -12897,6 +13527,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { + + + @@ -13043,6 +13676,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { + + @@ -13146,259 +13781,268 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - - - - - - - - - - - - - - - // namespace detail - - // namespace ivalue - - - - // namespace c10 - - -// Parsed from ATen/core/ivalue.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../CustomClassHolder.java - - - // namespace jit - // namespace torch - -@Namespace("c10") public static native @Cast("bool") boolean _fastEqualsForContainer(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); - -@Namespace("c10") public static native Function checkObjectSortSchema( - @Const @SharedPtr("c10::ClassType") @ByRef ClassType t, - @Cast("std::stringstream*") @ByRef Pointer why_not); - -// A comparator that checks ordering of two IValues of same type. - - - -// We need a ComplexHolder because currently the payloads in the Union -// only take 64 bits. 
Since ComplexDouble takes up 128 bits, and is too big -// to fit in the IValue directly, we indirect complex numbers through an intrusive -// pointer to ComplexHolder (which contains a c10::complex). - -// Similar to ComplexHolder, for StreamData3 - - // namespace ivalue - -// This is an owning wrapper for a c10::optional> -// that can be implicitly converted to a (non-owning) optional>. -// Its purpose is to be used in generated code to keep the vector alive -// either until the end of a statement (as a temporary), or as a saved arg -// in autograd. - -// Capsule is an internal implementation detail of custom C++ classes. We -// define it as an owning wrapper for -// c10::intrusive_ptr This wrapper is here to serve as -// an abstraction of the type erased custom class object pointer. It also allow -// pybind11 to treat this as a standalone class to register as a separate type -// caster, instead of a custom pointer holder which the pointer holder type -// caster try to "unwrap" it automatically. - -// IValue is the generic tagged union used by the interpreter to hold -// all value types. -// It is a 16-byte object with an 8-byte payload and an 8-byte tag. -// The tag is currently 4 bytes to determine the type, and 1 byte -// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs -// retain/release calls. 
- - -/// -/// -/// -/// -/// -// #define TORCH_FORALL_TAGS(_) -// _(None) -// _(Tensor) -// _(Storage) -// _(Double) -// _(ComplexDouble) -// _(Int) -// _(SymInt) -// _(SymFloat) -// _(Bool) -// _(Tuple) -// _(String) -// _(Blob) -// _(GenericList) -// _(GenericDict) -// _(Future) -// _(Await) -// _(Device) -// _(Stream) -// _(Object) -// _(PyObject) -// _(Uninitialized) -// _(Capsule) -// _(RRef) -// _(Quantizer) -// _(Generator) -// _(Enum) -// Targeting ../IValue.java - - -// Targeting ../WeakIValue.java - - -// Targeting ../StrongTypePtr.java - - -// Targeting ../WeakTypePtr.java - - -// Targeting ../WeakOrStrongCompilationUnit.java - - -// Targeting ../WeakOrStrongTypePtr.java - - - - - // namespace c10 - -// #include // IWYU pragma: keep - - -// Parsed from ATen/core/List_inl.h - -// #pragma once - -// #include -// #include - - - - - - - - - - - - - - - - - - - - - - - - - -@Namespace("c10::impl") public static native void swap(@ByRef(true) DoubleComplexElementReference lhs, @ByRef(true) DoubleComplexElementReference rhs); - -@Namespace("c10::impl") public static native void swap(@ByRef(true) BooleanElementReference lhs, @ByRef(true) BooleanElementReference rhs); - -@Namespace("c10::impl") public static native void swap(@ByRef(true) LongElementReference lhs, @ByRef(true) LongElementReference rhs); - -@Namespace("c10::impl") public static native void swap(@ByRef(true) DoubleElementReference lhs, @ByRef(true) DoubleElementReference rhs); - -@Namespace("c10::impl") public static native void swap(@ByRef(true) TensorOptionalElementReference lhs, @ByRef(true) TensorOptionalElementReference rhs); - -@Namespace("c10::impl") public static native void swap(@ByRef(true) TensorElementReference lhs, @ByRef(true) TensorElementReference rhs); - -@Namespace("c10::impl") public static native void swap(@ByRef(true) FuturePtrElementReference lhs, @ByRef(true) FuturePtrElementReference rhs); - -@Namespace("c10::impl") public static native void swap(@ByRef(true) 
GenericElementReference lhs, @ByRef(true) GenericElementReference rhs); - - - - - - // namespace impl - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + // namespace detail + + // namespace ivalue + + + + // namespace c10 + + +// Parsed from ATen/core/ivalue.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../CustomClassHolder.java + + + // namespace jit + // namespace torch + +@Namespace("c10") public static native @Cast("bool") boolean _fastEqualsForContainer(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); + +@Namespace("c10") public static native Function checkObjectSortSchema( + @Const @SharedPtr("c10::ClassType") @ByRef ClassType t, + @Cast("std::stringstream*") @ByRef Pointer why_not); + +// A comparator that checks ordering of two IValues of same type. + + + +// We need a ComplexHolder because currently the payloads in the Union +// only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big +// to fit in the IValue directly, we indirect complex numbers through an intrusive +// pointer to ComplexHolder (which contains a c10::complex). + +// Similar to ComplexHolder, for StreamData3 + + // namespace ivalue + +// This is an owning wrapper for a c10::optional> +// that can be implicitly converted to a (non-owning) optional>. +// Its purpose is to be used in generated code to keep the vector alive +// either until the end of a statement (as a temporary), or as a saved arg +// in autograd. + +// Capsule is an internal implementation detail of custom C++ classes. We +// define it as an owning wrapper for +// c10::intrusive_ptr This wrapper is here to serve as +// an abstraction of the type erased custom class object pointer. 
It also allow +// pybind11 to treat this as a standalone class to register as a separate type +// caster, instead of a custom pointer holder which the pointer holder type +// caster try to "unwrap" it automatically. + +// IValue is the generic tagged union used by the interpreter to hold +// all value types. +// It is a 16-byte object with an 8-byte payload and an 8-byte tag. +// The tag is currently 4 bytes to determine the type, and 1 byte +// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs +// retain/release calls. + + +/// +/// +/// +/// +/// +// #define TORCH_FORALL_TAGS(_) +// _(None) +// _(Tensor) +// _(Storage) +// _(Double) +// _(ComplexDouble) +// _(Int) +// _(SymInt) +// _(SymFloat) +// _(SymBool) +// _(Bool) +// _(Tuple) +// _(String) +// _(Blob) +// _(GenericList) +// _(GenericDict) +// _(Future) +// _(Await) +// _(Device) +// _(Stream) +// _(Object) +// _(PyObject) +// _(Uninitialized) +// _(Capsule) +// _(RRef) +// _(Quantizer) +// _(Generator) +// _(Enum) +// Targeting ../IValue.java + + +// Targeting ../WeakIValue.java + + +// Targeting ../StrongTypePtr.java + + +// Targeting ../WeakTypePtr.java + + +// Targeting ../WeakOrStrongCompilationUnit.java + + +// Targeting ../WeakOrStrongTypePtr.java + + + + + // namespace c10 + +// #include // IWYU pragma: keep + + +// Parsed from ATen/core/List_inl.h + +// #pragma once + +// #include +// #include + + + + + + + + + + + + + + + + + + + + + + + + + +@Namespace("c10::impl") public static native void swap(@ByRef(true) DoubleComplexElementReference lhs, @ByRef(true) DoubleComplexElementReference rhs); + +@Namespace("c10::impl") public static native void swap(@ByRef(true) BooleanElementReference lhs, @ByRef(true) BooleanElementReference rhs); + +@Namespace("c10::impl") public static native void swap(@ByRef(true) LongElementReference lhs, @ByRef(true) LongElementReference rhs); + +@Namespace("c10::impl") public static native void swap(@ByRef(true) DoubleElementReference lhs, 
@ByRef(true) DoubleElementReference rhs); + +@Namespace("c10::impl") public static native void swap(@ByRef(true) TensorOptionalElementReference lhs, @ByRef(true) TensorOptionalElementReference rhs); + +@Namespace("c10::impl") public static native void swap(@ByRef(true) TensorElementReference lhs, @ByRef(true) TensorElementReference rhs); + +@Namespace("c10::impl") public static native void swap(@ByRef(true) FuturePtrElementReference lhs, @ByRef(true) FuturePtrElementReference rhs); + +@Namespace("c10::impl") public static native void swap(@ByRef(true) GenericElementReference lhs, @ByRef(true) GenericElementReference rhs); + + + + + + // namespace impl + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@Namespace("c10::impl") public static native @Const IValue ptr_to_first_element(@Const @ByRef GenericList list); @@ -13470,7 +14114,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../GenericListIterator.java -@Namespace("c10::impl") public static native @Const IValue ptr_to_first_element(@Const @ByRef GenericList list); // Targeting ../DoubleComplexList.java @@ -14241,6 +14884,54 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at +// Parsed from ATen/core/VariableHooksInterface.h + +// #pragma once + +// #include +// #include + +// A little explanation about why this file exists at all. We have +// a few methods on Tensor class which require access to reified access to +// AutogradMeta. In open source, this isn't a big deal: we just access +// torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and +// we can put the definitions inline. This is because everything gets balled +// into a single dynamic library in the end. +// +// However, inside our Facebook internal version of our build system, we +// have a split between aten and torch/csrc. So we cannot simply just +// cross this boundary. 
"Now wait," you might say, "Why don't we just +// merge the libraries inside Facebook". Well, the problem is that there +// are some downstream applications which are at binary size limit, and +// incorporating all of the extra code from libtorch would push them +// over (admarket/adreview/service:adreviewservice, see also +// https://github.com/pytorch/pytorch/pull/29299) So if you want to do that, +// we have to fix all of the services like this. +// +// I didn't want to block eliminating Tensor-Variable on this work, so I +// had to introduce another dynamic dispatch to get to the variable +// implementations (which live in torch/csrc/autograd/variable.cpp, FYI). +// +// I also considered using our existing dynamic dispatch mechanism, c10 +// dispatcher, to do this. However, (1) some of the functions on Tensor +// have weird signatures that are not supported by autograd, and (2) +// see this bug https://github.com/pytorch/pytorch/issues/30102 + + +// Targeting ../VariableHooksInterface.java + + + +@Namespace("at::impl") public static native void SetVariableHooks(VariableHooksInterface hooks); +@Namespace("at::impl") public static native VariableHooksInterface GetVariableHooks(); +@Namespace("at::impl") public static native @Cast("bool") boolean HasVariableHooks(); +// Targeting ../VariableHooksRegisterer.java + + + + // namespace at::impl + + // Parsed from torch/csrc/autograd/variable.h // #pragma once @@ -14255,6 +14946,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // #include // #include @@ -14415,6 +15107,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::autograd::impl") public static native @ByRef FunctionPreHookVector hooks(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0); @Namespace("torch::autograd::impl") public static native void clear_hooks(@Const @ByRef TensorBase arg0); +@Namespace("torch::autograd::impl") public static native void 
set_post_acc_grad_hooks( + @Const @ByRef TensorBase arg0, + @UniquePtr PostAccumulateGradHook dict); +@Namespace("torch::autograd::impl") public static native @UniquePtr PostAccumulateGradHook post_acc_grad_hooks( + @Cast("const torch::autograd::Variable*") @ByRef Tensor arg0); + @Namespace("torch::autograd::impl") public static native void create_cpp_hook( @Const @ByRef TensorBase arg0, @Cast("bool") boolean is_retains_grad_hooks/*=false*/); @@ -14932,6 +15630,48 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from ATen/core/function_schema_inl.h + +// #pragma once +// #include +// #include + +// note: windows build doesn't find symbols in operator files unless +// this is a header file + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef FunctionSchema schema); + +@Namespace("c10") public static native @Cast("size_t") long findFirstOutArg(@StdVector Argument args); + + + + + + + + + + + + + + + + + + + +// covariant subtyping of list of Arguments +@Namespace("c10") public static native @Cast("bool") boolean isSubtypeOfList( + @ByVal ArgumentArrayRef child, + @ByVal ArgumentArrayRef parent, + @Cast("std::ostream*") Pointer why_not); + + + + // namespace c10 + + // Parsed from ATen/core/function_schema.h // #pragma once @@ -14981,8 +15721,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // full format: Type(alias)? 
name=default_value @Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Argument arg); -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef FunctionSchema schema); - @Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef FunctionSchema schema); // namespace c10 @@ -14992,45 +15730,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // IWYU pragma: keep -// Parsed from ATen/core/function_schema_inl.h - -// #pragma once -// #include - -// note: windows build doesn't find symbols in operator files unless -// this is a header file - -@Namespace("c10") public static native @Cast("size_t") long findFirstOutArg(@StdVector Argument args); - - - - - - - - - - - - - - - - - - - -// covariant subtyping of list of Arguments -@Namespace("c10") public static native @Cast("bool") boolean isSubtypeOfList( - @ByVal ArgumentArrayRef child, - @ByVal ArgumentArrayRef parent, - @Cast("std::ostream*") Pointer why_not); - - - - // namespace c10 - - // Parsed from ATen/core/op_registration/infer_schema.h // #pragma once @@ -15076,281 +15775,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from ATen/record_function.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include - - -// Kind of record function scope; -@Namespace("at") public enum RecordScope { - // c10/ATen ops, autograd nodes - FUNCTION((byte)(0)), - // Functions/nodes called from the autograd - BACKWARD_FUNCTION((byte)(1)), - // TorchScript functions, methods - TORCHSCRIPT_FUNCTION((byte)(2)), - // Kernel Function dtype Tag - KERNEL_FUNCTION_DTYPE((byte)(3)), - // Torchbind custom class, - CUSTOM_CLASS((byte)(4)), - // 
Generic Build Feature - BUILD_FEATURE((byte)(5)), - // Kernel Function dtype Tag - LITE_INTERPRETER((byte)(6)), - // User defined scope (e.g. with record_function()) - USER_SCOPE((byte)(7)), - // Scopes for static runtime, a specialized TorchScript interpreter - STATIC_RUNTIME_OP((byte)(8)), - STATIC_RUNTIME_MODEL((byte)(9)), - NUM_SCOPES((byte)(10));// must be the last in the list - - public final byte value; - private RecordScope(byte v) { this.value = v; } - private RecordScope(RecordScope e) { this.value = e.value; } - public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - - // namespace at - // namespace std - -// Soft limit on the number of callbacks to use; -@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); - -// An abstract base class for various observer contexts that can be attached to -// the RecordFunction. - -// -// PyTorch callbacks/observers API: -// - -/** - * RecordFunctionCallback represents a pair of callbacks to be used with - * RecordFunction, members: - * start, end - the callbacks to run when entering and exiting the scope; - * optionally, the start callback may return an ObserverContext which will - * be passed to the end callback, use appropriate constructor accordingly. 
- * needs_inputs - whether the callbacks need the inputs passed from the - * observed function/range; NOTE: passing the inputs incurs an additional - * overhead; sampling_probability - if not 1.0, then the callback is - * probabilistically sampled to run; NOTE: start and end callbacks always run as - * a pair and are sampled together; scopes - types of scopes to execute the - * callbacks on (see RecordScope); passing empty set means the callbacks will be - * executed for all possible scope types should_run - optional function that - * returns whether this callback should run; overwrites the effect of setting - * sampling_probability - */ - -// Notes: -// - two types of callbacks are provided: thread local and global -// - thread local callbacks are added/removed only for the given thread -// and are stored locally for each thread and separately from the list -// of the global callbacks -// - global callbacks are stored in a single per process list and are -// invoked by every RecordFunction, in addition to the thread local -// callbacks specific to the given thread -// - we allow the added callbacks to be sampled, by specifying a sampling -// probability for each callback pair, if the start callback is -// not picked to run, the corresponding end callback won't be called -// - a typical use case for the global callbacks is passive monitoring -// in the background (e.g. 
fleet-wide monitoring), without focusing on -// the specific piece of code -// - in contrast, thread local callbacks are enabled locally, on demand, -// for the specific piece of code (range) and are not sampled -// - a typical use case for thread local callbacks is profiler and code -// execution tracer -// - note, thread local callbacks are automatically propagated with -// ThreadLocalState across JIT continuations and async tasks (at::launch) - -@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); -// Targeting ../RecordFunctionCallbacksEntry.java - - - -// Holds pairs (callbacks, unique_id) -// Targeting ../RecordFunction.java - - - -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); - -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - @Cast("at::RecordScope") byte scope); - - // namespace detail - -// optional argument - function's seq_no -// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope( -// guard, fn, inputs, ##__VA_ARGS__); -// } - -// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// scope, fn, inputs, outputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// if (guard.needsInputs()) { -// guard.before(fn, inputs, ##__VA_ARGS__); -// } else { -// guard.before(fn, ##__VA_ARGS__); -// } -// if (guard.needsOutputs()) { -// guard.setOutputs(outputs); -// } -// } - -// #define RECORD_FUNCTION(fn, inputs, ...) 
-// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) - -// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) - -// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) -// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) - -// Custom user scopes in C++; similar to Python's 'with record_function("..."):' -// #define RECORD_USER_SCOPE(fn) -// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) - -// RECORD_USER_SCOPE with inputs -// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) - -// Helper macro to pass in debug handle that is used to -// post process events -// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// scope, fn, debug_handle, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope_and_debug_handle( -// guard, fn, debug_handle, inputs, ##__VA_ARGS__); -// } - -// Helper macros to record LITE INTERPETER scope events with debug handles -// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( -// fn, debug_handle, inputs) -// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) - -// Bookend to the RECORD_FUNCTION macros. Use this after the kernel -// launch to let the profiler bind the outputs to the op that produced -// them. 
Note that guard is declared by RECORD_FUNCTION so this macro -// needs to be called from the same scope as RECORD_FUNCTION -// #define RECORD_OUTPUTS(outputs) -// if (guard.needsOutputs()) { -// guard.setOutputs( -// std::vector(outputs.begin(), outputs.end())); -// } - -/** - * addThreadLocalCallback adds a thread local callback to run with - * RecordFunction, returns handle to use with removeThreadLocalCallback - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); - -/** - * hasThreadLocalCallbacks returns whether there're callbacks registered - * with addThreadLocalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); - -/** - * clearThreadLocalCallbacks removes all thread local callbacks - */ -@Namespace("at") public static native void clearThreadLocalCallbacks(); - -/** - * addGlobalCallback adds a global callback to run with RecordFunction: - * - * only during the program initialization - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); - -/** - * removeCallback removes a callback given the handle returned by - * addThreadLocalCallback or addGlobalCallback; - * - * no other code can run simultaneously - */ -@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); - -/** - * Prevent the given callback from executing. If handle is invalid, - * does nothing. - */ -@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); - -/** - * Allow the given callback, previously disabled with disableCallback, to - * execute again. If handle is invalid, does nothing. 
- */ -@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); - -/** - * hasGlobalCallbacks returns whether there're global callbacks - * registered with pushGlobalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); - -/** - * clearGlobalCallbacks removes all global callbacks - */ -@Namespace("at") public static native void clearGlobalCallbacks(); - -// for both thread local and global callbacks -@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); -@Namespace("at") public static native void clearCallbacks(); - -/** - * enableRecordFunction enables RecordFunction thread locally - */ -@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); -@Namespace("at") public static native void enableRecordFunction(); - -/** - * isRecordFunctionEnabled returns whether RecordFunction - * is enabled thread locally - */ -@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); -// Targeting ../RecordFunctionGuard.java - - -// Targeting ../DisableRecordFunctionGuard.java - - -// Targeting ../RecordFunctionTLS.java - - - -@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); - -@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); - -@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); - - // namespace at - - // Parsed from ATen/core/op_registration/op_allowlist.h // #pragma once @@ -15508,6 +15932,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch +// Parsed from ATen/core/enum_tag.h + +// #pragma once + +// @generated by torchgen/gen.py from enum_tag.h + // Enum of valid tags obtained from the entries in tags.yaml + @Namespace("at") public enum Tag { + core(0), + data_dependent_output(1), + 
dynamic_output_shape(2), + generated(3), + inplace_view(4), + nondeterministic_bitwise(5), + nondeterministic_seeded(6), + pointwise(7), + view_copy(8); + + public final int value; + private Tag(int v) { this.value = v; } + private Tag(Tag e) { this.value = e.value; } + public Tag intern() { for (Tag e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } + } + + + // Parsed from c10/core/CompileTimeFunctionPointer.h // #pragma once @@ -15554,6 +16004,42 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include // Targeting ../OperatorKernel.java + + + + // namespace c10 + + +// Parsed from ATen/core/boxing/BoxedKernel_impl.h + +// #pragma once + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -15633,42 +16119,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// Parsed from ATen/core/boxing/BoxedKernel_impl.h - -// #pragma once - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // namespace c10 - - // Parsed from ATen/core/stack.h // #pragma once @@ -16010,25 +16460,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from ATen/core/boxing/KernelFunction.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include // TODO Instead of this, move torch::jit::Stack to the c10 namespace. -// Targeting ../KernelFunction.java - - - - - -// #include - - // Parsed from ATen/core/boxing/KernelFunction_impl.h // #include @@ -16091,6 +16522,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { + + +// Parsed from ATen/core/boxing/KernelFunction.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include // TODO Instead of this, move torch::jit::Stack to the c10 namespace. 
+// Targeting ../KernelFunction.java + + + + + +// #include // Parsed from ATen/core/dispatch/CppSignature.h @@ -16170,32 +16620,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from ATen/core/enum_tag.h - -// #pragma once - -// @generated by torchgen/gen.py from enum_tag.h - // Enum of valid tags obtained from the entries in tags.yaml - @Namespace("at") public enum Tag { - core(0), - data_dependent_output(1), - dynamic_output_shape(2), - generated(3), - inplace_view(4), - nondeterministic_bitwise(5), - nondeterministic_seeded(6), - pointwise(7), - view_copy(8); - - public final int value; - private Tag(int v) { this.value = v; } - private Tag(Tag e) { this.value = e.value; } - public Tag intern() { for (Tag e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } - } - - - // Parsed from ATen/core/function.h // #pragma once @@ -16336,8 +16760,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // Just for inferFunctionSchemaFromFunctor -// #include // #include +// #include // #if defined C10_MOBILE /** @@ -16356,9 +16780,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // For multipy/torchdeploy use case -@Namespace("torch") public enum _RegisterOrVerify { - REGISTER(0), - VERIFY(1); +@Namespace("torch") public enum _RegisterOrVerify { REGISTER(0), VERIFY(1); public final int value; private _RegisterOrVerify(int v) { this.value = v; } @@ -16608,13 +17030,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // static const torch::detail::TorchLibraryInit C10_CONCATENATE( // TORCH_LIBRARY_IMPL_static_init_##ns##_##k##_, uid)( // torch::Library::IMPL, -// c10::guts::if_constexpr( -// []() { -// return &C10_CONCATENATE( -// TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid); -// }, -// []() { return [](torch::Library&) -> void {}; }), +// (c10::impl::dispatch_key_allowlist_check(c10::DispatchKey::k) +// 
? &C10_CONCATENATE(TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid) +// : [](torch::Library&) -> void {}), // #ns, // c10::make_optional(c10::DispatchKey::k), // __FILE__, @@ -16653,14 +17071,99 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include +// Default DispatchKey::Autograd fallback for built-in operators. +// Can be registered for custom operators. @Namespace("torch::autograd") public static native @ByVal CppFunction autogradNotImplementedFallback(); +// Default DispatchKey::AdInplaceOrView fallback for built-in operators +// Can be registered for custom operators. @Namespace("torch::autograd") public static native @ByVal CppFunction autogradNotImplementedInplaceOrViewFallback(); +// Default DispatchKey::Autograd fallback for all other operators (i.e. custom +// operators) +@Namespace("torch::autograd") public static native @ByVal CppFunction basicAutogradNotImplementedFallback(); + +@Namespace("torch::autograd") public enum AutogradFallbackMode { + Nothing(0), // Fallback is a redispatch + Warn(1), // Fallback raises a warning if backward is called + Error(2);// Fallback raises an error if backward is called + + public final int value; + private AutogradFallbackMode(int v) { this.value = v; } + private AutogradFallbackMode(AutogradFallbackMode e) { this.value = e.value; } + public AutogradFallbackMode intern() { for (AutogradFallbackMode e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +// Change the behavior of "basicAutogradNotImplementedFallback" +// In Python this is: +// - torch._C._set_autograd_fallback_mode(str) -> None +// - torch._C._get_autograd_fallback_mode() -> str +@Namespace("torch::autograd") public static native void setAutogradFallbackMode(AutogradFallbackMode mode); +@Namespace("torch::autograd") public static native void setAutogradFallbackMode(@Cast("torch::autograd::AutogradFallbackMode") int mode); +@Namespace("torch::autograd") 
public static native AutogradFallbackMode getAutogradFallbackMode(); + // namespace autograd // namespace torch +// Parsed from c10/util/flat_hash_map.h + +// Taken from +// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp +// with fixes applied: +// - https://github.com/skarupke/flat_hash_map/pull/25 +// - https://github.com/skarupke/flat_hash_map/pull/26 +// - replace size_t with uint64_t to fix it for 32bit +// - add "GCC diagnostic" pragma to ignore -Wshadow +// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems +// to have issues with it otherwise +// - fix compiler warnings in operator templated_iterator + +// Copyright Malte Skarupke 2017. +// Distributed under the Boost Software License, Version 1.0. +// (See http://www.boost.org/LICENSE_1_0.txt) + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif + +// #if defined(_MSC_VER) && !defined(__clang__) +// #pragma warning(push) +// #pragma warning(disable : 4624) // destructor was implicitly defined as deleted +// #endif + +// #ifdef _MSC_VER +// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ +// #else +// #define SKA_NOINLINE(...) 
__VA_ARGS__ __attribute__((noinline)) +// #endif + +@Namespace("ska::detailv3") public static native byte log2(@Cast("uint64_t") long value); + +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) + // namespace detailv3 + + // end namespace ska + +// #if defined(_MSC_VER) && !defined(__clang__) +// #pragma warning(pop) +// #endif + + // Parsed from torch/csrc/autograd/anomaly_mode.h // #pragma once @@ -16682,6 +17185,28 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch +// Parsed from c10/core/GradMode.h + +// #pragma once + +// #include +// #include +// Targeting ../GradMode.java + + +// Targeting ../AutoGradMode.java + + +// Targeting ../NoGradGuard.java + + +// Targeting ../AutoFwGradMode.java + + + + // namespace c10 + + // Parsed from ATen/core/grad_mode.h // #pragma once @@ -16731,7 +17256,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include +// #include // #include // Targeting ../SafePyObject.java @@ -16806,13 +17331,287 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at -// Parsed from c10/core/impl/PythonDispatcherTLS.h +// Parsed from ATen/record_function.h // #pragma once -// #include -// #include +// #include +// #include +// #include // #include +// #include +// #include + +// #include +// #include +// #include +// #include + + +// Kind of record function scope; +@Namespace("at") public enum RecordScope { + // c10/ATen ops, autograd nodes + FUNCTION((byte)(0)), + // Functions/nodes called from the autograd + BACKWARD_FUNCTION((byte)(1)), + // TorchScript functions, methods + TORCHSCRIPT_FUNCTION((byte)(2)), + // Kernel Function dtype Tag + KERNEL_FUNCTION_DTYPE((byte)(3)), + // Torchbind custom class, + CUSTOM_CLASS((byte)(4)), + // Generic Build Feature + BUILD_FEATURE((byte)(5)), + // Kernel Function dtype Tag + 
LITE_INTERPRETER((byte)(6)), + // User defined scope (e.g. with record_function()) + USER_SCOPE((byte)(7)), + // Scopes for static runtime, a specialized TorchScript interpreter + STATIC_RUNTIME_OP((byte)(8)), + STATIC_RUNTIME_MODEL((byte)(9)), + NUM_SCOPES((byte)(10));// must be the last in the list + + public final byte value; + private RecordScope(byte v) { this.value = v; } + private RecordScope(RecordScope e) { this.value = e.value; } + public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + + // namespace at + // namespace std + +// Soft limit on the number of callbacks to use; +@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); + +// An abstract base class for various observer contexts that can be attached to +// the RecordFunction. + +// +// PyTorch callbacks/observers API: +// + +/** + * RecordFunctionCallback represents a pair of callbacks to be used with + * RecordFunction, members: + * start, end - the callbacks to run when entering and exiting the scope; + * optionally, the start callback may return an ObserverContext which will + * be passed to the end callback, use appropriate constructor accordingly. 
+ * needs_inputs - whether the callbacks need the inputs passed from the + * observed function/range; NOTE: passing the inputs incurs an additional + * overhead; sampling_probability - if not 1.0, then the callback is + * probabilistically sampled to run; NOTE: start and end callbacks always run as + * a pair and are sampled together; scopes - types of scopes to execute the + * callbacks on (see RecordScope); passing empty set means the callbacks will be + * executed for all possible scope types should_run - optional function that + * returns whether this callback should run; overwrites the effect of setting + * sampling_probability + */ + +// Notes: +// - two types of callbacks are provided: thread local and global +// - thread local callbacks are added/removed only for the given thread +// and are stored locally for each thread and separately from the list +// of the global callbacks +// - global callbacks are stored in a single per process list and are +// invoked by every RecordFunction, in addition to the thread local +// callbacks specific to the given thread +// - we allow the added callbacks to be sampled, by specifying a sampling +// probability for each callback pair, if the start callback is +// not picked to run, the corresponding end callback won't be called +// - a typical use case for the global callbacks is passive monitoring +// in the background (e.g. 
fleet-wide monitoring), without focusing on +// the specific piece of code +// - in contrast, thread local callbacks are enabled locally, on demand, +// for the specific piece of code (range) and are not sampled +// - a typical use case for thread local callbacks is profiler and code +// execution tracer +// - note, thread local callbacks are automatically propagated with +// ThreadLocalState across JIT continuations and async tasks (at::launch) + +@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); +// Targeting ../RecordFunctionCallbacksEntry.java + + + +// Holds pairs (callbacks, unique_id) +// Targeting ../RecordFunction.java + + + +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); + +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + @Cast("at::RecordScope") byte scope); + + // namespace detail + +// optional argument - function's seq_no +// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope( +// guard, fn, inputs, ##__VA_ARGS__); +// } + +// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// scope, fn, inputs, outputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// if (guard.needsInputs()) { +// guard.before(fn, inputs, ##__VA_ARGS__); +// } else { +// guard.before(fn, ##__VA_ARGS__); +// } +// if (guard.needsOutputs()) { +// guard.setOutputs(outputs); +// } +// } + +// #define RECORD_FUNCTION(fn, inputs, ...) 
+// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) + +// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) + +// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) +// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) + +// Custom user scopes in C++; similar to Python's 'with record_function("..."):' +// #define RECORD_USER_SCOPE(fn) +// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) + +// RECORD_USER_SCOPE with inputs +// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) + +// Helper macro to pass in debug handle that is used to +// post process events +// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// scope, fn, debug_handle, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope_and_debug_handle( +// guard, fn, debug_handle, inputs, ##__VA_ARGS__); +// } + +// Helper macros to record LITE INTERPETER scope events with debug handles +// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( +// fn, debug_handle, inputs) +// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) + +// Bookend to the RECORD_FUNCTION macros. Use this after the kernel +// launch to let the profiler bind the outputs to the op that produced +// them. 
Note that guard is declared by RECORD_FUNCTION so this macro +// needs to be called from the same scope as RECORD_FUNCTION +// #define RECORD_OUTPUTS(outputs) +// if (guard.needsOutputs()) { +// guard.setOutputs( +// std::vector(outputs.begin(), outputs.end())); +// } + +/** + * addThreadLocalCallback adds a thread local callback to run with + * RecordFunction, returns handle to use with removeThreadLocalCallback + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); + +/** + * hasThreadLocalCallbacks returns whether there're callbacks registered + * with addThreadLocalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); + +/** + * clearThreadLocalCallbacks removes all thread local callbacks + */ +@Namespace("at") public static native void clearThreadLocalCallbacks(); + +/** + * addGlobalCallback adds a global callback to run with RecordFunction: + * + * only during the program initialization + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); + +/** + * removeCallback removes a callback given the handle returned by + * addThreadLocalCallback or addGlobalCallback; + * + * no other code can run simultaneously + */ +@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); + +/** + * Prevent the given callback from executing. If handle is invalid, + * does nothing. + */ +@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); + +/** + * Allow the given callback, previously disabled with disableCallback, to + * execute again. If handle is invalid, does nothing. 
+ */ +@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); + +/** + * hasGlobalCallbacks returns whether there're global callbacks + * registered with pushGlobalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); + +/** + * clearGlobalCallbacks removes all global callbacks + */ +@Namespace("at") public static native void clearGlobalCallbacks(); + +// for both thread local and global callbacks +@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); +@Namespace("at") public static native void clearCallbacks(); + +/** + * enableRecordFunction enables RecordFunction thread locally + */ +@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); +@Namespace("at") public static native void enableRecordFunction(); + +/** + * isRecordFunctionEnabled returns whether RecordFunction + * is enabled thread locally + */ +@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); +// Targeting ../RecordFunctionGuard.java + + +// Targeting ../DisableRecordFunctionGuard.java + + +// Targeting ../RecordFunctionTLS.java + + + +@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); + +@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); + +@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); + + // namespace at + + +// Parsed from c10/core/impl/PythonDispatcherTLS.h + +// #pragma once + +// #include +// #include // Targeting ../PythonDispatcherTLS.java @@ -16829,7 +17628,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include +// #include // Targeting ../TorchDispatchModeTLS.java @@ -17149,7 +17948,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// 
#include // #include // #include // #include @@ -17158,9 +17956,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// Forward-declares at::cuda::NVRTC - // at::cuda - +// Forward-declares at::Context, at::Generator and at::cuda::NVRTC + // namespace cuda + // namespace at // NB: Class must live in `at` due to limitations of Registry.h. @@ -17238,6 +18036,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at +// Parsed from ATen/detail/MTIAHooksInterface.h + +// #pragma once + +// #include +// #include + +// #include + +// #include +// #include +// #include + +// Targeting ../DLDevice_.java + + + +@Namespace("at") @MemberGetter public static native @Cast("const char*") BytePointer MTIA_HELP(); +// Targeting ../MTIAHooksInterface.java + + +// Targeting ../MTIAHooksArgs.java + + +// #define REGISTER_MTIA_HOOKS(clsname) +// C10_REGISTER_CLASS(MTIAHooksRegistry, clsname, clsname) +@Namespace("at::detail") public static native @Const @ByRef MTIAHooksInterface getMTIAHooks(); + // namespace detail + // namespace at + + // Parsed from ATen/detail/ORTHooksInterface.h // #pragma once @@ -17260,6 +18089,59 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at +// Parsed from ATen/detail/PrivateUse1HooksInterface.h + +// #pragma once + +// #include +// #include +// #include +// Targeting ../PrivateUse1HooksInterface.java + + +// Targeting ../PrivateUse1HooksArgs.java + + + +@Namespace("at") public static native void RegisterPrivateUse1HooksInterface(PrivateUse1HooksInterface hook_); + +@Namespace("at") public static native PrivateUse1HooksInterface GetPrivateUse1HooksInterface(); + + + + +// Parsed from ATen/detail/XPUHooksInterface.h + +// #pragma once + +// #include +// #include + +// #include + +// #include +// #include +// #include + + +// We use forward declaration here instead of #include to avoid +// leaking DLPack implementation detail to every project that includes 
`ATen/Context.h`, which in turn +// would lead to a conflict when linked with another project using DLPack (for example TVM) + +@Namespace("at") @MemberGetter public static native @Cast("const char*") BytePointer XPU_HELP(); +// Targeting ../XPUHooksInterface.java + + +// Targeting ../XPUHooksArgs.java + + +// #define REGISTER_XPU_HOOKS(clsname) +// C10_REGISTER_CLASS(XPUHooksRegistry, clsname, clsname) +@Namespace("at::detail") public static native @Const @ByRef XPUHooksInterface getXPUHooks(); + // namespace detail + // namespace at + + // Parsed from c10/core/QEngine.h // #pragma once @@ -17298,7 +18180,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include // #include @@ -17344,7 +18225,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include +// #include +// #include // #include // #include // #include @@ -17376,6 +18260,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @Cast("bool") boolean hasCUDA(); +@Namespace("at") public static native @Cast("bool") boolean hasMTIA(); + @Namespace("at") public static native @Cast("bool") boolean hasHIP(); @Namespace("at") public static native @Cast("bool") boolean hasIPU(); @@ -17386,6 +18272,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @Cast("bool") boolean hasORT(); +@Namespace("at") public static native @Cast("bool") boolean hasXPU(); + // Despite its name, this function returns the number of *CUDA* GPUs. 
@Namespace("at") public static native @Cast("size_t") long getNumGPUs(); @@ -17450,6 +18338,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include +@Namespace("at::detail") public static native void check_size_nonnegative(@ByVal LongArrayRef size); +@Namespace("at::detail") public static native void check_size_nonnegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + +@Namespace("at::detail") public static native void check_size_nonnegative(@ByVal SymIntArrayRef size); + @Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( @ByVal LongArrayRef sizes, @Cast("size_t") long itemsize, @@ -17958,6 +18851,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("at::CheckedFrom") String c, @Const @ByRef TensorArg t1, @Const @ByRef TensorArg t2); +@Namespace("at") public static native void checkAllSameSize(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef tensors); +@Namespace("at") public static native void checkAllSameSize(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef tensors); @Namespace("at") public static native void checkDefined(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorArg t); @Namespace("at") public static native void checkDefined(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorArg t); @Namespace("at") public static native void checkAllDefined(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef t); @@ -18179,6 +19074,209 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at +// Parsed from ATen/ops/from_blob.h + +// #pragma once +// #include + +@Namespace("at::detail") public static native void noopDelete(Pointer arg0); + + +// Targeting ../TensorMaker.java + + + +@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal LongArrayRef sizes); +@Namespace("at") public static 
native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + PointerConsumer deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter); +@Namespace("at") public 
static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + PointerConsumer deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); 
+@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + + // namespace at + + +// Parsed from ATen/ops/tensor.h + +// #pragma once +// #include +// #include + +// These functions are defined in ATen/Utils.cpp. 
+// #define TENSOR(T, S) +// TORCH_API Tensor tensor(ArrayRef values, const TensorOptions& options); +// inline Tensor tensor( +// std::initializer_list values, const TensorOptions& options) { +// return at::tensor(ArrayRef(values), options); +// } +// inline Tensor tensor(T value, const TensorOptions& options) { +// return at::tensor(ArrayRef(value), options); +// } +// inline Tensor tensor(ArrayRef values) { +// return at::tensor(std::move(values), at::dtype(k##S)); +// } +// inline Tensor tensor(std::initializer_list values) { +// return at::tensor(ArrayRef(values)); +// } +// inline Tensor tensor(T value) { +// return at::tensor(ArrayRef(value)); +// } +@Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(short value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(short value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(int value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(int value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values, @Const 
@ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... values); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(float value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(float value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(double value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(double value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values); + @Namespace("at") public static native @ByVal Tensor 
tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value); +@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value); +// #undef TENSOR + + // namespace at + + // Parsed from ATen/ops/abs.h // #pragma once @@ -19003,18 +20101,34 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // #include -// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor +// aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor @Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); @Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); -// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + +// aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor +@Namespace("at") public static native @ByVal Tensor affine_grid_generator_symint(@Const @ByRef Tensor theta, @ByVal SymIntArrayRef size, @Cast("bool") boolean align_corners); + + +// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); @Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); -// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners, @ByRef Tensor out); +// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor affine_grid_generator_symint_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal SymIntArrayRef size, @Cast("bool") boolean align_corners); + + +// aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor affine_grid_generator_symint_outf(@Const @ByRef Tensor theta, @ByVal SymIntArrayRef size, @Cast("bool") boolean align_corners, @ByRef Tensor out); + + + // Parsed from ATen/ops/affine_grid_generator_backward.h @@ -19041,11 +20155,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor +// aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor @Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); @Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +// aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor +@Namespace("at") public static native @ByVal Tensor 
affine_grid_generator_backward_symint(@Const @ByRef Tensor grad, @ByVal SymIntArrayRef size, @Cast("bool") boolean align_corners); + + + // Parsed from ATen/ops/alias.h @@ -20815,13 +21934,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor -@Namespace("at") public static native @ByVal Tensor batch_norm_backward_elemt(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count); +// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count) -> Tensor +@Namespace("at") public static native @ByVal Tensor batch_norm_backward_elemt(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor sum_dy, @Const @ByRef Tensor sum_dy_xmu, @Const @ByRef Tensor count); -// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_out(@ByRef Tensor out, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count); -// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count, @ByRef Tensor out); +// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_out(@ByRef Tensor out, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor sum_dy, @Const @ByRef Tensor sum_dy_xmu, @Const @ByRef Tensor count); +// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor sum_dy, Tensor sum_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor sum_dy, @Const @ByRef Tensor sum_dy_xmu, @Const @ByRef Tensor count, @ByRef Tensor out); @@ -23177,11 +24296,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor +// aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, SymInt[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); + +// aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, SymInt[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); + + // aten::conv1d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @@ -23215,11 +24341,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor +// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); + +// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); + + // aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @@ -23253,11 +24386,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor +// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); + +// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); + + // aten::conv3d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); @@ -23415,12 +24555,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor +// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector 
long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); + + + // Parsed from ATen/ops/conv_transpose2d.h @@ -23447,12 +24594,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor +// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); + + + // Parsed from ATen/ops/conv_transpose3d.h @@ -23479,12 +24633,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor +// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); + + + // Parsed from ATen/ops/convolution.h @@ -26221,6 +27382,77 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/ops/empty_permuted.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout); +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... physical_layout); + + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout); +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... physical_layout); + + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor empty_permuted_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout); +@Namespace("at") public static native @ByRef Tensor empty_permuted_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... physical_layout); + + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_permuted_outf(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_permuted_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByRef Tensor out); + + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout); +@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... physical_layout); + + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_outf(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_outf(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByRef Tensor out); + + + + + // Parsed from ATen/ops/empty_quantized.h // #pragma once @@ -26807,29 +28039,75 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n); -// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + + +// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + +// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye_symint(@ByVal SymInt n, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor eye_symint(@ByVal SymInt n); + + +// aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye_symint(@ByVal SymInt n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m); -// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + + +// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + +// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye_symint(@ByVal SymInt n, @ByVal SymInt m, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor eye_symint(@ByVal SymInt n, @ByVal SymInt m); + + +// aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye_symint(@ByVal SymInt n, @ByVal SymInt m, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor eye_out(@ByRef Tensor out, @Cast("int64_t") long n); -// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor eye_outf(@Cast("int64_t") long n, @ByRef Tensor out); -// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + +// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eye_symint_out(@ByRef Tensor out, @ByVal SymInt n); + + +// aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eye_symint_outf(@ByVal SymInt n, @ByRef Tensor out); + + +// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor eye_out(@ByRef Tensor out, @Cast("int64_t") long n, @Cast("int64_t") long m); -// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor eye_outf(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByRef Tensor out); +// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eye_symint_out(@ByRef Tensor out, @ByVal SymInt n, @ByVal SymInt m); + + +// aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eye_symint_outf(@ByVal SymInt n, @ByVal SymInt m, @ByRef Tensor out); + + + // Parsed from ATen/ops/fake_quantize_per_channel_affine.h @@ -27328,17 +28606,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +// aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self); -// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fft_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_fft_symint(@Const @ByRef Tensor self); + + +// aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_fft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_fft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); + + +// aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fft_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_fft2.h @@ -27365,20 +28661,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +// aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_fft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_fftfreq.h @@ -27444,20 +28761,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_fftn_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_fftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_fftn.out(Tensor self, SymInt[1]? 
s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_fftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_fftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_fftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_fftshift.h @@ -27516,17 +28854,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self); -// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_hfft_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft_symint(@Const @ByRef Tensor self); + + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_hfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_hfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_hfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); + + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_hfft_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_hfft2.h @@ -27553,20 +28909,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +// aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_hfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_hfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +// aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_hfftn.h @@ -27593,20 +28970,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +// aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_hfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_hfftn_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_hfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_hfftn.out(Tensor self, SymInt[1]? 
s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_ifft.h @@ -27633,17 +29031,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self); -// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ifft_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft_symint(@Const @ByRef Tensor self); + + +// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_ifft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); + + +// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ifft_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_ifft2.h @@ -27670,20 +29086,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +// aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ifft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ifft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_ifftn.h @@ -27710,20 +29147,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +// aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ifftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ifftn_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ifftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ifftn.out(Tensor self, SymInt[1]? 
s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_ifftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ifftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_ifftshift.h @@ -27782,17 +29240,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self); -// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ihfft_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft_symint(@Const @ByRef Tensor self); + + +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ihfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ihfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ihfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); + + +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ihfft_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_ihfft2.h @@ -27819,20 +29295,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_ihfft2.out(Tensor self, int[1]? 
s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ihfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_ihfftn.h @@ -27859,20 +29356,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +// aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ihfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfftn_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ihfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// 
aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +// aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_irfft.h @@ -27899,17 +29417,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +// aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self); -// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_irfft_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft_symint(@Const @ByRef Tensor self); + + +// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); + + +// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfft_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_irfft2.h @@ -27936,20 +29472,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +// aten::fft_irfft2(Tensor self, SymInt[1]? 
s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_irfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_irfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_irfft2.out(Tensor self, SymInt[1]? 
s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_irfftn.h @@ -27976,20 +29533,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_irfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_irfftn_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_irfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? 
dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_irfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_rfft.h @@ -28016,17 +29594,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self); -// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfft_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft_symint(@Const @ByRef Tensor self); + + +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); + + +// aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfft_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntOptional n, @Cast("int64_t") long dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_rfft2.h @@ -28053,20 +29649,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_rfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fft_rfftfreq.h @@ -28132,20 +29749,41 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +// aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self); @Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + +// aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_rfftn_symint(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_rfftn_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_rfftn.out(Tensor self, SymInt[1]? 
s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); @Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); -// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_rfftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); + + +// aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_rfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); + + + // Parsed from ATen/ops/fill.h @@ -28938,271 +30576,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from ATen/ops/from_blob.h - -// #pragma once -// #include - -@Namespace("at::detail") public static native void noopDelete(Pointer arg0); - - -// Targeting ../TensorMaker.java - - - -@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal LongArrayRef sizes); -@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Const @ByRef PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Const @ByRef PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") Pointer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByRef @Cast("void(*)(void*)") long deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) 
@StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Const @ByRef PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Const @ByRef PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByRef @Cast("void(*)(void*)") Pointer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") long deleter); - -@Namespace("at") public static native @ByVal Tensor 
from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - @Const @ByRef PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - @Const @ByRef PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("int64_t") long storage_offset, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("int64_t") long storage_offset, - @ByRef @Cast("void(*)(void*)") Pointer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - @ByRef 
@Cast("void(*)(void*)") long deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("int64_t") long storage_offset, - @Const @ByRef PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("int64_t") long storage_offset, - @Const @ByRef PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - @ByRef @Cast("void(*)(void*)") Pointer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("int64_t") long storage_offset, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional 
target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("int64_t") long storage_offset, - @ByRef @Cast("void(*)(void*)") long deleter); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @Const @ByRef PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @Const @ByRef PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByRef @Cast("void(*)(void*)") Pointer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByRef @Cast("void(*)(void*)") long deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @Const @ByRef PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer 
data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @Const @ByRef PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByRef @Cast("void(*)(void*)") Pointer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByRef @Cast("void(*)(void*)") long deleter); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
strides); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); - - // namespace at - - // Parsed from ATen/ops/from_file.h // #pragma once @@ -34876,11 +36249,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor +// aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor @Namespace("at") public static native @ByVal Tensor linalg_vander(@Const @ByRef Tensor x, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional N); @Namespace("at") public static native @ByVal Tensor linalg_vander(@Const @ByRef Tensor x); +// aten::linalg_vander(Tensor x, *, SymInt? N=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_vander_symint(@Const @ByRef Tensor x, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional N); +@Namespace("at") public static native @ByVal Tensor linalg_vander_symint(@Const @ByRef Tensor x); + + + // Parsed from ATen/ops/linalg_vecdot.h @@ -35988,13 +37367,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? 
grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) -@Namespace("at") public static native @ByVal T_TensorTensorVectorTensorVector_T lstm_mps_backward(@Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) +@Namespace("at") public static native @ByVal T_TensorTensorVectorTensorVector_T lstm_mps_backward(@Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); -// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? 
grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () -@Namespace("at") public static native void lstm_mps_backward_out(@ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2, @Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); -// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () -@Namespace("at") public static native void lstm_mps_backward_outf(@Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2); +// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () +@Namespace("at") public static native void lstm_mps_backward_out(@ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2, @Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? 
grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () +@Namespace("at") public static native void lstm_mps_backward_outf(@Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2); @@ -36958,18 +38337,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size); @Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); -// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_unpool2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size); + + +// aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_unpool2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByRef Tensor out); + + +// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor @Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size); @Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); +// aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_unpool2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size); + + + // Parsed from ATen/ops/max_unpool3d.h @@ -36996,18 +38391,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); @Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + + +// aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); + + +// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); @Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_unpool3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor max_unpool3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); + + + // Parsed from ATen/ops/maximum.h @@ -37237,6 +38651,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::min(Tensor self) -> Tensor @Namespace("at") public static native @ByVal Tensor min(@Const @ByRef Tensor self); +// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor min_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor min_outf(@Const @ByRef Tensor self, @ByRef Tensor out); + // aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor min_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); // aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) @@ -40624,6 +42043,43 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/ops/nonzero_static.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nonzero_static_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long size, @Cast("int64_t") long fill_value/*=-1*/); +@Namespace("at") public static native @ByRef Tensor nonzero_static_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long size); +// aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor nonzero_static_outf(@Const @ByRef Tensor self, @Cast("int64_t") long size, @Cast("int64_t") long fill_value, @ByRef Tensor out); + +// aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor nonzero_static(@Const @ByRef Tensor self, @Cast("int64_t") long size, @Cast("int64_t") long fill_value/*=-1*/); +@Namespace("at") public static native @ByVal Tensor nonzero_static(@Const @ByRef Tensor self, @Cast("int64_t") long size); + + + + // Parsed from ATen/ops/norm.h // #pragma once @@ -42573,6 +44029,48 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/ops/quantized_max_pool3d.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = 
"at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + +// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) 
@StdVector long... kernel_size); +// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); + + + + // Parsed from ATen/ops/quantized_rnn_relu_cell.h // #pragma once @@ -42917,160 +44415,160 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size); +// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt high, @ByVal SymIntArrayRef size); -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); +// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? 
dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size); +// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size); -// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); +// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal LongArrayRef size); @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!) +// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal SymIntArrayRef size); +// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @ByVal SymInt high, @ByVal SymIntArrayRef size); -// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByRef Tensor out); +// aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@ByVal SymInt high, @ByVal SymIntArrayRef size, @ByRef Tensor out); -// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); +// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? 
generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size); +// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size); -// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByRef Tensor out); +// aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByRef Tensor out); -// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); +// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@ByVal SymInt low, @ByVal SymInt high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); @@ -43100,31 +44598,79 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high); -// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + + +// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + +// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like_symint(@Const @ByRef Tensor self, @ByVal SymInt high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor randint_like_symint(@Const @ByRef Tensor self, @ByVal SymInt high); + + +// aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like_symint(@Const @ByRef Tensor self, @ByVal SymInt high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + + +// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high); -// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + + +// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + +// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like_symint(@Const @ByRef Tensor self, @ByVal SymInt low, @ByVal SymInt high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor randint_like_symint(@Const @ByRef Tensor self, @ByVal SymInt low, @ByVal SymInt high); + + +// aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like_symint(@Const @ByRef Tensor self, @ByVal SymInt low, @ByVal SymInt high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + + +// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long high); -// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + + +// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randint_like_outf(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + +// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymInt high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor randint_like_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymInt high); + + +// aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); + + +// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high); -// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + + +// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randint_like_outf(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymInt low, @ByVal SymInt high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor randint_like_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymInt low, @ByVal SymInt high); + + +// aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt low, @ByVal SymInt high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); + + + // Parsed from ATen/ops/randn.h @@ -43430,29 +44976,75 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n); -// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + + +// aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + +// aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm_symint(@ByVal SymInt n, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randperm_symint(@ByVal SymInt n); + + +// aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm_symint(@ByVal SymInt n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator); -// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + + +// aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) + +// aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm_symint(@ByVal SymInt n, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randperm_symint(@ByVal SymInt n, @ByVal GeneratorOptional generator); + + +// aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm_symint(@ByVal SymInt n, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randperm_out(@ByRef Tensor out, @Cast("int64_t") long n); -// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) + + +// aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randperm_outf(@Cast("int64_t") long n, @ByRef Tensor out); -// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + +// aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randperm_symint_out(@ByRef Tensor out, @ByVal SymInt n); + + +// aten::randperm.out(SymInt n, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randperm_symint_outf(@ByVal SymInt n, @ByRef Tensor out); + + +// aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randperm_out(@ByRef Tensor out, @Cast("int64_t") long n, @ByVal GeneratorOptional generator); -// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + + +// aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randperm_outf(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randperm_symint_out(@ByRef Tensor out, @ByVal SymInt n, @ByVal GeneratorOptional generator); + + +// aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor randperm_symint_outf(@ByVal SymInt n, @ByVal GeneratorOptional generator, @ByRef Tensor out); + + + // Parsed from ATen/ops/range.h @@ -45125,22 +46717,43 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor @Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); @Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts); @Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); @Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); -// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor +@Namespace("at") public static native @ByVal Tensor roll_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); +@Namespace("at") public static native @ByVal Tensor roll_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts); +@Namespace("at") public static native @ByVal Tensor roll_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + + +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); @Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef shifts); @Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); @Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); -// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) + + +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal LongArrayRef dims, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor roll_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); +@Namespace("at") public static native @ByRef Tensor roll_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts); +@Namespace("at") public static native @ByRef Tensor roll_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + + +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor roll_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal LongArrayRef dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor roll_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); + + + // Parsed from ATen/ops/rot90.h @@ -45631,8 +47244,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/); +// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? 
scale=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scale); @Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value); @@ -47810,20 +49423,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values); -// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal BoolOptional is_coalesced); -// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal BoolOptional is_coalesced); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal BoolOptional is_coalesced); // aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal LongArrayRef size); @@ -50769,10 +52382,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); // aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @@ -50783,13 +52396,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); // aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @@ -50801,15 +52414,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); @@ -50847,26 +52460,26 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); // aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); -// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); @@ -51213,6 +52826,187 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/ops/sym_constrain_range.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::sym_constrain_range(Scalar size, *, int? min=None, int? max=None) -> () +@Namespace("at") public static native void sym_constrain_range(@Const @ByRef Scalar size, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional min, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional max); +@Namespace("at") public static native void sym_constrain_range(@Const @ByRef Scalar size); + + + + +// Parsed from ATen/ops/sym_constrain_range_for_size.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::sym_constrain_range_for_size(Scalar size, *, int? min, int? 
max) -> () +@Namespace("at") public static native void sym_constrain_range_for_size(@Const @ByRef Scalar size, @ByVal LongOptional min, @ByVal LongOptional max); + + + + +// Parsed from ATen/ops/sym_numel.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::sym_numel(Tensor self) -> SymInt +@Namespace("at") public static native @ByVal SymInt __dispatch_sym_numel(@Const @ByRef Tensor self); + + + + +// Parsed from ATen/ops/sym_size.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::sym_size.int(Tensor self, int dim) -> SymInt +@Namespace("at") public static native @ByVal SymInt __dispatch_sym_size(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + + + + +// Parsed from ATen/ops/sym_storage_offset.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::sym_storage_offset(Tensor self) -> SymInt +@Namespace("at") public static native @ByVal SymInt __dispatch_sym_storage_offset(@Const @ByRef Tensor self); + + + + +// Parsed from ATen/ops/sym_stride.h + +// #pragma once + +// @generated by torchgen/gen.py from Function.h + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + + +// #include + + +// aten::sym_stride.int(Tensor self, int dim) -> SymInt +@Namespace("at") public static native @ByVal SymInt __dispatch_sym_stride(@Const @ByRef 
Tensor self, @Cast("int64_t") long dim); + + + + // Parsed from ATen/ops/t.h // #pragma once @@ -51514,82 +53308,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from ATen/ops/tensor.h - -// #pragma once -// #include -// #include - -// These functions are defined in ATen/Utils.cpp. -// #define TENSOR(T, S) -// TORCH_API Tensor tensor(ArrayRef values, const TensorOptions& options); -// inline Tensor tensor( -// std::initializer_list values, const TensorOptions& options) { -// return at::tensor(ArrayRef(values), options); -// } -// inline Tensor tensor(T value, const TensorOptions& options) { -// return at::tensor(ArrayRef(value), options); -// } -// inline Tensor tensor(ArrayRef values) { -// return at::tensor(std::move(values), at::dtype(k##S)); -// } -// inline Tensor tensor(std::initializer_list values) { -// return at::tensor(ArrayRef(values)); -// } -// inline Tensor tensor(T value) { -// return at::tensor(ArrayRef(value)); -// } -@Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(short value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(short value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor 
tensor(int value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(int value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... values); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(float value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(float value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(double value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(double value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor 
tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value); -@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor 
tensor(@ByVal DoubleComplexArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value); -// #undef TENSOR - - // namespace at - - // Parsed from ATen/ops/tensordot.h // #pragma once @@ -51767,11 +53485,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::tile(Tensor self, int[] dims) -> Tensor +// aten::tile(Tensor self, SymInt[] dims) -> Tensor @Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); @Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::tile(Tensor self, SymInt[] dims) -> Tensor +@Namespace("at") public static native @ByVal Tensor tile_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef dims); + + + // Parsed from ATen/ops/to.h @@ -51854,7 +53577,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor +// aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor to_dense_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional masked_grad); @Namespace("at") public static native @ByVal Tensor to_dense_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input); @@ -51995,18 +53719,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long sparse_dim); -// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @Cast("int64_t") long sparse_dim, @ByRef Tensor out); - -// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @ByVal LayoutOptional layout, @ByVal LongArrayRefOptional blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @ByVal LayoutOptional layout, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); @@ -52035,14 +53747,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); -// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); @@ -52071,14 +53775,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); -// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); @@ -52107,11 +53803,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_csc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_csc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_csc_outf(@Const @ByRef Tensor self, @ByVal LongOptional dense_dim, @ByRef Tensor out); @@ -52140,11 +53831,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_csr_outf(@Const @ByRef Tensor self, @ByVal LongOptional dense_dim, @ByRef Tensor out); @@ -52173,17 +53859,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +// aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) @Namespace("at") public static native @ByVal T_TensorTensor_T topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); @Namespace("at") public static native @ByVal T_TensorTensor_T topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k); -// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + + +// aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) @Namespace("at") public static native @ByVal T_TensorTensor_T topk_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim, @Cast("bool") boolean largest, @Cast("bool") boolean sorted, @ByRef Tensor values, @ByRef Tensor indices); -// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + +// aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_symint_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal SymInt k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_symint_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal SymInt k); + + +// aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt k, @Cast("int64_t") long dim, @Cast("bool") boolean largest, @Cast("bool") boolean sorted, @ByRef Tensor values, @ByRef Tensor indices); + + +// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) @Namespace("at") public static native @ByVal T_TensorTensor_T topk(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); @Namespace("at") public static native @ByVal T_TensorTensor_T topk(@Const @ByRef Tensor self, @Cast("int64_t") long k); +// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_symint(@Const @ByRef Tensor self, @ByVal SymInt k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_symint(@Const @ByRef Tensor self, @ByVal SymInt k); + + + // Parsed from ATen/ops/trace.h @@ -52814,15 +54518,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) +// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal LongArrayRef sizes); @Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); -// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) + +// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor unflatten_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymIntArrayRef sizes); + + +// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal LongArrayRef sizes, @ByVal DimnameArrayRef names); @Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal DimnameArrayRef names); +// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor unflatten_symint(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal SymIntArrayRef sizes, @ByVal DimnameArrayRef names); + + + // Parsed from ATen/ops/unflatten_dense_tensors.h @@ -54397,10 +56111,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::var.correction(Tensor self, int[1]? dim=None, *, int? 
correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); // aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @@ -54411,13 +56125,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); @Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); // aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @@ -54429,15 +56143,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); @@ -54475,26 +56189,26 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); // aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); -// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Const @ByRef ScalarOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); @@ -55254,7 +56968,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include // #include @@ -55269,6 +56982,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include // #include // #include // #include @@ -55307,6 +57022,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -55320,6 +57036,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -55340,9 +57057,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include +// #include // #include // #include // #include @@ -55351,6 +57070,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include +// #include // #include // #include // #include @@ -55369,6 +57091,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -55384,6 +57107,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // 
#include +// #include // #include // #include // #include @@ -55396,7 +57120,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include // #include @@ -55407,9 +57131,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include +// #include // #include // #include // #include @@ -55427,6 +57151,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -55436,12 +57161,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include // #include // #include // #include +// #include // #include // #include // #include @@ -55467,9 +57192,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include +// #include // #include // #include // #include @@ -55484,6 +57211,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -55500,8 +57228,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include +// #include +// #include +// #include +// #include // #include -// #include // #include // #include // #include @@ -55509,6 +57242,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include // #include // #include // #include @@ -55745,6 +57480,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -56114,6 +57850,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // 
#include // #include +// #include // #include // #include // #include @@ -56164,6 +57901,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -56362,6 +58100,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include +// #include +// #include +// #include +// #include // #include // #include // #include @@ -56514,6 +58258,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal LongArrayRef a, @ByVal LongArrayRef b); @Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... b); +@Namespace("at") public static native @ByVal SymIntVector infer_size_symint( + @ByVal SymIntArrayRef a, + @ByVal SymIntArrayRef b); @Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal LongArrayRef a, @ByVal LongArrayRef b); @Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
b); @Namespace("at") public static native @ByVal SymDimVector infer_size_symdimvector(@ByVal SymIntArrayRef a, @ByVal SymIntArrayRef b); @@ -56756,7 +58503,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native void assert_no_internal_overlap(TensorImpl t); @Namespace("at") public static native MemOverlapStatus get_overlap_status(@Const @ByRef TensorBase a, @Const @ByRef TensorBase b); -@Namespace("at") public static native MemOverlapStatus get_overlap_status(TensorImpl a, TensorImpl b); +@Namespace("at") public static native MemOverlapStatus get_overlap_status(@Const TensorImpl a, @Const TensorImpl b); @Namespace("at") public static native void assert_no_partial_overlap( @Const @ByRef TensorBase a, @@ -56783,6 +58530,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include @Namespace("at::native") public static native @Cast("bool") boolean nested_tensor_impl_is_contiguous(@Const NestedTensorImpl nt); +@Namespace("at::native") public static native @Cast("int64_t") long get_numel_from_nested_size_tensor(@Const @ByRef Tensor tensor); // Targeting ../NestedTensorImpl.java @@ -56792,7 +58540,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::native") public static native NestedTensorImpl get_nested_tensor_impl(@Const @ByRef Tensor tensor); -@Namespace("at::native") public static native @Const @ByRef Tensor get_nested_size_tensor(@Const @ByRef Tensor tensor); +@Namespace("at::native") public static native @Const @ByRef Tensor get_nested_sizes(@Const @ByRef Tensor tensor); // namespace native // namespace at @@ -57020,8 +58768,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /** Return true if any of the variables in the list require a gradient. 
*/ @Namespace("torch::autograd") public static native @Cast("bool") boolean any_variable_requires_grad(@Cast({"", "std::vector"}) @StdMove TensorVector variables); +// Targeting ../TypeAndSize.java + + -/** Return the next edges of all the given variables, or tuples of variables. */ // namespace autograd // namespace torch @@ -57032,6 +58782,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include +// #include // #include // #include // #include @@ -57090,7 +58841,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/autograd.h +// Parsed from torch/csrc/api/include/torch/autograd.h // #pragma once @@ -57099,7 +58850,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include -// Parsed from torch/cuda.h +// Parsed from torch/csrc/api/include/torch/cuda.h // #pragma once @@ -57131,7 +58882,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/arg.h +// Parsed from torch/csrc/api/include/torch/arg.h // #pragma once @@ -57211,8 +58962,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) // #endif -// Avoid if_constexpr if possble, as it's more expensive to compile -// #if defined __cpp_if_constexpr // #define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) // do { // if constexpr (!at::should_include_kernel_dtype( @@ -57224,17 +58973,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // at_dispatch_name); // } // } while (0) -// #else // defined __cpp_if_constexpr -// #define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) -// at::guts::if_constexpr([&] { -// AT_ERROR( -// "dtype '", -// toString(enum_type), -// "' not selected for kernel tag ", -// at_dispatch_name); -// }) -// #endif // #define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) 
// case enum_type: { @@ -57402,6 +59140,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // AT_DISPATCH_SWITCH( // TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__)) +// #define AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(...) +// AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) + +// #define AT_DISPATCH_REDUCED_FLOATING_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(__VA_ARGS__)) + // #define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) // AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) // AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) @@ -57425,6 +59171,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // AT_DISPATCH_CASE_FLOATING_TYPES_AND2( // SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) +// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) + +// #define AT_DISPATCH_FLOATING_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) + +// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) +// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) + +// #define AT_DISPATCH_FLOATING_TYPES_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) 
+// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_TYPES_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__)) + // #define AT_DISPATCH_CASE_COMPLEX_TYPES(...) // AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) // AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__) @@ -57489,6 +59266,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( // SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) + +// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__)) + // #define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) // AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) // AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) @@ -57524,6 +59317,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) // AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__)) +// #define AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, ...) +// AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) + +// #define AT_DISPATCH_QINT_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES_AND(SCALARTYPE, __VA_ARGS__)) + // #define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) 
// AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) // AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) @@ -57649,6 +59450,73 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( // SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) + +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND5( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// TYPE, +// NAME, +// ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND5( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// __VA_ARGS__)) + +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE5, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE6, __VA_ARGS__) + +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND6( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// TYPE, +// NAME, +// ...) 
+// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND6( +// SCALARTYPE1, +// SCALARTYPE2, +// SCALARTYPE3, +// SCALARTYPE4, +// SCALARTYPE5, +// SCALARTYPE6, +// __VA_ARGS__)) + // #define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) // AT_DISPATCH_SWITCH( // TYPE, @@ -57939,7 +59807,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { ScalarType src_type, @Const Pointer ptr); -@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") BFloat16 fetch_and_cast_to_BFload16( +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") BFloat16 fetch_and_cast_to_BFloat16( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") Float8_e4m3fn fetch_and_cast_to_Float8_e4m3fn( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") Float8_e5m2 fetch_and_cast_to_Float8_e5m2( ScalarType src_type, @Const Pointer ptr); @@ -58009,10 +59885,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { ScalarType dest_type, Pointer ptr, @Cast("bool") boolean value); -@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_BFload16( +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_BFloat16( ScalarType dest_type, Pointer ptr, @ByVal BFloat16 value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_Float8_e4m3fn( + ScalarType dest_type, + Pointer ptr, + @ByVal Float8_e4m3fn value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_Float8_e5m2( + ScalarType dest_type, + Pointer ptr, + @ByVal Float8_e5m2 value); // #define DEFINE_UNCASTABLE(T, scalartype_) // template <> @@ -58210,7 +60094,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include // 
#include @@ -58225,6 +60108,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include // #include // #include // #include @@ -58263,6 +60148,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58276,6 +60162,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58296,9 +60183,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include +// #include // #include // #include // #include @@ -58307,6 +60196,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include +// #include // #include // #include // #include @@ -58325,6 +60217,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58340,6 +60233,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58352,7 +60246,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include // #include @@ -58363,9 +60257,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include +// #include // #include // #include // #include @@ -58383,6 +60277,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58392,12 +60287,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // 
#include -// #include // #include // #include // #include // #include // #include +// #include // #include // #include // #include @@ -58423,9 +60318,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include +// #include // #include // #include // #include @@ -58440,6 +60337,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -58456,8 +60354,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include +// #include +// #include +// #include +// #include // #include -// #include // #include // #include // #include @@ -58465,6 +60368,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include // #include // #include // #include @@ -58701,6 +60606,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -59070,6 +60976,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -59120,6 +61027,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -59318,6 +61226,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include +// #include +// #include +// #include +// #include +// #include // #include // #include // #include @@ -59443,7 +61357,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::indexing") @MemberGetter public static native @Cast("const int64_t") long INDEX_MIN(); @Namespace("at::indexing") @MemberGetter public static native 
@Cast("const int64_t") long INDEX_MAX(); -@Namespace("at::indexing") public enum TensorIndexType { None(0), Ellipsis(1), Integer(2), Boolean(3), Slice(4), Tensor(5); +@Namespace("at::indexing") public enum TensorIndexType { None(0), Ellipsis(1), SymInt(2), Boolean(3), Slice(4), Tensor(5); public final int value; private TensorIndexType(int v) { this.value = v; } @@ -59485,7 +61399,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::indexing::impl") public static native @ByVal Tensor applySelect( @Const @ByRef Tensor self, @Cast("int64_t") long dim, - @Cast("int64_t") long index, + @ByVal SymInt index, @Cast("int64_t") long real_dim, @Const @ByRef Device arg4, @Const @ByRef SymIntArrayRefOptional self_sizes); @@ -59782,6 +61696,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @StdString BytePointer get_cxx_flags(); +@Namespace("at") public static native @StdString BytePointer get_cpu_capability(); + // namespace at @@ -59806,8 +61722,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once -// #if !defined(_MSC_VER) && __cplusplus < 201402L -// #error C++14 or later compatible compiler is required to use ATen. +// #if !defined(_MSC_VER) && __cplusplus < 201703L +// #error C++17 or later compatible compiler is required to use ATen. 
// #endif // #include @@ -59850,6 +61766,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include @@ -59967,6 +61884,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #else // #include +// #include // #include // #include // #include @@ -59977,6 +61895,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -60095,37 +62014,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { Pointer data, @ByVal LongArrayRef sizes, @ByVal LongArrayRef strides, - @Const @ByRef PointerConsumer deleter, + PointerConsumer deleter, @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); @Namespace("torch") public static native @ByVal Tensor from_blob( Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Const @ByRef PointerConsumer deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByRef 
@Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") long deleter, + PointerConsumer deleter, @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); /** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the @@ -60140,12 +62035,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * the Tensor data would normally be deallocated. The {@code TensorOptions} specify * additional configuration options for the returned tensor, such as what type * to interpret the {@code data} as. */ +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + PointerConsumer deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + PointerConsumer deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); /** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the * original data. {@code sizes} should specify the shape of the tensor. The * {@code TensorOptions} specify additional configuration options for the returned * tensor, such as what type to interpret the {@code data} as. 
*/ +@Namespace("torch") public static native @ByVal Tensor _make_dep_token(@ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal Tensor _make_dep_token(); @Namespace("torch") public static native @ByVal @Name("_cudnn_init_dropout_state") Tensor torch__cudnn_init_dropout_state(double dropout, @Cast("bool") boolean train, @Cast("int64_t") long dropout_seed, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar end); @@ -60173,10 +62080,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal LongArrayRef size); @Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal Tensor _empty_affine_quantized_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal Tensor _empty_affine_quantized_symint(@ByVal SymIntArrayRef size); @Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); @Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); +@Namespace("torch") public static 
native @ByVal Tensor _empty_per_channel_affine_quantized_symint(@ByVal SymIntArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal Tensor _empty_per_channel_affine_quantized_symint(@ByVal SymIntArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); @Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor); @Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @@ -60309,6 +62220,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal LongArrayRef size); @Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor 
torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal Tensor _efficientzerotensor_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor _efficientzerotensor_symint(@ByVal SymIntArrayRef size); @Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size); @Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @@ -60352,22 +62265,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const 
@ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") 
TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntArrayRef size); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); 
+@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") 
public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_to_copy") Tensor torch__to_copy(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @Cast("bool") boolean non_blocking/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("_to_copy") Tensor torch__to_copy(@Const @ByRef Tensor self); @@ -60497,6 +62413,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // #include @@ -60551,7 +62468,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * * This class implements a small subset of the generic functionality * implemented by folly:Synchronized. Specifically, only withLock - * is implemeted here since it's the smallest possible API that is + * is implemented here since it's the smallest possible API that is * able to cover a large surface area of functionality offered by * folly::Synchronized. 
*/ @@ -60575,10 +62492,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include +// #ifndef NDEBUG +// #include +// #endif + @Namespace("c10") public static native @Cast("bool") boolean show_dispatch_trace(); @Namespace("c10") public static native void dispatch_trace_nesting_incr(); @Namespace("c10") public static native void dispatch_trace_nesting_decr(); @@ -60643,7 +62565,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace std -// Parsed from torch/types.h +// Parsed from torch/csrc/api/include/torch/types.h // #pragma once @@ -60688,7 +62610,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/dataloader_options.h +// Parsed from torch/csrc/api/include/torch/data/dataloader_options.h // #pragma once @@ -60711,7 +62633,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/detail/queue.h +// Parsed from torch/csrc/api/include/torch/data/detail/queue.h // #pragma once @@ -60739,7 +62661,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/detail/data_shuttle.h +// Parsed from torch/csrc/api/include/torch/data/detail/data_shuttle.h // #pragma once @@ -60767,7 +62689,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/detail/sequencers.h +// Parsed from torch/csrc/api/include/torch/data/detail/sequencers.h // #pragma once @@ -60810,7 +62732,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/iterator.h +// Parsed from torch/csrc/api/include/torch/data/iterator.h // #pragma once @@ -60844,7 +62766,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/samplers/base.h +// Parsed from 
torch/csrc/api/include/torch/data/samplers/base.h // #pragma once @@ -60868,7 +62790,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/samplers/random.h +// Parsed from torch/csrc/api/include/torch/data/samplers/random.h // #pragma once @@ -60888,7 +62810,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/worker_exception.h +// Parsed from torch/csrc/api/include/torch/data/worker_exception.h // #pragma once @@ -60917,7 +62839,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/dataloader/base.h +// Parsed from torch/csrc/api/include/torch/data/dataloader/base.h // #pragma once @@ -60952,7 +62874,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/dataloader/stateful.h +// Parsed from torch/csrc/api/include/torch/data/dataloader/stateful.h // #pragma once @@ -60969,7 +62891,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/dataloader/stateless.h +// Parsed from torch/csrc/api/include/torch/data/dataloader/stateless.h // #pragma once @@ -60991,7 +62913,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/dataloader.h +// Parsed from torch/csrc/api/include/torch/data/dataloader.h // #pragma once @@ -61020,7 +62942,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/example.h +// Parsed from torch/csrc/api/include/torch/data/example.h // #pragma once @@ -61044,7 +62966,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/datasets/base.h +// Parsed from torch/csrc/api/include/torch/data/datasets/base.h // #pragma once @@ -61095,7 +63017,7 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/datasets/stateful.h +// Parsed from torch/csrc/api/include/torch/data/datasets/stateful.h // #pragma once @@ -61119,7 +63041,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/samplers/custom_batch_request.h +// Parsed from torch/csrc/api/include/torch/data/samplers/custom_batch_request.h // #pragma once @@ -61133,7 +63055,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/samplers/distributed.h +// Parsed from torch/csrc/api/include/torch/data/samplers/distributed.h // #pragma once @@ -61159,7 +63081,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from torch/data/samplers/sequential.h +// Parsed from torch/csrc/api/include/torch/data/samplers/sequential.h // #pragma once @@ -61306,10 +63228,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include // #include // #include +// #include +// #include // #include // Targeting ../SourceRangeUnpickler.java @@ -61335,7 +63258,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::jit") public static native void format_stack_trace( @Cast("std::ostream*") @ByRef Pointer out, - @Const @ByRef StackEntryVector entries); + @StdVector StackEntry entries); @Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef SourceRange range); @@ -61522,9 +63445,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// Targeting ../OperationCreator.java - - // Targeting ../Operator.java @@ -61624,7 +63544,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include +// #include // 
#include // #include @@ -61820,7 +63740,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") @@ -61913,7 +63833,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef Tensor t); // current (TLS) TorchScript interpreter callstack -@Namespace("torch::jit") public static native @ByVal StackEntryVector currentCallstack(); +@Namespace("torch::jit") public static native @StdVector StackEntry currentCallstack(); @Namespace("torch::jit") public static native @ByVal StringVector currentModuleHierarchy(); // namespace jit @@ -62279,12 +64199,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../NamedJitModule.java -// Targeting ../NamedTensor.java - - -// Targeting ../NamedIValue.java - - // namespace detail // Targeting ../JitModule.java @@ -62294,7 +64208,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // details. 
@Namespace("torch::jit") public static native @ByVal JitModule freeze( @Const @ByRef JitModule module, - @ByVal(nullValue = "c10::optional >(c10::nullopt)") StringVectorOptional preserved_attrs, + @Const @ByRef(nullValue = "c10::optional >(c10::nullopt)") StringVectorOptional preserved_attrs, @Cast("bool") boolean optimize_numerics/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule freeze( @Const @ByRef JitModule module); @@ -62361,48 +64275,12 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../named_module_iterator.java -// Targeting ../parameter_iterator.java - - -// Targeting ../named_parameter_iterator.java - - -// Targeting ../attribute_iterator.java - - -// Targeting ../named_attribute_iterator.java - - -// Targeting ../buffer_iterator.java - - -// Targeting ../named_buffer_iterator.java - - // Targeting ../module_list.java // Targeting ../named_module_list.java -// Targeting ../parameter_list.java - - -// Targeting ../named_parameter_list.java - - -// Targeting ../attribute_list.java - - -// Targeting ../named_attribute_list.java - - -// Targeting ../buffer_list.java - - -// Targeting ../named_buffer_list.java - - // Targeting ../ModulePolicy.java @@ -62418,12 +64296,6 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../NamedJitModulePolicy.java -// Targeting ../NamedTensorPolicy.java - - -// Targeting ../NamedIValuePolicy.java - - // namespace detail @@ -62436,7 +64308,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/serialize/input-archive.h +// Parsed from torch/csrc/api/include/torch/serialize/input-archive.h // #pragma once @@ -62460,7 +64332,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/serialize/output-archive.h +// Parsed from 
torch/csrc/api/include/torch/serialize/output-archive.h // #pragma once @@ -62481,7 +64353,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/serialize/archive.h +// Parsed from torch/csrc/api/include/torch/serialize/archive.h // #pragma once @@ -62489,7 +64361,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include -// Parsed from torch/data/samplers/serialize.h +// Parsed from torch/csrc/api/include/torch/data/samplers/serialize.h // #pragma once @@ -62503,7 +64375,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/samplers/stream.h +// Parsed from torch/csrc/api/include/torch/data/samplers/stream.h // #pragma once @@ -62527,7 +64399,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/samplers.h +// Parsed from torch/csrc/api/include/torch/data/samplers.h // #pragma once @@ -62540,7 +64412,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include -// Parsed from torch/serialize/tensor.h +// Parsed from torch/csrc/api/include/torch/serialize/tensor.h // #pragma once @@ -62556,7 +64428,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/serialize.h +// Parsed from torch/csrc/api/include/torch/serialize.h // #pragma once @@ -62673,7 +64545,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/datasets/chunk.h +// Parsed from torch/csrc/api/include/torch/data/datasets/chunk.h // #pragma once @@ -62706,7 +64578,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/datasets/map.h +// Parsed from 
torch/csrc/api/include/torch/data/datasets/map.h // #pragma once @@ -62733,7 +64605,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/datasets/mnist.h +// Parsed from torch/csrc/api/include/torch/data/datasets/mnist.h // #pragma once @@ -62753,7 +64625,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/datasets/shared.h +// Parsed from torch/csrc/api/include/torch/data/datasets/shared.h // #pragma once @@ -62773,7 +64645,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/datasets/tensor.h +// Parsed from torch/csrc/api/include/torch/data/datasets/tensor.h // #pragma once @@ -62792,7 +64664,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/datasets.h +// Parsed from torch/csrc/api/include/torch/data/datasets.h // #pragma once @@ -62805,7 +64677,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include -// Parsed from torch/data/transforms/base.h +// Parsed from torch/csrc/api/include/torch/data/transforms/base.h // #pragma once @@ -62828,7 +64700,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/transforms/lambda.h +// Parsed from torch/csrc/api/include/torch/data/transforms/lambda.h // #pragma once @@ -62847,7 +64719,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/transforms/collate.h +// Parsed from torch/csrc/api/include/torch/data/transforms/collate.h // #pragma once @@ -62881,7 +64753,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/transforms/stack.h +// 
Parsed from torch/csrc/api/include/torch/data/transforms/stack.h // #pragma once @@ -62902,7 +64774,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/transforms/tensor.h +// Parsed from torch/csrc/api/include/torch/data/transforms/tensor.h // #pragma once @@ -62926,7 +64798,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/data/transforms.h +// Parsed from torch/csrc/api/include/torch/data/transforms.h // #pragma once @@ -62937,7 +64809,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include -// Parsed from torch/data.h +// Parsed from torch/csrc/api/include/torch/data.h // #pragma once @@ -62951,7 +64823,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/enum.h +// Parsed from torch/csrc/api/include/torch/enum.h // #pragma once @@ -63178,7 +65050,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/fft.h +// Parsed from torch/csrc/api/include/torch/fft.h // #pragma once @@ -63196,7 +65068,7 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor fft( @Const @ByRef Tensor self, - @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fft( @@ -63214,7 +65086,7 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor ifft( @Const @ByRef Tensor self, - @ByVal(nullValue = 
"c10::optional(c10::nullopt)") LongOptional n, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifft( @@ -63326,7 +65198,7 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor rfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfft( @@ -63347,7 +65219,7 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor irfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfft( @@ -63463,7 +65335,7 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor hfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfft( @@ -63485,7 +65357,7 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native 
@ByVal Tensor ihfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfft( @@ -63681,7 +65553,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/jit.h +// Parsed from torch/csrc/api/include/torch/jit.h // #pragma once @@ -63719,7 +65591,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/linalg.h +// Parsed from torch/csrc/api/include/torch/linalg.h // #pragma once @@ -64306,7 +66178,48 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/nested.h +// Parsed from torch/csrc/api/include/torch/mps.h + +// #pragma once + +// #include + +// #include +// #include +// Targeting ../MTLCommandBuffer_t.java + + +// Targeting ../DispatchQueue_t.java + + +// #endif + +/** Returns true if MPS device is available. */ +@Namespace("torch::mps") public static native @Cast("bool") boolean is_available(); + +/** Sets the RNG seed for the MPS device. */ + +/** Waits for all streams on the MPS device to complete. + * This blocks the calling CPU thread by using the 'waitUntilCompleted()' + * method to wait for Metal command buffers finish executing all the + * encoded GPU operations before returning. */ +@Namespace("torch::mps") public static native void synchronize(); + +/** Submits the currently active command buffer to run on the MPS device. */ +@Namespace("torch::mps") public static native void commit(); + +/** Get the current command buffer to encode the Metal commands. 
*/ +@Namespace("torch::mps") public static native MTLCommandBuffer_t get_command_buffer(); + +/** Get the dispatch_queue_t to synchronize encoding the custom kernels + * with the PyTorch MPS backend. */ +@Namespace("torch::mps") public static native DispatchQueue_t get_dispatch_queue(); + + // namespace mps + // namespace torch + + +// Parsed from torch/csrc/api/include/torch/nested.h // #pragma once @@ -64380,7 +66293,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/detail/static.h +// Parsed from torch/csrc/api/include/torch/detail/static.h // #pragma once @@ -64439,7 +66352,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // and otherwise deduces to the type `void`. -// Parsed from torch/nn/pimpl.h +// Parsed from torch/csrc/api/include/torch/nn/pimpl.h // #pragma once @@ -64493,7 +66406,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // #define TORCH_MODULE(Name) TORCH_MODULE_IMPL(Name, Name##Impl) -// Parsed from torch/nn/modules/container/any_value.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/any_value.h // #pragma once @@ -64518,7 +66431,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/nn/modules/container/any_module_holder.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h // #pragma once @@ -64541,92 +66454,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/ordered_dict.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -/** An ordered dictionary implementation, akin to Python's {@code OrderedDict}. 
*/ - -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // namespace torch - - -// Parsed from torch/nn/module.h +// Parsed from torch/csrc/api/include/torch/nn/module.h // #pragma once @@ -64699,6 +66527,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h // #define AT_MKLDNN_ENABLED() 1 +// #define AT_MKLDNN_ACL_ENABLED() 0 // #define AT_MKL_ENABLED() 0 // #define AT_MKL_SEQUENTIAL() 0 // #define AT_FFTW_ENABLED() 1 @@ -64849,51 +66678,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // IWYU pragma: keep -// Parsed from torch/csrc/api/include/torch/types.h - -// #pragma once - -// #include - -// #include - -// #include -// #include - -// TODO: These don't really belong here but torchvision builds in CI need them -// Remove once the torchvision version being compiled in CI is updated -// #include -// #include - -// NOTE [ Exposing declarations in `at::` to `torch::` ] -// -// The following line `using namespace at;` is responsible for exposing all -// declarations in `at::` namespace to `torch::` namespace. -// -// According to the rules laid out in -// https://en.cppreference.com/w/cpp/language/qualified_lookup, section -// "Namespace members": -// ``` -// Qualified lookup within the scope of a namespace N first considers all -// declarations that are located in N and all declarations that are located in -// the inline namespace members of N (and, transitively, in their inline -// namespace members). If there are no declarations in that set then it -// considers declarations in all namespaces named by using-directives found in N -// and in all transitive inline namespace members of N. 
-// ``` -// -// This means that if both `at::` and `torch::` namespaces have a function with -// the same signature (e.g. both `at::func()` and `torch::func()` exist), after -// `namespace torch { using namespace at; }`, when we call `torch::func()`, the -// `func()` function defined in `torch::` namespace will always be called, and -// the `func()` function defined in `at::` namespace is always hidden. // NOLINT - -/** Fixed width dtypes. */ - -/** Rust-style short dtypes. */ - // namespace torch - - // Parsed from torch/csrc/profiler/orchestration/observer.h // #pragma once @@ -64908,8 +66692,10 @@ scalar_t sf(scalar_t x, scalar_t y) // ---------------------------------------------------------------------------- @Namespace("torch::profiler::impl") public enum ActivityType { CPU(0), - CUDA(1), // CUDA kernels, runtime - NUM_KINETO_ACTIVITIES(2);// must be the last one + XPU(1), // XPU kernels, runtime + CUDA(2), // CUDA kernels, runtime + MTIA(3), // MTIA kernels, runtime + NUM_KINETO_ACTIVITIES(4);// must be the last one public final int value; private ActivityType(int v) { this.value = v; } @@ -64926,8 +66712,9 @@ scalar_t sf(scalar_t x, scalar_t y) ITT(4), // only emit ITT markers KINETO(5), // use libkineto KINETO_GPU_FALLBACK(6), // use CUDA events when CUPTI is not available - KINETO_ONDEMAND(7), // run the profiler in on-demand mode - NUM_PROFILER_STATES(8);// must be the last one + KINETO_PRIVATEUSE1_FALLBACK(7), // use PrivateUse1 events + KINETO_ONDEMAND(8), // run the profiler in on-demand mode + NUM_PROFILER_STATES(9);// must be the last one public final int value; private ProfilerState(int v) { this.value = v; } @@ -65015,6 +66802,9 @@ scalar_t sf(scalar_t x, scalar_t y) // ---------------------------------------------------------------------------- // -- Annotation -------------------------------------------------------------- // ---------------------------------------------------------------------------- +// Targeting ../ProfilerVoidEventStub.java + 
+ // namespace impl // namespace profiler @@ -65128,7 +66918,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::profiler::impl") public static native @StdVector FileLineFunc prepareCallstack( - @Const @ByRef StackEntryVector cs); + @StdVector StackEntry cs); @Namespace("torch::profiler::impl") public static native @ByVal StringVector callstackStr( @StdVector FileLineFunc cs); @Namespace("torch::profiler::impl") public static native @StdString BytePointer stacksToStr( @@ -65144,9 +66934,10 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef RecordFunction fn); @Namespace("torch::profiler::impl") public static native @StdString BytePointer shapesToStr( @Cast("std::vector*") @StdVector LongVector shapes); -@Namespace("torch::profiler::impl") public static native @StdString BytePointer dtypesToStr(@Const @ByRef StringVector types); +@Namespace("torch::profiler::impl") public static native @StdString BytePointer strListToStr(@Const @ByRef StringVector types); @Namespace("torch::profiler::impl") public static native @StdString BytePointer inputOpIdsToStr( @Const @ByRef RecordFunctionHandleIntList input_op_ids); +@Namespace("torch::profiler::impl") public static native @StdString BytePointer ivalueListToStr(@Const @ByRef IValueVector list); @Namespace("torch::profiler::impl") public static native @ByVal StringVector inputTypes(@Const @ByRef RecordFunction fn); @Namespace("torch::profiler::impl") public static native @ByVal StringIValueMap saveExtraArgs(@Const @ByRef RecordFunction fn); @@ -65197,7 +66988,7 @@ scalar_t sf(scalar_t x, scalar_t y) * For example, if part of the model is lowered to a dsp backend, then * the execution of that part of the model is delegated to the backend. * When backend finishes execution it has an option to provide profiling - * information (latency only at th emoment) corresponding to different operators + * information (latency only at the moment) corresponding to different operators * that were executed in the backend. 
* When such events are recorded by backend using this API, the event * records will be collected by active kineto profiler. If no kineto profiler @@ -65273,7 +67064,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include -// Parsed from torch/utils.h +// Parsed from torch/csrc/api/include/torch/utils.h // #pragma once @@ -65368,7 +67159,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/cloneable.h +// Parsed from torch/csrc/api/include/torch/nn/cloneable.h // #pragma once @@ -65552,6 +67343,9 @@ scalar_t sf(scalar_t x, scalar_t y) // Targeting ../ConstantPad1dImplCloneable.java +// Targeting ../ZeroPad1dImplCloneable.java + + // Targeting ../AvgPool1dImplCloneable.java @@ -65612,6 +67406,9 @@ scalar_t sf(scalar_t x, scalar_t y) // Targeting ../ConstantPad3dImplCloneable.java +// Targeting ../ZeroPad3dImplCloneable.java + + // Targeting ../AvgPool3dImplCloneable.java @@ -65770,7 +67567,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/batchnorm.h +// Parsed from torch/csrc/api/include/torch/nn/options/batchnorm.h // #pragma once @@ -65820,7 +67617,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/batchnorm.h +// Parsed from torch/csrc/api/include/torch/nn/functional/batchnorm.h // #pragma once @@ -65869,7 +67666,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/conv.h +// Parsed from torch/csrc/api/include/torch/nn/options/conv.h // #pragma once @@ -66050,7 +67847,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/conv.h +// Parsed from torch/csrc/api/include/torch/nn/functional/conv.h // #pragma once @@ -66276,7 +68073,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/distance.h +// Parsed from torch/csrc/api/include/torch/nn/options/distance.h // #pragma once @@ -66317,7 +68114,7 @@ scalar_t sf(scalar_t x, scalar_t 
y) // namespace torch -// Parsed from torch/nn/functional/distance.h +// Parsed from torch/csrc/api/include/torch/nn/functional/distance.h // #pragma once @@ -66380,7 +68177,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/dropout.h +// Parsed from torch/csrc/api/include/torch/nn/options/dropout.h // #pragma once @@ -66460,7 +68257,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/dropout.h +// Parsed from torch/csrc/api/include/torch/nn/functional/dropout.h // #pragma once @@ -66614,7 +68411,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/embedding.h +// Parsed from torch/csrc/api/include/torch/nn/options/embedding.h // #pragma once @@ -66658,7 +68455,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/embedding.h +// Parsed from torch/csrc/api/include/torch/nn/functional/embedding.h // #pragma once @@ -66742,7 +68539,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/fold.h +// Parsed from torch/csrc/api/include/torch/nn/options/fold.h // #pragma once @@ -66783,7 +68580,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/fold.h +// Parsed from torch/csrc/api/include/torch/nn/functional/fold.h // #pragma once @@ -66845,7 +68642,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/instancenorm.h +// Parsed from torch/csrc/api/include/torch/nn/options/instancenorm.h // #pragma once @@ -66894,7 +68691,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/instancenorm.h +// Parsed from torch/csrc/api/include/torch/nn/functional/instancenorm.h // #pragma once @@ -66937,7 +68734,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/linear.h +// Parsed from 
torch/csrc/api/include/torch/nn/functional/linear.h // #pragma once @@ -66961,7 +68758,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/activation.h +// Parsed from torch/csrc/api/include/torch/nn/options/activation.h // #pragma once @@ -67218,7 +69015,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/linear.h +// Parsed from torch/csrc/api/include/torch/nn/options/linear.h // #pragma once @@ -67243,7 +69040,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/activation.h +// Parsed from torch/csrc/api/include/torch/nn/functional/activation.h // #pragma once @@ -67727,7 +69524,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/loss.h +// Parsed from torch/csrc/api/include/torch/nn/options/loss.h // #pragma once @@ -68036,7 +69833,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/loss.h +// Parsed from torch/csrc/api/include/torch/nn/functional/loss.h // #pragma once @@ -68715,7 +70512,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace at -// Parsed from torch/nn/options/padding.h +// Parsed from torch/csrc/api/include/torch/nn/options/padding.h // #pragma once @@ -68793,11 +70590,40 @@ scalar_t sf(scalar_t x, scalar_t y) *
{@code
  *  ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
  *  }
*/ +// Targeting ../ZeroPad1dOptions.java + -/// // Targeting ../ZeroPad2dOptions.java +// Targeting ../ZeroPad3dOptions.java + + + +/** {@code ZeroPadOptions} specialized for the {@code ZeroPad1d} module. + * + * Example: + *
{@code
+ *  ConstantPad1d model(ConstantPad1dOptions({3, 1});
+ *  }
*/ + +/// + +/** {@code ZeroPadOptions} specialized for the {@code ZeroPad2d} module. + * + * Example: + *
{@code
+ *  ConstantPad2d model(ConstantPad2dOptions({1, 1, 2, 0});
+ *  }
*/ + +/// + +/** {@code ZeroPadOptions} specialized for the {@code ZeroPad3d} module. + * + * Example: + *
{@code
+ *  ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2});
+ *  }
*/ // Targeting ../ConstantPad1dOptions.java @@ -68844,7 +70670,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/padding.h +// Parsed from torch/csrc/api/include/torch/nn/functional/padding.h // #pragma once @@ -68885,7 +70711,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/utils.h +// Parsed from torch/csrc/api/include/torch/nn/modules/utils.h // #pragma once @@ -68920,7 +70746,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/pooling.h +// Parsed from torch/csrc/api/include/torch/nn/options/pooling.h // #pragma once @@ -69362,7 +71188,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/pooling.h +// Parsed from torch/csrc/api/include/torch/nn/functional/pooling.h // #pragma once @@ -70069,7 +71895,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/normalization.h +// Parsed from torch/csrc/api/include/torch/nn/options/normalization.h // #pragma once @@ -70126,7 +71952,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/normalization.h +// Parsed from torch/csrc/api/include/torch/nn/functional/normalization.h // #pragma once @@ -70253,7 +72079,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/pixelshuffle.h +// Parsed from torch/csrc/api/include/torch/nn/options/pixelshuffle.h // #pragma once @@ -70296,7 +72122,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/pixelshuffle.h +// Parsed from torch/csrc/api/include/torch/nn/functional/pixelshuffle.h // #pragma once @@ -70331,7 +72157,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/upsampling.h +// Parsed from torch/csrc/api/include/torch/nn/options/upsampling.h // #pragma once @@ -70356,7 +72182,7 @@ scalar_t sf(scalar_t x, 
scalar_t y) // namespace torch -// Parsed from torch/nn/functional/upsampling.h +// Parsed from torch/csrc/api/include/torch/nn/functional/upsampling.h // #pragma once @@ -70407,7 +72233,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/vision.h +// Parsed from torch/csrc/api/include/torch/nn/options/vision.h // #pragma once @@ -70424,7 +72250,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional/vision.h +// Parsed from torch/csrc/api/include/torch/nn/functional/vision.h // #pragma once @@ -70484,7 +72310,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/functional.h +// Parsed from torch/csrc/api/include/torch/nn/functional.h // #pragma once @@ -70505,7 +72331,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include -// Parsed from torch/nn/init.h +// Parsed from torch/csrc/api/include/torch/nn/init.h // #pragma once @@ -70623,7 +72449,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/common.h +// Parsed from torch/csrc/api/include/torch/nn/modules/common.h /// @@ -70730,7 +72556,7 @@ scalar_t sf(scalar_t x, scalar_t y) // } -// Parsed from torch/nn/modules/container/any.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/any.h // #pragma once @@ -70802,7 +72628,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/container/moduledict.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/moduledict.h // #pragma once @@ -70823,7 +72649,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/container/modulelist.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/modulelist.h // #pragma once @@ -70846,7 +72672,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/container/named_any.h +// Parsed from 
torch/csrc/api/include/torch/nn/modules/container/named_any.h // #pragma once @@ -70895,7 +72721,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/container/parameterdict.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/parameterdict.h // #pragma once @@ -70912,7 +72738,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/container/parameterlist.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/parameterlist.h // #pragma once @@ -70927,7 +72753,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/container/sequential.h +// Parsed from torch/csrc/api/include/torch/nn/modules/container/sequential.h // #pragma once @@ -70960,7 +72786,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/linear.h +// Parsed from torch/csrc/api/include/torch/nn/modules/linear.h // #pragma once @@ -71022,7 +72848,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/activation.h +// Parsed from torch/csrc/api/include/torch/nn/modules/activation.h // #pragma once @@ -71272,7 +73098,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/adaptive.h +// Parsed from torch/csrc/api/include/torch/nn/options/adaptive.h // #pragma once @@ -71287,7 +73113,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/adaptive.h +// Parsed from torch/csrc/api/include/torch/nn/modules/adaptive.h // #pragma once @@ -71316,7 +73142,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/batchnorm.h +// Parsed from torch/csrc/api/include/torch/nn/modules/batchnorm.h // #pragma once @@ -71387,7 +73213,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/conv.h +// Parsed from torch/csrc/api/include/torch/nn/modules/conv.h // 
#pragma once @@ -71493,7 +73319,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/distance.h +// Parsed from torch/csrc/api/include/torch/nn/modules/distance.h // #pragma once @@ -71527,7 +73353,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/dropout.h +// Parsed from torch/csrc/api/include/torch/nn/modules/dropout.h // #pragma once @@ -71607,7 +73433,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/embedding.h +// Parsed from torch/csrc/api/include/torch/nn/modules/embedding.h // #pragma once @@ -71641,7 +73467,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/fold.h +// Parsed from torch/csrc/api/include/torch/nn/modules/fold.h // #pragma once @@ -71674,7 +73500,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/instancenorm.h +// Parsed from torch/csrc/api/include/torch/nn/modules/instancenorm.h // #pragma once @@ -71721,7 +73547,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/loss.h +// Parsed from torch/csrc/api/include/torch/nn/modules/loss.h // #pragma once @@ -71923,7 +73749,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/_functions.h +// Parsed from torch/csrc/api/include/torch/nn/modules/_functions.h // #pragma once @@ -71937,7 +73763,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/normalization.h +// Parsed from torch/csrc/api/include/torch/nn/modules/normalization.h // #pragma once @@ -71991,7 +73817,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/padding.h +// Parsed from torch/csrc/api/include/torch/nn/modules/padding.h // #pragma once @@ -72072,6 +73898,24 @@ scalar_t sf(scalar_t x, scalar_t y) * it provides, and examples of how to use {@code ReplicationPad3d} 
with * {@code torch::nn::ReplicationPad3dOptions}. See the documentation for * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../ZeroPad1dImplBase.java + + +// Targeting ../ZeroPad2dImplBase.java + + +// Targeting ../ZeroPad3dImplBase.java + + +// Targeting ../ZeroPad1dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code ZeroPad1dImpl}. + * See the documentation for {@code ZeroPad1dImpl} class to learn what methods it + * provides, and examples of how to use {@code ZeroPad1d} with + * {@code torch::nn::ZeroPad1dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ // Targeting ../ZeroPad2dImpl.java @@ -72081,6 +73925,15 @@ scalar_t sf(scalar_t x, scalar_t y) * provides, and examples of how to use {@code ZeroPad2d} with * {@code torch::nn::ZeroPad2dOptions}. See the documentation for {@code ModuleHolder} to * learn about PyTorch's module storage semantics. */ +// Targeting ../ZeroPad3dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code ZeroPad3dImpl}. + * See the documentation for {@code ZeroPad3dImpl} class to learn what methods it + * provides, and examples of how to use {@code ZeroPad3d} with + * {@code torch::nn::ZeroPad3dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. 
*/ // Targeting ../ConstantPad1dImplBase.java @@ -72122,7 +73975,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/pixelshuffle.h +// Parsed from torch/csrc/api/include/torch/nn/modules/pixelshuffle.h // #pragma once @@ -72154,7 +74007,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/pooling.h +// Parsed from torch/csrc/api/include/torch/nn/modules/pooling.h // #pragma once @@ -72392,7 +74245,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/rnn.h +// Parsed from torch/csrc/api/include/torch/nn/options/rnn.h // #pragma once @@ -72433,7 +74286,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/utils/rnn.h +// Parsed from torch/csrc/api/include/torch/nn/utils/rnn.h // #pragma once @@ -72600,7 +74453,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/rnn.h +// Parsed from torch/csrc/api/include/torch/nn/modules/rnn.h // #pragma once @@ -72701,7 +74554,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/transformerlayer.h +// Parsed from torch/csrc/api/include/torch/nn/options/transformerlayer.h // #pragma once @@ -72723,7 +74576,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/options/transformer.h +// Parsed from torch/csrc/api/include/torch/nn/options/transformer.h // #pragma once @@ -72742,7 +74595,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/transformer.h +// Parsed from torch/csrc/api/include/torch/nn/modules/transformer.h // #pragma once @@ -72770,7 +74623,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/transformerlayer.h +// Parsed from torch/csrc/api/include/torch/nn/modules/transformerlayer.h // #pragma once @@ -72810,7 +74663,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// 
Parsed from torch/nn/options/transformercoder.h +// Parsed from torch/csrc/api/include/torch/nn/options/transformercoder.h // #pragma once @@ -72832,7 +74685,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/transformercoder.h +// Parsed from torch/csrc/api/include/torch/nn/modules/transformercoder.h // #pragma once @@ -72872,7 +74725,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules/upsampling.h +// Parsed from torch/csrc/api/include/torch/nn/modules/upsampling.h // #pragma once @@ -72900,7 +74753,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/modules.h +// Parsed from torch/csrc/api/include/torch/nn/modules.h // #pragma once @@ -72940,7 +74793,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include -// Parsed from torch/nn/options.h +// Parsed from torch/csrc/api/include/torch/nn/options.h // #pragma once @@ -72962,7 +74815,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include -// Parsed from torch/nn/utils/clip_grad.h +// Parsed from torch/csrc/api/include/torch/nn/utils/clip_grad.h // #pragma once @@ -73023,7 +74876,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/utils/convert_parameters.h +// Parsed from torch/csrc/api/include/torch/nn/utils/convert_parameters.h // #pragma once @@ -73052,7 +74905,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/nn/utils.h +// Parsed from torch/csrc/api/include/torch/nn/utils.h // #pragma once @@ -73061,7 +74914,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include -// Parsed from torch/nn.h +// Parsed from torch/csrc/api/include/torch/nn.h // #pragma once @@ -73075,7 +74928,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include -// Parsed from torch/optim/optimizer.h +// Parsed from torch/csrc/api/include/torch/optim/optimizer.h // #pragma once @@ -73179,7 +75032,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from 
torch/optim/serialize.h +// Parsed from torch/csrc/api/include/torch/optim/serialize.h // #pragma once @@ -73294,7 +75147,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/adagrad.h +// Parsed from torch/csrc/api/include/torch/optim/adagrad.h // #pragma once @@ -73321,7 +75174,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/adam.h +// Parsed from torch/csrc/api/include/torch/optim/adam.h // #pragma once @@ -73346,7 +75199,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/adamw.h +// Parsed from torch/csrc/api/include/torch/optim/adamw.h // #pragma once @@ -73371,7 +75224,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/lbfgs.h +// Parsed from torch/csrc/api/include/torch/optim/lbfgs.h // #pragma once @@ -73397,7 +75250,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/rmsprop.h +// Parsed from torch/csrc/api/include/torch/optim/rmsprop.h // #pragma once @@ -73426,7 +75279,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/sgd.h +// Parsed from torch/csrc/api/include/torch/optim/sgd.h // #pragma once @@ -73454,7 +75307,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/schedulers/lr_scheduler.h +// Parsed from torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h // #pragma once @@ -73468,7 +75321,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim/schedulers/step_lr.h +// Parsed from torch/csrc/api/include/torch/optim/schedulers/step_lr.h // #pragma once @@ -73480,7 +75333,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/optim.h +// Parsed from torch/csrc/api/include/torch/optim.h // #pragma once @@ -73496,7 +75349,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include -// Parsed from torch/sparse.h +// Parsed from 
torch/csrc/api/include/torch/sparse.h // #pragma once @@ -73505,7 +75358,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/special.h +// Parsed from torch/csrc/api/include/torch/special.h // #pragma once @@ -74648,7 +76501,7 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/version.h +// Parsed from torch/csrc/api/include/torch/version.h // #pragma once @@ -74656,74 +76509,38 @@ scalar_t sf(scalar_t x, scalar_t y) public static final int TORCH_VERSION_MAJOR = 2; /** Indicates the minor version of LibTorch. */ -public static final int TORCH_VERSION_MINOR = 0; +public static final int TORCH_VERSION_MINOR = 1; /** Indicates the patch version of LibTorch. */ public static final int TORCH_VERSION_PATCH = 0; /** Indicates the version of LibTorch. */ public static final String TORCH_VERSION = - "2.0.0"; + "2.1.0"; -// Parsed from torch/csrc/api/include/torch/all.h +// Parsed from torch/csrc/autograd/InferenceMode.h // #pragma once -// #if !defined(_MSC_VER) && __cplusplus < 201402L -// #error C++14 or later compatible compiler is required to use PyTorch. 
-// #endif - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - -// Parsed from caffe2/serialize/inline_container.h +// #include +// #include -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace torch -// #include -// #include -// #include "caffe2/serialize/istream_adapter.h" -// #include "caffe2/serialize/read_adapter_interface.h" -// #include "caffe2/serialize/versions.h" -// Targeting ../mz_zip_archive.java +// Parsed from caffe2/serialize/read_adapter_interface.h +// #pragma once -// Targeting ../PyTorchStreamReader.java +// #include +// #include +// #include "c10/macros/Macros.h" +// Targeting ../ReadAdapterInterface.java -// Writer-specific constants -@Namespace("caffe2::serialize::detail") @MemberGetter public static native @Cast("const uint64_t") long kFieldAlignment(); -// Returns a record to be appended to the local user extra data entry in order -// to make data beginning aligned at kFieldAlignment bytes boundary. 
- - // namespace detail // namespace serialize // namespace caffe2 @@ -74741,22 +76558,6 @@ scalar_t sf(scalar_t x, scalar_t y) - // namespace serialize - // namespace caffe2 - - -// Parsed from caffe2/serialize/read_adapter_interface.h - -// #pragma once - -// #include -// #include - -// #include "c10/macros/Macros.h" -// Targeting ../ReadAdapterInterface.java - - - // namespace serialize // namespace caffe2 @@ -74895,223 +76696,588 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace caffe2 -// Parsed from torch/csrc/jit/serialization/unpickler.h +// Parsed from caffe2/serialize/inline_container.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../Unpickler.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include "caffe2/serialize/istream_adapter.h" +// #include "caffe2/serialize/read_adapter_interface.h" +// #include "caffe2/serialize/versions.h" +// Targeting ../mz_zip_archive.java - // namespace jit - // namespace torch +// PyTorch containers are a special zip archive with the following layout +// archive_name.zip contains: +// archive_name/ +// version # a file with a single decimal number written in ascii, +// # used to establish the version of the archive format +// model.json # overall model description, this is a json output of +// # ModelDef from torch.proto +// # the following names are by convention only, model.json will +// # refer to these files by full names +// tensors/ +// 0 # flat storage for tensor data, meta-data about shapes, etc. is +// # in model.json +// 1 +// ... 
+// # code entries will only exist for modules that have methods attached +// code/ +// archive_name.py # serialized torch script code (python syntax, using +// PythonPrint) archive_name_my_submodule.py # submodules have separate +// files +// +// The PyTorchStreamWriter also ensures additional useful properties for these +// files +// 1. All files are stored uncompressed. +// 2. All files in the archive are aligned to 64 byte boundaries such that +// it is possible to mmap the entire file and get an aligned pointer to +// tensor data. +// 3. We universally write in ZIP64 format for consistency. + +// The PyTorchStreamReader also provides additional properties: +// 1. It can read zip files that are created with common +// zip tools. This means that even though our writer doesn't compress files, +// the reader can still read files that were compressed. +// 2. It provides a getRecordOffset function which returns the offset into the +// raw file where file data lives. If the file was written with +// PyTorchStreamWriter it is guaranteed to be 64 byte aligned. + +// PyTorchReader/Writer handle checking the version number on the archive format +// and ensure that all files are written to a archive_name directory so they +// unzip cleanly. + +// When developing this format we want to pay particular attention to the +// following use cases: +// +// -- Reading -- +// 1) Reading with full random access +// a) Reading with file api's such as fread() +// b) mmaping the file and jumping around the mapped region +// 2) Reading with 1-pass sequential access +// -> A reader will need to build up a data structure of parsed structures +// as it reads +// +// -- Writing -- +// 1) Writing with full random access +// 2) Writing with 1-pass sequential access +// -> We must take care not to require updating values that have already +// been written. We place the variable-length index at the end and do +// not put any indicies into the header to fulfill this constraint. 
+ +// The model.json, which contains all the metadata information, +// should be written as the last file. One reason is that the size of tensor +// data is usually stable. As long as the shape and type of the tensor do not +// change, the size of the data won't change. On the other sied, the size of the +// serialized model is likely to change, so we store it as the last record, and +// we don't need to move previous records when updating the model data. + +// The zip format is sufficiently flexible to handle the above use-case. +// it puts its central directory at the end of the archive and we write +// model.json as the last file when writing after we have accumulated all +// other information. + +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const char*") BytePointer kSerializationIdRecordName(); +// Targeting ../PyTorchStreamReader.java -// Parsed from torch/csrc/jit/frontend/script_type_parser.h +// Writer-specific constants +@Namespace("caffe2::serialize::detail") @MemberGetter public static native @Cast("const uint64_t") long kFieldAlignment(); -// #pragma once -// #include -// #include -// #include -// #include -// Targeting ../ScriptTypeParser.java +// Returns a record to be appended to the local user extra data entry in order +// to make data beginning aligned at kFieldAlignment bytes boundary. 
+ // namespace detail - // namespace jit - // namespace torch + // namespace serialize + // namespace caffe2 -// Parsed from torch/csrc/jit/frontend/resolver.h +// Parsed from torch/csrc/jit/serialization/import.h // #pragma once -// #include -// #include -// #include -// Targeting ../Resolver.java +// #include +// #include +// #include +// #include +// #include + // namespace serialize -// Targeting ../NativeResolver.java +// Targeting ../DeserializationStorageContext.java -@Namespace("torch::jit") public static native @SharedPtr NativeResolver nativeResolver(); - // namespace jit - // namespace torch +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in); -// Parsed from torch/csrc/jit/frontend/sugared_value.h +@Namespace("torch::jit") public static 
native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai); -// #pragma once -// #include -// #include -// #include -// #include -// #include +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/, + @Cast("bool") boolean restore_shapes/*=false*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/, + @Cast("bool") boolean restore_shapes/*=false*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../SugaredValue.java +// For reading unified serialization format from torch.Package +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @ByVal @Cast("std::shared_ptr*") Pointer reader, + @SharedPtr DeserializationStorageContext storage_context, + @ByVal DeviceOptional device, + 
@StdString BytePointer ts_id); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @ByVal @Cast("std::shared_ptr*") Pointer reader, + @SharedPtr DeserializationStorageContext storage_context, + @ByVal DeviceOptional device, + @StdString String ts_id); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/, + @Cast("bool") boolean restore_shapes/*=false*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); -// Targeting ../SimpleValue.java +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); +/** Loads a serialized {@code Module} from the given {@code istream}. + * + * The istream must contain a serialized {@code Module}, exported via + * {@code torch::jit::ExportModule} in C++. 
*/ +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @Cast("std::istream*") @ByRef Pointer in, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @Cast("std::istream*") @ByRef Pointer in); -// Targeting ../BuiltinFunction.java +/// +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); -// Targeting ../SugaredTupleValue.java +/** Loads a serialized {@code Module} from the given {@code filename}. + * + * The file stored at the location given in {@code filename} must contain a + * serialized {@code Module}, exported either via {@code ScriptModule.save()} in + * Python or {@code torch::jit::ExportModule} in C++. 
*/ +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString BytePointer filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString BytePointer filename); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString String filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString String filename); -// Targeting ../BuiltinModule.java +/// +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); + +/** Loads a serialized {@code Module} from the given shared_ptr {@code rai}. + * + * The reader adapter, which is for customized input stream, must contain a + * serialized {@code Module}, exported either via {@code ScriptModule.save()} in + * Python or {@code torch::jit::ExportModule} in C++. 
*/ +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @SharedPtr ReadAdapterInterface rai, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @SharedPtr ReadAdapterInterface rai); + +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @SharedPtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( + @SharedPtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); + +@Namespace("torch::jit") public static native @ByVal JitModule jitModuleFromSourceAndConstants( + @Const @ByRef IValue ivalue, + @Const @ByRef ExtraFilesMap source, + @Const @ByRef IValueVector constants, + int version); + +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr BytePointer data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr BytePointer data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr ByteBuffer data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr ByteBuffer data, + @Cast("size_t") long size, 
+ @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr byte[] data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr byte[] data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString BytePointer filename, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString BytePointer filename, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString String filename, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString String filename, + @ByRef ExtraFilesMap extra_files); -// Targeting ../ClassValue.java +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream( + @Cast("std::istream*") @ByRef Pointer in, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream( + @Cast("std::istream*") @ByRef Pointer in, + @ByRef ExtraFilesMap extra_files); + // namespace jit + // namespace torch -// Targeting ../NamedTupleConstructor.java +// Parsed from c10/util/FbcodeMaps.h -// Targeting ../FunctionValue.java +// #ifndef C10_UTIL_FBCODEMAPS_H_ +// #define C10_UTIL_FBCODEMAPS_H_ +// 
Map typedefs so that we can use folly's F14 maps in fbcode without +// taking a folly dependency. -// Targeting ../ClosureValue.java +// #ifdef FBCODE_CAFFE2 +// #include +// #include +// #else +// #include +// #include +// #endif +// #ifdef FBCODE_CAFFE2 +// #else +// #endif + // namespace c10 +// #endif // C10_UTIL_FBCODEMAPS_H_ -// Targeting ../MethodValue.java +// Parsed from torch/csrc/jit/serialization/pickler.h -// Targeting ../PrintValue.java +// #pragma once +// #include +// #include +// #include +// #include -// Targeting ../CastValue.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// See Python's pickletools.py for a detailed description of each of these codes +@Namespace("torch::jit") public enum PickleOpCode { + MARK((byte)('(')), + STOP((byte)('.')), + POP((byte)('0')), + POP_MARK((byte)('1')), + DUP((byte)('2')), + FLOAT((byte)('F')), + INT((byte)('I')), + BININT((byte)('J')), + BININT1((byte)('K')), + LONG((byte)('L')), + BININT2((byte)('M')), + NONE((byte)('N')), + PERSID((byte)('P')), + BINPERSID((byte)('Q')), + REDUCE((byte)('R')), + STRING((byte)('S')), + BINSTRING((byte)('T')), + SHORT_BINSTRING((byte)('U')), + // NB: Avoid using UNICODE as it is a macro in the Windows API + UNICODE_((byte)('V')), + BINUNICODE((byte)('X')), + APPEND((byte)('a')), + BUILD((byte)('b')), + GLOBAL((byte)('c')), + DICT((byte)('d')), + EMPTY_DICT((byte)('}')), + APPENDS((byte)('e')), + GET((byte)('g')), + BINGET((byte)('h')), + INST((byte)('i')), + LONG_BINGET((byte)('j')), + LIST((byte)('l')), + EMPTY_LIST((byte)(']')), + OBJ((byte)('o')), + PUT((byte)('p')), + BINPUT((byte)('q')), + LONG_BINPUT((byte)('r')), + SETITEM((byte)('s')), + TUPLE((byte)('t')), + EMPTY_TUPLE((byte)(')')), + SETITEMS((byte)('u')), + BINFLOAT((byte)('G')), -// Targeting ../TensorCastValue.java + // Protocol 2 + PROTO((byte)(0x80)), + NEWOBJ((byte)(0x81)), + EXT1((byte)(0x82)), + EXT2((byte)(0x83)), + EXT4((byte)(0x84)), + 
TUPLE1((byte)(0x85)), + TUPLE2((byte)(0x86)), + TUPLE3((byte)(0x87)), + NEWTRUE((byte)(0x88)), + NEWFALSE((byte)(0x89)), + LONG1((byte)(0x8a)), + LONG4((byte)(0x8b)), + // Protocol 3 (Python 3.x) + BINBYTES((byte)('B')), + SHORT_BINBYTES((byte)('C')), -// Targeting ../MagicMethod.java + // Protocol 4 + SHORT_BINUNICODE((byte)(0x8c)), + BINUNICODE8((byte)(0x8d)), + BINBYTES8((byte)(0x8e)), + EMPTY_SET((byte)(0x8f)), + ADDITEMS((byte)(0x90)), + FROZENSET((byte)(0x91)), + NEWOBJ_EX((byte)(0x92)), + STACK_GLOBAL((byte)(0x93)), + MEMOIZE((byte)(0x94)), + FRAME((byte)(0x95)); + public final byte value; + private PickleOpCode(byte v) { this.value = v; } + private PickleOpCode(PickleOpCode e) { this.value = e.value; } + public PickleOpCode intern() { for (PickleOpCode e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../WriteableTensorData.java -// Targeting ../SpecialFormValue.java -// Targeting ../LegacyTensorConstructor.java -// Targeting ../RangeValue.java +// Targeting ../Pickler.java -// Specialized Tree structure to matched against for special handling -// of builtin functions iterables expressions like zip(), enumerate(), etc. -// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: -// zip(x, y) -> (x, y) with tuple assignment to each loop target -// enumerate(x) -> (range(0, math.inf, 1), x) -// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: -// (a, (range(0, math.inf, 1), b), range(0, 100)) -// We use those base iterables to fill in the loop information like -// max_trip_count and set the value table for loop targets -// Iterables can contain lists of SugaredValues like ModuleLists. If it -// does, then we emit it unrolled and require that all values it contains -// have a statically-determinable length. +// returns a (tensor, record_size) for a tensor, converting it to a CPU tensor +// if it was CUDA and to_cpu is True. 
+@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor, @Cast("bool") boolean to_cpu/*=true*/); +@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor); -@Namespace("torch::jit") public static native @ByVal ValueVector toValues( - @ByRef Graph g, - @ByVal NamedValueArrayRef nvs); -// Targeting ../SimpleSelf.java +// return the value of the tensor's storage pointer -// Targeting ../ExceptionMessageValue.java +// if the cls has __getstate__/__setstate__ +// assert they have the right schema and return true, +// otherwise return false -// Targeting ../ExceptionValue.java +// Declare BackendMeta serialization and deserialization function pointer types. +// A allowlist of device type, currently available is PrivateUse1 +@Namespace("torch::jit") public static native @ByRef DeviceTypeSet GetBackendMetaAllowlist(); -// Targeting ../SugaredEnumClass.java +// Dynamically obtain serialization function pairs +// that require the corresponding backend. +@Namespace("torch::jit") public static native @Cast("std::array >,at::COMPILE_TIME_MAX_DEVICE_TYPES>*") @ByRef PointerPairOptional GetBackendMetaSerialization(); +// Register function pointer of Tensor BackendMetadata for serialization. +@Namespace("torch::jit") public static native void TensorBackendMetaRegistry( + DeviceType t, + @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer get_fptr, + @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer set_fptr); +@Namespace("torch::jit") public static native void TensorBackendMetaRegistry( + @Cast("c10::DeviceType") byte t, + @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer get_fptr, + @ByVal @Cast("torch::jit::BackendMetaPtr*") Pointer set_fptr); -// Targeting ../SliceValue.java +// Return a map of Tensor Metadata which including BackendMetaData for +// serialization. For now, it only takes care of `conj` and `neg` bit. 
+@Namespace("torch::jit") public static native @ByVal StringBoolMap getTensorMetadata( + @Const @ByRef Tensor t); +// set Tensor Metadata based on the map. +// Refer: getTensorMetadata +@Namespace("torch::jit") public static native void setTensorMetadata( + @Const @ByRef Tensor t, + @ByVal StringBoolMap metadata); +// set Tensor metadata based on the map. +// NOTE: This overload is required by unpickler.cpp +@Namespace("torch::jit") public static native void setTensorMetadata( + @Const @ByRef Tensor t, + @ByVal GenericDict metadata_idict); // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/error_report.h +// Parsed from torch/csrc/jit/frontend/parser_constants.h // #pragma once - -// #include -// #include -// Targeting ../Call.java - - -// Targeting ../ErrorReport.java - - - +@Namespace("torch::jit") public static native @Cast("const char*") BytePointer valid_single_char_tokens(); public static native void valid_single_char_tokens(BytePointer setter); // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/tree.h +// Parsed from torch/csrc/jit/frontend/strtod.h // #pragma once -// #include -// #include -// #include -// #include - -// #include -// #include -// #include - -// Trees are used to represent all forms of TC IR, pre- and post-typechecking. -// Rather than have a full class hierarchy for all TC statements, trees are a -// slight variation of Lisp s-expressions. For instance, the expression a*b+1 -// is represented as: -// (+ (* (ident a) (ident b)) (const 1)) -// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which -// define stringValue(). Everything else is a Compound object, which has a -// 'kind' that is a token from lexer.h's TokenKind enum. Single-character -// operators like '+' are represented using the character itself (so, add.kind() -// would be '+'). Each Compound object also contains a list of subtrees and is -// associated with a SourceRange for error reporting. 
-// Memory management of trees is done using intrusive_ptr. -// Targeting ../Tree.java - - -// Targeting ../JitString.java - - - -@Namespace("torch::jit") public static native @ByVal SourceRange mergeRanges(@ByVal SourceRange c, @Cast("const torch::jit::TreeList*") @ByRef SymDimVector others); -// Targeting ../Compound.java - - -// Targeting ../pretty_tree.java - - - -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal pretty_tree t_); +// #include -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef TreeRef t); +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") PointerPointer endptr); +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); +@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); +@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") PointerPointer endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); 
+@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); +@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); // namespace jit // namespace torch @@ -75371,34 +77537,66 @@ scalar_t sf(scalar_t x, scalar_t y) -// Parsed from torch/csrc/jit/frontend/parser_constants.h +// Parsed from torch/csrc/jit/frontend/tree.h // #pragma once -@Namespace("torch::jit") public static native @Cast("const char*") BytePointer valid_single_char_tokens(); public static native void valid_single_char_tokens(BytePointer setter); + +// #include +// #include +// #include +// #include + +// #include +// #include +// #include + +// Trees are used to represent all forms of TC IR, pre- and post-typechecking. +// Rather than have a full class hierarchy for all TC statements, trees are a +// slight variation of Lisp s-expressions. For instance, the expression a*b+1 +// is represented as: +// (+ (* (ident a) (ident b)) (const 1)) +// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which +// define stringValue(). Everything else is a Compound object, which has a +// 'kind' that is a token from lexer.h's TokenKind enum. Single-character +// operators like '+' are represented using the character itself (so, add.kind() +// would be '+'). Each Compound object also contains a list of subtrees and is +// associated with a SourceRange for error reporting. +// Memory management of trees is done using intrusive_ptr. 
+// Targeting ../Tree.java + + +// Targeting ../JitString.java + + + +@Namespace("torch::jit") public static native @ByVal SourceRange mergeRanges(@ByVal SourceRange c, @Cast("const torch::jit::TreeList*") @ByRef SymDimVector others); +// Targeting ../Compound.java + + +// Targeting ../pretty_tree.java + + + +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal pretty_tree t_); + +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef TreeRef t); + // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/strtod.h +// Parsed from torch/csrc/jit/frontend/error_report.h // #pragma once -// #include +// #include +// #include +// Targeting ../Call.java + + +// Targeting ../ErrorReport.java + -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") PointerPointer endptr); -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); -@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); -@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, 
@Cast("char**") PointerPointer endptr); -@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); -@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); // namespace jit // namespace torch @@ -75406,111 +77604,234 @@ scalar_t sf(scalar_t x, scalar_t y) // Parsed from torch/csrc/jit/frontend/schema_matching.h -// #pragma once -// #include -// #include -// #include +// #pragma once +// #include +// #include +// #include + +// #include +// Targeting ../MatchedSchema.java + + + +@Namespace("torch::jit") public static native @Cast("bool") boolean isBlockListedSchema(@Const @ByRef FunctionSchema schema); + +@Namespace("torch::jit") public static native @ByVal MatchedSchema matchSchema( + @Const @ByRef FunctionSchema schema, + @Const @ByRef SourceRange loc, + @ByRef Graph graph, + @ByVal NamedValueArrayRef args, + @ByVal NamedValueArrayRef kwargs, + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self); +@Namespace("torch::jit") public static native @ByVal MatchedSchema matchSchema( + @Const @ByRef FunctionSchema schema, + @Const @ByRef SourceRange loc, + @ByRef Graph graph, + @ByVal NamedValueArrayRef args, + @ByVal NamedValueArrayRef kwargs); + +@Namespace("torch::jit") public static native @ByVal SizeTMatchedSchemaPair matchSchemas( + @Const @ByRef FunctionSchemaVector 
schemas, + @Const @ByRef SourceRange loc, + @ByRef Graph graph, + @ByVal NamedValueArrayRef args, + @ByVal NamedValueArrayRef kwargs, + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self, + @Cast("bool") boolean render_errors/*=false*/); +@Namespace("torch::jit") public static native @ByVal SizeTMatchedSchemaPair matchSchemas( + @Const @ByRef FunctionSchemaVector schemas, + @Const @ByRef SourceRange loc, + @ByRef Graph graph, + @ByVal NamedValueArrayRef args, + @ByVal NamedValueArrayRef kwargs); + +@Namespace("torch::jit") public static native @Cast("bool") boolean convertibleToList( + @Const @ByRef Type.TypePtr type, + @Const @ByRef Type.TypePtr list_type_); + +@Namespace("torch::jit") public static native @StdString BytePointer getFullSchemaName(@Const @ByRef FunctionSchema schema); + +@Namespace("torch::jit") public static native Value emitBuiltinCall( + @Const @ByRef SourceRange loc, + @ByRef Graph graph, + @ByVal Symbol name, + @ByVal NamedValueArrayRef args, + @ByVal NamedValueArrayRef kwargs, + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self); +@Namespace("torch::jit") public static native Value emitBuiltinCall( + @Const @ByRef SourceRange loc, + @ByRef Graph graph, + @ByVal Symbol name, + @ByVal NamedValueArrayRef args, + @ByVal NamedValueArrayRef kwargs); + +@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( + @StdString BytePointer name, + @ByVal NamedValueArrayRef kwargs, + @Cast("bool") boolean is_aten/*=false*/); +@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( + @StdString BytePointer name, + @ByVal NamedValueArrayRef kwargs); +@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( + @StdString String name, + @ByVal NamedValueArrayRef kwargs, + @Cast("bool") boolean is_aten/*=false*/); +@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( + @StdString 
String name, + @ByVal NamedValueArrayRef kwargs); + +// applies implicit conversion from value trying to turn it into type +// concrete_type it succeeds if the return_value->isSubtypeOf(concrete_type) +@Namespace("torch::jit") public static native Value tryConvertToType( + @Const @ByRef SourceRange loc, + @ByRef Graph graph, + @Const @ByRef Type.TypePtr concrete_type, + Value value, + @Cast("bool") boolean allow_conversions); + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/versioned_symbols.h + +// #pragma once + +// #include +// #include +// #include + +// #include +// Maps the given symbol into an implementation of its behavior at the +// given version. +// See note [Versioned Symbols] +@Namespace("torch::jit") public static native @ByVal Symbol get_symbol_for_version(@Const @ByVal Symbol name, @Cast("const uint64_t") long version); + +// Maps the given kind to the minimum version that supports it. +// See note [Dynamic Versions and torch.jit.save vs. 
torch.save] +@Namespace("torch::jit") public static native @Cast("uint64_t") long get_min_version_for_kind(@Cast("const torch::jit::NodeKind*") @ByRef Symbol kind); + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/sugared_value.h + +// #pragma once +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../SugaredValue.java + + +// Targeting ../SimpleValue.java + + +// Targeting ../BuiltinFunction.java + + +// Targeting ../SugaredTupleValue.java + + +// Targeting ../BuiltinModule.java + + +// Targeting ../ClassValue.java + + +// Targeting ../NamedTupleConstructor.java + + +// Targeting ../FunctionValue.java + + +// Targeting ../ClosureValue.java + + +// Targeting ../MethodValue.java + + +// Targeting ../PrintValue.java + + +// Targeting ../CastValue.java + + +// Targeting ../TensorCastValue.java + + +// Targeting ../MagicMethod.java + + +// Targeting ../SpecialFormValue.java + + +// Targeting ../LegacyTensorConstructor.java + + +// Targeting ../RangeValue.java + + + +// Specialized Tree structure to matched against for special handling +// of builtin functions iterables expressions like zip(), enumerate(), etc. +// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: +// zip(x, y) -> (x, y) with tuple assignment to each loop target +// enumerate(x) -> (range(0, math.inf, 1), x) +// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: +// (a, (range(0, math.inf, 1), b), range(0, 100)) +// We use those base iterables to fill in the loop information like +// max_trip_count and set the value table for loop targets +// Iterables can contain lists of SugaredValues like ModuleLists. If it +// does, then we emit it unrolled and require that all values it contains +// have a statically-determinable length. 
+ +@Namespace("torch::jit") public static native @ByVal ValueVector toValues( + @ByRef Graph g, + @ByVal NamedValueArrayRef nvs); +// Targeting ../SimpleSelf.java -// #include -// Targeting ../MatchedSchema.java +// Targeting ../ExceptionMessageValue.java -@Namespace("torch::jit") public static native @Cast("bool") boolean isBlockListedSchema(@Const @ByRef FunctionSchema schema); +// Targeting ../ExceptionValue.java -@Namespace("torch::jit") public static native @ByVal MatchedSchema matchSchema( - @Const @ByRef FunctionSchema schema, - @Const @ByRef SourceRange loc, - @ByRef Graph graph, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self); -@Namespace("torch::jit") public static native @ByVal MatchedSchema matchSchema( - @Const @ByRef FunctionSchema schema, - @Const @ByRef SourceRange loc, - @ByRef Graph graph, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs); -@Namespace("torch::jit") public static native @ByVal SizeTMatchedSchemaPair matchSchemas( - @Const @ByRef FunctionSchemaVector schemas, - @Const @ByRef SourceRange loc, - @ByRef Graph graph, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self, - @Cast("bool") boolean render_errors/*=false*/); -@Namespace("torch::jit") public static native @ByVal SizeTMatchedSchemaPair matchSchemas( - @Const @ByRef FunctionSchemaVector schemas, - @Const @ByRef SourceRange loc, - @ByRef Graph graph, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs); +// Targeting ../SugaredEnumClass.java -@Namespace("torch::jit") public static native @Cast("bool") boolean convertibleToList( - @Const @ByRef Type.TypePtr type, - @Const @ByRef Type.TypePtr list_type_); -@Namespace("torch::jit") public static native @StdString BytePointer getFullSchemaName(@Const @ByRef FunctionSchema schema); +// Targeting 
../SliceValue.java -@Namespace("torch::jit") public static native Value emitBuiltinCall( - @Const @ByRef SourceRange loc, - @ByRef Graph graph, - @ByVal Symbol name, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self); -@Namespace("torch::jit") public static native Value emitBuiltinCall( - @Const @ByRef SourceRange loc, - @ByRef Graph graph, - @ByVal Symbol name, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs); -@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( - @StdString BytePointer name, - @ByVal NamedValueArrayRef kwargs, - @Cast("bool") boolean is_aten/*=false*/); -@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( - @StdString BytePointer name, - @ByVal NamedValueArrayRef kwargs); -@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( - @StdString String name, - @ByVal NamedValueArrayRef kwargs, - @Cast("bool") boolean is_aten/*=false*/); -@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName( - @StdString String name, - @ByVal NamedValueArrayRef kwargs); -// applies implicit conversion from value trying to turn it into type -// concrete_type it succeeds if the return_value->isSubtypeOf(concrete_type) -@Namespace("torch::jit") public static native Value tryConvertToType( - @Const @ByRef SourceRange loc, - @ByRef Graph graph, - @Const @ByRef Type.TypePtr concrete_type, - Value value, - @Cast("bool") boolean allow_conversions); // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/versioned_symbols.h +// Parsed from torch/csrc/jit/frontend/resolver.h // #pragma once -// #include -// #include -// #include +// #include +// #include +// #include +// Targeting ../Resolver.java -// #include -// Maps the given symbol into an implementation of its behavior at the -// given version. 
-// See note [Versioned Symbols] -@Namespace("torch::jit") public static native @ByVal Symbol get_symbol_for_version(@Const @ByVal Symbol name, @Cast("const uint64_t") long version); -// Maps the given kind to the minimum version that supports it. -// See note [Dynamic Versions and torch.jit.save vs. torch.save] -@Namespace("torch::jit") public static native @Cast("uint64_t") long get_min_version_for_kind(@Cast("const torch::jit::NodeKind*") @ByRef Symbol kind); +// Targeting ../NativeResolver.java + + + +@Namespace("torch::jit") public static native @SharedPtr NativeResolver nativeResolver(); // namespace jit // namespace torch @@ -75732,187 +78053,33 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace std -// Parsed from torch/csrc/jit/serialization/pickler.h +// Parsed from torch/csrc/jit/frontend/script_type_parser.h // #pragma once - -// #include -// #include -// #include -// #include - -// #include -// #include // #include -// #include // #include +// #include +// #include +// Targeting ../ScriptTypeParser.java -// See Python's pickletools.py for a detailed description of each of these codes -@Namespace("torch::jit") public enum PickleOpCode { - MARK((byte)('(')), - STOP((byte)('.')), - POP((byte)('0')), - POP_MARK((byte)('1')), - DUP((byte)('2')), - FLOAT((byte)('F')), - INT((byte)('I')), - BININT((byte)('J')), - BININT1((byte)('K')), - LONG((byte)('L')), - BININT2((byte)('M')), - NONE((byte)('N')), - PERSID((byte)('P')), - BINPERSID((byte)('Q')), - REDUCE((byte)('R')), - STRING((byte)('S')), - BINSTRING((byte)('T')), - SHORT_BINSTRING((byte)('U')), - // NB: Avoid using UNICODE as it is a macro in the Windows API - UNICODE_((byte)('V')), - BINUNICODE((byte)('X')), - APPEND((byte)('a')), - BUILD((byte)('b')), - GLOBAL((byte)('c')), - DICT((byte)('d')), - EMPTY_DICT((byte)('}')), - APPENDS((byte)('e')), - GET((byte)('g')), - BINGET((byte)('h')), - INST((byte)('i')), - LONG_BINGET((byte)('j')), - LIST((byte)('l')), - EMPTY_LIST((byte)(']')), - OBJ((byte)('o')), 
- PUT((byte)('p')), - BINPUT((byte)('q')), - LONG_BINPUT((byte)('r')), - SETITEM((byte)('s')), - TUPLE((byte)('t')), - EMPTY_TUPLE((byte)(')')), - SETITEMS((byte)('u')), - BINFLOAT((byte)('G')), - - // Protocol 2 - PROTO((byte)(0x80)), - NEWOBJ((byte)(0x81)), - EXT1((byte)(0x82)), - EXT2((byte)(0x83)), - EXT4((byte)(0x84)), - TUPLE1((byte)(0x85)), - TUPLE2((byte)(0x86)), - TUPLE3((byte)(0x87)), - NEWTRUE((byte)(0x88)), - NEWFALSE((byte)(0x89)), - LONG1((byte)(0x8a)), - LONG4((byte)(0x8b)), - - // Protocol 3 (Python 3.x) - BINBYTES((byte)('B')), - SHORT_BINBYTES((byte)('C')), - - // Protocol 4 - SHORT_BINUNICODE((byte)(0x8c)), - BINUNICODE8((byte)(0x8d)), - BINBYTES8((byte)(0x8e)), - EMPTY_SET((byte)(0x8f)), - ADDITEMS((byte)(0x90)), - FROZENSET((byte)(0x91)), - NEWOBJ_EX((byte)(0x92)), - STACK_GLOBAL((byte)(0x93)), - MEMOIZE((byte)(0x94)), - FRAME((byte)(0x95)); - - public final byte value; - private PickleOpCode(byte v) { this.value = v; } - private PickleOpCode(PickleOpCode e) { this.value = e.value; } - public PickleOpCode intern() { for (PickleOpCode e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../WriteableTensorData.java - - - - - -// Targeting ../Pickler.java - - - -// returns a (tensor, record_size) for a tensor, converting it to a CPU tensor -// if it was CUDA and to_cpu is True. -@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor, @Cast("bool") boolean to_cpu/*=true*/); -@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor); - -// return the value of the tensor's storage pointer - - -// if the cls has __getstate__/__setstate__ -// assert they have the right schema and return true, -// otherwise return false - - -// Return a map of Tensor Metadata for serialization. 
-// For now, it only takes care of `conj` and `neg` bit. -@Namespace("torch::jit") public static native @ByVal StringBoolMap getTensorMetadata( - @Const @ByRef Tensor t); - -// set Tensor Metadata based on the map. -// Refer: getTensorMathdata -@Namespace("torch::jit") public static native void setTensorMetadata( - @Const @ByRef Tensor t, - @ByVal StringBoolMap metadata); - -// set Tensor metadata based on the map. -// NOTE: This overload is required by unpickler.cpp -@Namespace("torch::jit") public static native void setTensorMetadata( - @Const @ByRef Tensor t, - @ByVal GenericDict metadata_idict); // namespace jit // namespace torch -// Parsed from torch/torch.h - -// #pragma once - -// #include - -// #ifdef TORCH_API_INCLUDE_EXTENSION_H -// #include - -// #endif // defined(TORCH_API_INCLUDE_EXTENSION_H) - - -// Parsed from ATen/native/TensorShape.h - -// #pragma once -// #include -// #include -// #include - -@Namespace("at::native") public static native @ByVal Tensor clone_preserve_strides(@Const @ByRef Tensor self); - -@Namespace("at::native") public static native @Cast("bool") boolean cat_should_skip_tensor(@Const @ByRef Tensor t); - - // Check to see if the shape of tensors is compatible - // for being concatenated along a given dimension. 
-@Namespace("at::native") public static native void check_cat_shape_except_dim(@Const @ByRef Tensor first, @Const @ByRef Tensor second, @Cast("int64_t") long dimension, @Cast("int64_t") long index); - -@Namespace("at::native") public static native @Cast("int64_t") long get_num_splits(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim); - - // namespace at::native - - -// Parsed from torch/csrc/jit/serialization/storage_context.h +// Parsed from torch/csrc/jit/serialization/unpickler.h // #pragma once // #include -// Targeting ../SerializationStorageContext.java +// #include +// #include +// #include +// #include +// #include +// Targeting ../Unpickler.java -// Targeting ../DeserializationStorageContext.java @@ -75920,259 +78087,122 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/csrc/jit/serialization/import.h +// Parsed from torch/csrc/jit/serialization/pickle.h // #pragma once // #include +// #include // #include -// #include -// #include +// #include +// #include // #include -// #include - // namespace serialize - // namespace caffe2 - -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString BytePointer filename, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString BytePointer filename); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString String filename, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString String filename); - -@Namespace("torch::jit") 
public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @Cast("std::istream*") @ByRef Pointer in, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @Cast("std::istream*") @ByRef Pointer in); - -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @UniquePtr ReadAdapterInterface rai, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @UniquePtr ReadAdapterInterface rai); - -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString BytePointer filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/, - @Cast("bool") boolean restore_shapes/*=false*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString BytePointer filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString String filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/, - @Cast("bool") boolean restore_shapes/*=false*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @StdString String filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); - -// For reading unified serialization format from torch.Package 
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @ByVal @Cast("std::shared_ptr*") Pointer reader, - @SharedPtr DeserializationStorageContext storage_context, - @ByVal DeviceOptional device, - @StdString BytePointer ts_id); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @ByVal @Cast("std::shared_ptr*") Pointer reader, - @SharedPtr DeserializationStorageContext storage_context, - @ByVal DeviceOptional device, - @StdString String ts_id); - -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @Cast("std::istream*") @ByRef Pointer in, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/, - @Cast("bool") boolean restore_shapes/*=false*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @Cast("std::istream*") @ByRef Pointer in, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); - -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @UniquePtr ReadAdapterInterface rai, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( - @SharedPtr CompilationUnit cu, - @UniquePtr ReadAdapterInterface rai, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); - -/** Loads a serialized {@code Module} from the given {@code istream}. +/** Pickle an IValue by calling a function to handle writing the data. * - * The istream must contain a serialized {@code Module}, exported via - * {@code torch::jit::ExportModule} in C++. 
*/ -@Namespace("torch::jit") public static native @ByVal JitModule load( - @Cast("std::istream*") @ByRef Pointer in, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @Cast("std::istream*") @ByRef Pointer in); - - -/// -@Namespace("torch::jit") public static native @ByVal JitModule load( - @Cast("std::istream*") @ByRef Pointer in, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @Cast("std::istream*") @ByRef Pointer in, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); - -/** Loads a serialized {@code Module} from the given {@code filename}. + * {@code writer} is a function that takes in a pointer to a chunk of memory and its + * size and consumes it. * - * The file stored at the location given in {@code filename} must contain a - * serialized {@code Module}, exported either via {@code ScriptModule.save()} in - * Python or {@code torch::jit::ExportModule} in C++. */ -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString BytePointer filename, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString BytePointer filename); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString String filename, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString String filename); - + * See {@code jit::pickle} for more details. 
*/ /// -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString BytePointer filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString BytePointer filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString String filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @StdString String filename, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); +/// +/// +/// +/// +/// +/// +/// +@Namespace("torch::jit") public static native void pickle( + @ByVal PickleWriter writer, + @Const @ByRef IValue ivalue, + TensorVector tensor_table/*=nullptr*/); +@Namespace("torch::jit") public static native void pickle( + @ByVal PickleWriter writer, + @Const @ByRef IValue ivalue); -/** Loads a serialized {@code Module} from the given shared_ptr {@code rai}. +/** Save a {@code torch::IValue} in a format compatible with Python's {@code pickle} module * - * The reader adapter, which is for customized input stream, must contain a - * serialized {@code Module}, exported either via {@code ScriptModule.save()} in - * Python or {@code torch::jit::ExportModule} in C++. 
*/ -@Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai); + * If present, {@code tensor_table} is a pointer to a table in which tensors that + * are contained within {@code ivalue} are stored, and the bytes returned by the + * pickler will only include references to these tensors in the table. This can + * be used to keep the binary blob size small. + * If not provided, tensors are stored in the same byte stream as the pickle + * data, similar to {@code torch.save()} in eager Python. + * + * Pickled values can be loaded in Python and C++: + * \rst + * .. code-block:: cpp + * + * torch::IValue float_value(2.3); + * + * // TODO: when tensors are stored in the pickle, delete this + * std::vector tensor_table; + * auto data = torch::jit::pickle(float_value, &tensor_table); + * + * std::vector ivalues = + * torch::jit::unpickle(data.data(), data.size()); + * + * .. 
code-block:: python + * + * values = torch.load('data.pkl') + * print(values) + * + * \endrst */ +@Namespace("torch::jit") public static native @Cast("char*") @StdVector BytePointer pickle( + @Const @ByRef IValue ivalue, + TensorVector tensor_table/*=nullptr*/); +@Namespace("torch::jit") public static native @Cast("char*") @StdVector BytePointer pickle( + @Const @ByRef IValue ivalue); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule load( - @SharedPtr ReadAdapterInterface rai, - @ByVal DeviceOptional device, - @ByRef ExtraFilesMap extra_files); +/** Save a {@code torch::IValue} in a format that can be loaded by both + * {@code torch::pickle_load} in C++ and {@code torch.load} in Python. */ -@Namespace("torch::jit") public static native @ByVal JitModule jitModuleFromSourceAndConstants( - @Const @ByRef IValue ivalue, - @Const @ByRef ExtraFilesMap source, - @Const @ByRef IValueVector constants, - int version); +/** Deserialize a {@code torch::IValue} from bytes produced by either + * {@code torch::pickle_save} in C++ or {@code torch.save} in Python */ -@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( - @Cast("char*") @SharedPtr BytePointer data, - @Cast("size_t") long size, - @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( - @Cast("char*") @SharedPtr BytePointer data, - @Cast("size_t") long size, - @ByRef ExtraFilesMap extra_files); -@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( - @Cast("char*") @SharedPtr ByteBuffer data, - @Cast("size_t") long size, - @ByRef ExtraFilesMap 
extra_files, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( - @Cast("char*") @SharedPtr ByteBuffer data, - @Cast("size_t") long size, - @ByRef ExtraFilesMap extra_files); -@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( - @Cast("char*") @SharedPtr byte[] data, +/** {@code reader} is a function that takes in a size to read from some pickled + * binary. {@code reader} should remember where it last read, and return + * the number of bytes read. + * See {@code torch::pickle} for details. + * type_resolver is used to resolve any JIT type based on type str */ + +/// +/// +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @ByVal PickleReader reader, + @ByVal TypeResolver type_resolver, + @ByVal TensorArrayRef tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @ByVal PickleReader reader, + @ByVal TypeResolver type_resolver, + @ByVal TensorArrayRef tensor_table); + +/** Decode a chunk of memory containing pickled data into its {@code torch::IValue}s. + * + * If any {@code torch::IValue}s in the pickled data are {@code Object}s, then a + * {@code class_resolver} function must be provided. + * + * See {@code torch::pickle} for details. 
*/ +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @Cast("const char*") BytePointer data, @Cast("size_t") long size, - @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( - @Cast("char*") @SharedPtr byte[] data, + @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + @Cast("const char*") BytePointer data, + @Cast("size_t") long size); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + String data, @Cast("size_t") long size, - @ByRef ExtraFilesMap extra_files); - -@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( - @StdString BytePointer filename, - @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( - @StdString BytePointer filename, - @ByRef ExtraFilesMap extra_files); -@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( - @StdString String filename, - @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( - @StdString String filename, - @ByRef ExtraFilesMap extra_files); - -@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream( - @Cast("std::istream*") @ByRef Pointer in, - @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("torch::jit") public static 
native @ByVal JitModule load_jit_module_from_stream( - @Cast("std::istream*") @ByRef Pointer in, - @ByRef ExtraFilesMap extra_files); + @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") TypeResolver type_resolver, + @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table, + TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); +@Namespace("torch::jit") public static native @ByVal IValue unpickle( + String data, + @Cast("size_t") long size); // namespace jit // namespace torch diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index 1b188be3648..1a94768b09f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -101,6 +101,116 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 +// Parsed from c10/core/impl/GPUTrace.h + +// #pragma once + +// #include + + // namespace impl + // namespace c10 + + +// Parsed from c10/cuda/impl/cuda_cmake_macros.h + +// #pragma once + +// Automatically generated header file for the C10 CUDA library. Do not +// include this file directly. Instead, include c10/cuda/CUDAMacros.h + +// #define C10_CUDA_BUILD_SHARED_LIBS + + +// Parsed from c10/cuda/CUDAMacros.h + +// #pragma once + +// #ifndef C10_USING_CUSTOM_GENERATED_MACROS + +// We have not yet modified the AMD HIP build to generate this file so +// we add an extra option to specifically ignore it. +// #ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE +// #include +// #endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE + +// #endif + +// See c10/macros/Export.h for a detailed explanation of what the function +// of these macros are. We need one set of macros for every separate library +// we build. 
+ +// #ifdef _WIN32 +// #else // _WIN32 +// #if defined(__GNUC__) +// #define C10_CUDA_EXPORT __attribute__((__visibility__("default"))) +// #else // defined(__GNUC__) +// #define C10_CUDA_EXPORT +// #endif // defined(__GNUC__) +// #define C10_CUDA_IMPORT C10_CUDA_EXPORT +// #endif // _WIN32 + +// This one is being used by libc10_cuda.so +// #ifdef C10_CUDA_BUILD_MAIN_LIB +// #define C10_CUDA_API C10_CUDA_EXPORT +// #else +// #define C10_CUDA_API C10_CUDA_IMPORT +// #endif + +/** + * The maximum number of GPUs that we recognizes. + */ +public static final int C10_COMPILE_TIME_MAX_GPUS = 16; + + +// Parsed from c10/cuda/CUDADeviceAssertionHost.h + +// #pragma once + +// #include + +// #include +// #include +// #include +// #include + +// #ifdef USE_CUDA +// #define TORCH_USE_CUDA_DSA +// #endif + +/** Number of assertion failure messages we can store. If this is too small + * threads will fail silently. */ +@MemberGetter public static native int C10_CUDA_DSA_ASSERTION_COUNT(); +@MemberGetter public static native int C10_CUDA_DSA_MAX_STR_LEN(); +// Targeting ../cuda/DeviceAssertionData.java + + +// Targeting ../cuda/DeviceAssertionsData.java + + +// Targeting ../cuda/CUDAKernelLaunchInfo.java + + +// Targeting ../cuda/CUDAKernelLaunchRegistry.java + + + + + + // namespace cuda + // namespace c10 + +// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH +// requires the same input arguments. We introduce the following macro to +// standardize these. 
+// #define TORCH_DSA_KERNEL_ARGS +// [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, +// [[maybe_unused]] uint32_t assertion_caller_id + +// This macro can be used to pass the DSA arguments onward to another +// function +// #define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id + + // Parsed from c10/cuda/CUDAStream.h // #pragma once @@ -156,6 +266,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { * on the matter, streams are thread safe; e.g., it is safe to enqueue * a kernel on the same stream from two different threads. */ + +@Namespace("c10::cuda") @MemberGetter public static native int max_compile_time_stream_priorities(); +public static final int max_compile_time_stream_priorities = max_compile_time_stream_priorities(); // Targeting ../cuda/CUDAStream.java @@ -172,6 +285,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { */ @Namespace("c10::cuda") public static native @ByVal CUDAStream getStreamFromPool(@Cast("const bool") boolean isHighPriority/*=false*/, byte device/*=-1*/); @Namespace("c10::cuda") public static native @ByVal CUDAStream getStreamFromPool(); +// no default priority to disambiguate overloads +@Namespace("c10::cuda") public static native @ByVal CUDAStream getStreamFromPool(int priority, byte device/*=-1*/); +@Namespace("c10::cuda") public static native @ByVal CUDAStream getStreamFromPool(int priority); /** * Get a CUDAStream from a externally allocated one. @@ -220,228 +336,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace std -// Parsed from ATen/cuda/CUDAContext.h - -// #pragma once - -// #include - -// #include -// #include -// #include - -// #ifdef CUDART_VERSION -// #include -// #endif - -// #include -// #include -// #include -// #include -// #include - -/* -A common CUDA interface for ATen. 
- -This interface is distinct from CUDAHooks, which defines an interface that links -to both CPU-only and CUDA builds. That interface is intended for runtime -dispatch and should be used from files that are included in both CPU-only and -CUDA builds. - -CUDAContext, on the other hand, should be preferred by files only included in -CUDA builds. It is intended to expose CUDA functionality in a consistent -manner. - -This means there is some overlap between the CUDAContext and CUDAHooks, but -the choice of which to use is simple: use CUDAContext when in a CUDA-only file, -use CUDAHooks otherwise. - -Note that CUDAContext simply defines an interface with no associated class. -It is expected that the modules whose functions compose this interface will -manage their own state. There is only a single CUDA context/state. -*/ - -/** - * DEPRECATED: use device_count() instead - */ -@Namespace("at::cuda") public static native @Cast("int64_t") long getNumGPUs(); - -/** - * CUDA is available if we compiled with CUDA, and there are one or more - * devices. If we compiled with CUDA but there is a driver problem, etc., - * this function will report CUDA is not available (rather than raise an error.) 
- */ -@Namespace("at::cuda") public static native @Cast("bool") boolean is_available(); - -@Namespace("at::cuda") public static native Pointer getCurrentDeviceProperties(); - -@Namespace("at::cuda") public static native int warp_size(); - -@Namespace("at::cuda") public static native Pointer getDeviceProperties(@Cast("int64_t") long device); - -@Namespace("at::cuda") public static native @Cast("bool") boolean canDeviceAccessPeer( - @Cast("int64_t") long device, - @Cast("int64_t") long peer_device); - -@Namespace("at::cuda") public static native Allocator getCUDADeviceAllocator(); - -/* Handles */ -@Namespace("at::cuda") public static native @Cast("cusparseHandle_t") Pointer getCurrentCUDASparseHandle(); -@Namespace("at::cuda") public static native @Cast("cublasHandle_t") Pointer getCurrentCUDABlasHandle(); - -@Namespace("at::cuda") public static native void clearCublasWorkspaces(); - -// #ifdef CUDART_VERSION -@Namespace("at::cuda") public static native @Cast("cusolverDnHandle_t") Pointer getCurrentCUDASolverDnHandle(); -// #endif - - // namespace cuda - // namespace at - - -// Parsed from c10/core/impl/GPUTrace.h - -// #pragma once - -// #include - - // namespace impl - // namespace c10 - - -// Parsed from c10/cuda/CUDADeviceAssertionHost.h - -// #pragma once - -// #include - -// #include -// #include -// #include -// #include - -// #ifdef USE_CUDA -// #define TORCH_USE_CUDA_DSA -// #endif - -/** Number of assertion failure messages we can store. If this is too small - * threads will fail silently. 
*/ -@MemberGetter public static native int C10_CUDA_DSA_ASSERTION_COUNT(); -@MemberGetter public static native int C10_CUDA_DSA_MAX_STR_LEN(); -// Targeting ../cuda/DeviceAssertionData.java - - -// Targeting ../cuda/DeviceAssertionsData.java - - -// Targeting ../cuda/CUDAKernelLaunchInfo.java - - -// Targeting ../cuda/CUDAKernelLaunchRegistry.java - - - - - - // namespace cuda - // namespace c10 - -// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH -// requires the same input arguments. We introduce the following macro to -// standardize these. -// #define TORCH_DSA_KERNEL_ARGS -// [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, -// [[maybe_unused]] uint32_t assertion_caller_id - -// This macro can be used to pass the DSA arguments onward to another -// function -// #define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id - - -// Parsed from c10/cuda/CUDAMacros.h - -// #pragma once - -// #ifndef C10_USING_CUSTOM_GENERATED_MACROS - -// We have not yet modified the AMD HIP build to generate this file so -// we add an extra option to specifically ignore it. -// #ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE -// #include -// #endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE - -// #endif - -// See c10/macros/Export.h for a detailed explanation of what the function -// of these macros are. We need one set of macros for every separate library -// we build. - -// #ifdef _WIN32 -// #else // _WIN32 -// #if defined(__GNUC__) -// #define C10_CUDA_EXPORT __attribute__((__visibility__("default"))) -// #else // defined(__GNUC__) -// #define C10_CUDA_EXPORT -// #endif // defined(__GNUC__) -// #define C10_CUDA_IMPORT C10_CUDA_EXPORT -// #endif // _WIN32 - -// This one is being used by libc10_cuda.so -// #ifdef C10_CUDA_BUILD_MAIN_LIB -// #define C10_CUDA_API C10_CUDA_EXPORT -// #else -// #define C10_CUDA_API C10_CUDA_IMPORT -// #endif - -/** - * The maximum number of GPUs that we recognizes. 
- */ -public static final int C10_COMPILE_TIME_MAX_GPUS = 16; - - -// Parsed from c10/cuda/impl/cuda_cmake_macros.h - -// #pragma once - -// Automatically generated header file for the C10 CUDA library. Do not -// include this file directly. Instead, include c10/cuda/CUDAMacros.h - -// #define C10_CUDA_BUILD_SHARED_LIBS - - -// Parsed from c10/cuda/CUDAGraphsC10Utils.h - -// #pragma once - -// #include -// #include - -// CUDA Graphs utils used by c10 and aten. -// aten/cuda/CUDAGraphsUtils.cuh adds utils used by aten only. - -// first is set if the instance is created by CUDAGraph::capture_begin. -// second is set if the instance is created by at::cuda::graph_pool_handle. -// Targeting ../cuda/CUDAStreamCaptureModeGuard.java - - -// #endif - -// #if !defined(USE_ROCM) || ROCM_VERSION >= 50300 -// Protects against enum cudaStreamCaptureStatus implementation changes. -// Some compilers seem not to like static_assert without the messages. -// #endif - - - -@Namespace("c10::cuda") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Cast("c10::cuda::CaptureStatus") int status); - -// Use this version where you're sure a CUDA context exists already. 
-@Namespace("c10::cuda") public static native @Cast("c10::cuda::CaptureStatus") int currentStreamCaptureStatusMayInitCtx(); - - // namespace cuda - // namespace c10 - - // Parsed from ATen/cuda/Exceptions.h // #pragma once @@ -507,6 +401,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // cusolver related headers are only supported on cuda now // #ifdef CUDART_VERSION @Namespace("at::cuda::solver") public static native @Cast("const char*") BytePointer cusolverGetErrorMessage(@Cast("cusolverStatus_t") int status); + +@Namespace("at::cuda::solver") @MemberGetter public static native @Cast("const char*") BytePointer _cusolver_backend_suggestion(); + // namespace at::cuda::solver // When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan. @@ -523,13 +420,15 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // "cusolver error: ", // at::cuda::solver::cusolverGetErrorMessage(__err), // ", when calling `" #EXPR "`", -// ". This error may appear if the input matrix contains NaN."); +// ". This error may appear if the input matrix contains NaN. ", +// at::cuda::solver::_cusolver_backend_suggestion); // } else { // TORCH_CHECK( // __err == CUSOLVER_STATUS_SUCCESS, // "cusolver error: ", // at::cuda::solver::cusolverGetErrorMessage(__err), -// ", when calling `" #EXPR "`"); +// ", when calling `" #EXPR "`. ", +// at::cuda::solver::_cusolver_backend_suggestion); // } // } while (0) @@ -594,6 +493,89 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // } while (0) +// Parsed from ATen/cuda/CUDAContext.h + +// #pragma once + +// #include + +// #include +// #include +// #include + +// #ifdef CUDART_VERSION +// #include +// #endif + +// #if defined(USE_ROCM) && ROCM_VERSION >= 50300 +// #include +// #endif + +// #include +// #include +// #include +// #include +// #include +// #include + +/* +A common CUDA interface for ATen. 
+ +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. + +Note that CUDAContext simply defines an interface with no associated class. +It is expected that the modules whose functions compose this interface will +manage their own state. There is only a single CUDA context/state. +*/ + +/** + * DEPRECATED: use device_count() instead + */ +@Namespace("at::cuda") public static native @Cast("int64_t") long getNumGPUs(); + +/** + * CUDA is available if we compiled with CUDA, and there are one or more + * devices. If we compiled with CUDA but there is a driver problem, etc., + * this function will report CUDA is not available (rather than raise an error.) 
+ */ +@Namespace("at::cuda") public static native @Cast("bool") boolean is_available(); + +@Namespace("at::cuda") public static native Pointer getCurrentDeviceProperties(); + +@Namespace("at::cuda") public static native int warp_size(); + +@Namespace("at::cuda") public static native Pointer getDeviceProperties(@Cast("int64_t") long device); + +@Namespace("at::cuda") public static native @Cast("bool") boolean canDeviceAccessPeer( + @Cast("int64_t") long device, + @Cast("int64_t") long peer_device); + +@Namespace("at::cuda") public static native Allocator getCUDADeviceAllocator(); + +/* Handles */ +@Namespace("at::cuda") public static native @Cast("cusparseHandle_t") Pointer getCurrentCUDASparseHandle(); +@Namespace("at::cuda") public static native @Cast("cublasHandle_t") Pointer getCurrentCUDABlasHandle(); + +@Namespace("at::cuda") public static native void clearCublasWorkspaces(); + +// #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 +@Namespace("at::cuda") public static native @Cast("cusolverDnHandle_t") Pointer getCurrentCUDASolverDnHandle(); +// #endif + + // namespace at::cuda + + // Parsed from ATen/cudnn/cudnn-wrapper.h // #pragma once @@ -613,6 +595,30 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #undef STRING +// Parsed from ATen/cuda/ATenCUDAGeneral.h + +// #pragma once + +// #include +// #include +// #include + +// #include + +// Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder + + +// Parsed from ATen/cudnn/Handle.h + +// #pragma once + +// #include +// #include + +@Namespace("at::native") public static native @Cast("cudnnHandle_t") Pointer getCudnnHandle(); + // namespace at::native + + // Parsed from ATen/cudnn/Utils.h // #pragma once @@ -631,28 +637,62 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { -// Parsed from ATen/cudnn/Handle.h +// Parsed from c10/cuda/CUDAGraphsC10Utils.h // #pragma once -// #include -// #include +// #include +// 
#include -@Namespace("at::native") public static native @Cast("cudnnHandle_t") Pointer getCudnnHandle(); - // namespace at::native +// CUDA Graphs utils used by c10 and aten. +// aten/cuda/CUDAGraphsUtils.cuh adds utils used by aten only. +// first is set if the instance is created by CUDAGraph::capture_begin. +// second is set if the instance is created by at::cuda::graph_pool_handle. +// Targeting ../cuda/CUDAStreamCaptureModeGuard.java -// Parsed from ATen/cuda/ATenCUDAGeneral.h + +// #endif + +// #if !defined(USE_ROCM) || ROCM_VERSION >= 50300 +// Protects against enum cudaStreamCaptureStatus implementation changes. +// Some compilers seem not to like static_assert without the messages. +// #endif + + + +@Namespace("c10::cuda") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Cast("c10::cuda::CaptureStatus") int status); + +// Use this version where you're sure a CUDA context exists already. +@Namespace("c10::cuda") public static native @Cast("c10::cuda::CaptureStatus") int currentStreamCaptureStatusMayInitCtx(); + + // namespace cuda + // namespace c10 + + +// Parsed from c10/cuda/impl/CUDAGuardImpl.h // #pragma once -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include -// #include +// #include +// #include +// #include +// #include -// Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder +// #include +// Targeting ../cuda/CUDAGuardImpl.java + + + + // namespace impl + // namespace cuda + // namespace c10 // Parsed from ATen/cudnn/Descriptors.h diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java deleted file mode 100644 index 7724c8b9d99..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_iterator_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class named_attribute_iterator extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public named_attribute_iterator(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public named_attribute_iterator(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public named_attribute_iterator position(long position) { - return (named_attribute_iterator)super.position(position); - } - @Override public named_attribute_iterator getPointer(long i) { - return new named_attribute_iterator((Pointer)this).offsetAddress(i); - } - - public named_attribute_iterator( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } - private native void allocate( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module); - // empty cursors_, represents end of iteration - public named_attribute_iterator() { super((Pointer)null); allocate(); } - private native void allocate(); - public native @ByVal @Name("operator *") NamedIValue multiply(); - public native @ByVal @Name("operator ->") NamedIValue access(); - public native @ByRef @Name("operator ++") named_attribute_iterator increment(); - public native @ByVal @Name("operator ++") named_attribute_iterator increment(int arg0); - - private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( - @Const @ByRef named_attribute_iterator a, - @Const @ByRef named_attribute_iterator b); - public boolean notEquals(named_attribute_iterator b) { return notEquals(this, b); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java deleted file mode 100644 index 0d1ee8c7015..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import 
org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_list_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class named_attribute_list extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public named_attribute_list(Pointer p) { super(p); } - - public native @ByVal named_attribute_iterator begin(); - public native @ByVal named_attribute_iterator end(); - public native @Cast("size_t") long size(); - - public named_attribute_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } - private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java deleted file mode 100644 index 1aeef22c386..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static 
org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_iterator_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class named_buffer_iterator extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public named_buffer_iterator(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public named_buffer_iterator(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public named_buffer_iterator position(long position) { - return (named_buffer_iterator)super.position(position); - } - @Override public named_buffer_iterator getPointer(long i) { - return new named_buffer_iterator((Pointer)this).offsetAddress(i); - } - - public named_buffer_iterator( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } - private native void allocate( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module); - // empty cursors_, represents end of iteration - public named_buffer_iterator() { super((Pointer)null); allocate(); } - private native void allocate(); - public native @ByVal @Name("operator *") NamedTensor multiply(); - public native @ByVal @Name("operator ->") NamedTensor access(); - public native @ByRef @Name("operator ++") named_buffer_iterator increment(); - public native @ByVal @Name("operator ++") named_buffer_iterator increment(int arg0); - - private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( - @Const @ByRef named_buffer_iterator a, - @Const @ByRef named_buffer_iterator b); - public boolean 
notEquals(named_buffer_iterator b) { return notEquals(this, b); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java deleted file mode 100644 index f79ee2c7687..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_list_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class named_buffer_list extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public named_buffer_list(Pointer p) { super(p); } - - public native @ByVal named_buffer_iterator begin(); - public native @ByVal named_buffer_iterator end(); - public native @Cast("size_t") long size(); - - public named_buffer_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } - private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java deleted file mode 100644 index b7b95d94a50..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_iterator_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class named_parameter_iterator extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public named_parameter_iterator(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public named_parameter_iterator(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public named_parameter_iterator position(long position) { - return (named_parameter_iterator)super.position(position); - } - @Override public named_parameter_iterator getPointer(long i) { - return new named_parameter_iterator((Pointer)this).offsetAddress(i); - } - - public named_parameter_iterator( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } - private native void allocate( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module); - // empty cursors_, represents end of iteration - public named_parameter_iterator() { super((Pointer)null); allocate(); } - private native void allocate(); - public native @ByVal @Name("operator *") NamedTensor multiply(); - public native @ByVal @Name("operator ->") NamedTensor access(); - public native @ByRef @Name("operator ++") named_parameter_iterator increment(); - public native @ByVal @Name("operator ++") named_parameter_iterator increment(int arg0); - - private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( - @Const @ByRef named_parameter_iterator a, - @Const @ByRef named_parameter_iterator b); - public boolean notEquals(named_parameter_iterator b) { return notEquals(this, b); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java deleted file mode 100644 index 6eba09a6a7d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import 
org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_list_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class named_parameter_list extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public named_parameter_list(Pointer p) { super(p); } - - public native @ByVal named_parameter_iterator begin(); - public native @ByVal named_parameter_iterator end(); - public native @Cast("size_t") long size(); - - public named_parameter_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } - private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java deleted file mode 100644 index 14f412baf94..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static 
org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_iterator_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class parameter_iterator extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public parameter_iterator(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public parameter_iterator(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public parameter_iterator position(long position) { - return (parameter_iterator)super.position(position); - } - @Override public parameter_iterator getPointer(long i) { - return new parameter_iterator((Pointer)this).offsetAddress(i); - } - - public parameter_iterator( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } - private native void allocate( - @ByVal JitModule root, - @Cast("bool") boolean recurse, - @Cast("bool") boolean return_module); - // empty cursors_, represents end of iteration - public parameter_iterator() { super((Pointer)null); allocate(); } - private native void allocate(); - public native @ByVal @Name("operator *") Tensor multiply(); - public native @ByVal @Name("operator ->") Tensor access(); - public native @ByRef @Name("operator ++") parameter_iterator increment(); - public native @ByVal @Name("operator ++") parameter_iterator increment(int arg0); - - private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( - @Const @ByRef parameter_iterator a, - @Const @ByRef parameter_iterator b); - public boolean notEquals(parameter_iterator b) { return 
notEquals(this, b); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java deleted file mode 100644 index f650a73ab2b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::slot_list_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class parameter_list extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public parameter_list(Pointer p) { super(p); } - - public native @ByVal parameter_iterator begin(); - public native @ByVal parameter_iterator end(); - public native @Cast("size_t") long size(); - - public parameter_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } - private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java deleted file mode 100644 index 513f6993d37..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java +++ /dev/null @@ -1,244 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - // namespace detailv3 - -@Namespace("ska_ordered") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class prime_number_hash_policy extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public prime_number_hash_policy() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public prime_number_hash_policy(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public prime_number_hash_policy(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public prime_number_hash_policy position(long position) { - return (prime_number_hash_policy)super.position(position); - } - @Override public prime_number_hash_policy getPointer(long i) { - return new prime_number_hash_policy((Pointer)this).offsetAddress(i); - } - - public static native @Cast("uint64_t") long mod0(@Cast("uint64_t") long arg0); - public static native @Cast("uint64_t") long mod2(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod3(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod5(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod7(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod11(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod13(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod17(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod23(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod29(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod37(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod47(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod59(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod73(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod97(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod127(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod151(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod197(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long 
mod251(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod313(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod397(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod499(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod631(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod797(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1009(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1259(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1597(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2011(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2539(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod3203(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod4027(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod5087(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod6421(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod8089(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod10193(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod12853(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod16193(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod20399(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod25717(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod32401(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod40823(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod51437(@Cast("uint64_t") long hash); - public static native 
@Cast("uint64_t") long mod64811(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod81649(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod102877(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod129607(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod163307(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod205759(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod259229(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod326617(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod411527(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod518509(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod653267(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod823117(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1037059(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1306601(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1646237(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2074129(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2613229(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod3292489(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod4148279(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod5226491(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod6584983(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod8296553(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod10453007(@Cast("uint64_t") long hash); - public static native 
@Cast("uint64_t") long mod13169977(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod16593127(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod20906033(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod26339969(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod33186281(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod41812097(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod52679969(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod66372617(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod83624237(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod105359939(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod132745199(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod167248483(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod210719881(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod265490441(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod334496971(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod421439783(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod530980861(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod668993977(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod842879579(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1061961721(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1337987929(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1685759167(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long 
mod2123923447(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2675975881(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod3371518343(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod4247846927(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod5351951779(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod6743036717(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod8495693897(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod10703903591(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod13486073473(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod16991387857(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod21407807219(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod26972146961(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod33982775741(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod42815614441(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod53944293929(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod67965551447(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod85631228929(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod107888587883(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod135931102921(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod171262457903(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod215777175787(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod271862205833(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long 
mod342524915839(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod431554351609(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod543724411781(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod685049831731(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod863108703229(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1087448823553(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1370099663459(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1726217406467(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2174897647073(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2740199326961(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod3452434812973(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod4349795294267(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod5480398654009(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod6904869625999(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod8699590588571(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod10960797308051(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod13809739252051(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod17399181177241(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod21921594616111(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod27619478504183(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod34798362354533(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod43843189232363(@Cast("uint64_t") long 
hash); - public static native @Cast("uint64_t") long mod55238957008387(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod69596724709081(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod87686378464759(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod110477914016779(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod139193449418173(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod175372756929481(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod220955828033581(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod278386898836457(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod350745513859007(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod441911656067171(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod556773797672909(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod701491027718027(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod883823312134381(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1113547595345903(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1402982055436147(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1767646624268779(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2227095190691797(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2805964110872297(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod3535293248537579(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod4454190381383713(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod5611928221744609(@Cast("uint64_t") long 
hash); - public static native @Cast("uint64_t") long mod7070586497075177(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod8908380762767489(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod11223856443489329(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod14141172994150357(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod17816761525534927(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod22447712886978529(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod28282345988300791(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod35633523051069991(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod44895425773957261(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod56564691976601587(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod71267046102139967(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod89790851547914507(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod113129383953203213(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod142534092204280003(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod179581703095829107(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod226258767906406483(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod285068184408560057(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod359163406191658253(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod452517535812813007(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod570136368817120201(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long 
mod718326812383316683(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod905035071625626043(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1140272737634240411(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1436653624766633509(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod1810070143251252131(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2280545475268481167(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod2873307249533267101(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod3620140286502504283(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod4561090950536962147(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod5746614499066534157(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod7240280573005008577(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod9122181901073924329(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod11493228998133068689(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod14480561146010017169(@Cast("uint64_t") long hash); - public static native @Cast("uint64_t") long mod18446744073709551557(@Cast("uint64_t") long hash); - - public static class mod_function extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public mod_function(Pointer p) { super(p); } - protected mod_function() { allocate(); } - private native void allocate(); - public native @Cast("uint64_t") long call(@Cast("uint64_t") long arg0); - } - - public native mod_function next_size_over(@Cast("uint64_t*") @ByRef LongPointer size); - public native mod_function next_size_over(@Cast("uint64_t*") @ByRef LongBuffer size); - public native mod_function next_size_over(@Cast("uint64_t*") @ByRef long[] size); - public native void commit(mod_function new_mod_function); - public native void reset(); - - public native @Cast("uint64_t") long index_for_hash(@Cast("uint64_t") long hash, @Cast("uint64_t") long arg1); - public native @Cast("uint64_t") long keep_in_range(@Cast("uint64_t") long index, @Cast("uint64_t") long num_slots_minus_one); -} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/BackendMetaPtr.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/BackendMetaPtr.java new file mode 100644 index 00000000000..cb56ce261d5 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/BackendMetaPtr.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.StringBoolMap; +import org.bytedeco.pytorch.Tensor; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class BackendMetaPtr extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public BackendMetaPtr(Pointer p) { + super(p); + } + + protected BackendMetaPtr() { + allocate(); + } + + private native void allocate(); + + // std::function&)> + public native void call(@Const @ByRef Tensor tensor, @ByRef StringBoolMap map); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/MemCopyFunction.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/MemCopyFunction.java new file mode 100644 index 00000000000..ea5e809b236 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/MemCopyFunction.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MemCopyFunction extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public MemCopyFunction(Pointer p) { + super(p); + } + + protected MemCopyFunction() { + allocate(); + } + + private native void allocate(); + + // std::function + public native void call(Pointer dest, @Const Pointer src, @Cast("size_t") long n); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/MetadataLogger.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/MetadataLogger.java new file mode 100644 index 00000000000..36a5026481b --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/MetadataLogger.java @@ -0,0 +1,35 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.BytePointer; +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.javacpp.annotation.StdString; +import org.bytedeco.pytorch.DDPLoggingData; +import org.bytedeco.pytorch.StringStringMap; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MetadataLogger extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public MetadataLogger(Pointer p) { + super(p); + } + + protected MetadataLogger() { + allocate(); + } + + private native void allocate(); + + // std::function&)> + public native void call(@Const @StdString BytePointer s, @Const @ByRef StringStringMap map); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/OperationCreator.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/OperationCreator.java new file mode 100644 index 00000000000..8e627b27ca6 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/OperationCreator.java @@ -0,0 +1,34 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByVal; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.pytorch.Operation; +import org.bytedeco.pytorch.JitNode; + + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class OperationCreator extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public OperationCreator(Pointer p) { + super(p); + } + + protected OperationCreator() { + allocate(); + } + + private native void allocate(); + + public native @ByVal Operation call(@Const JitNode arg0); + +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleReader.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleReader.java new file mode 100644 index 00000000000..2906b6bc120 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleReader.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.BytePointer; +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PickleReader extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public PickleReader(Pointer p) { + super(p); + } + + protected PickleReader() { + allocate(); + } + + private native void allocate(); + + // std::function + public native @Cast("size_t") long call(@Cast("char*") BytePointer buf, @Cast("size_t") long nbytes); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementConsumer.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementConsumer.java new file mode 100644 index 00000000000..769b0d1c618 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementConsumer.java @@ -0,0 +1,29 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PlacementConsumer extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public PlacementConsumer(Pointer p) { + super(p); + } + + protected PlacementConsumer() { + allocate(); + } + + private native void allocate(); + + public native void call(Pointer ptr, @Cast("size_t") long size); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementCopier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementCopier.java new file mode 100644 index 00000000000..f6a2500543c --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PlacementCopier.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PlacementCopier extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public PlacementCopier(Pointer p) { + super(p); + } + + protected PlacementCopier() { + allocate(); + } + + private native void allocate(); + + public native void call(@Const Pointer src, Pointer dst, @Cast("size_t") long size); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerSupplier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerSupplier.java new file mode 100644 index 00000000000..86b40507c38 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerSupplier.java @@ -0,0 +1,28 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PointerSupplier extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public PointerSupplier(Pointer p) { + super(p); + } + + protected PointerSupplier() { + allocate(); + } + + private native void allocate(); + + public native Pointer call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/StorageImplCreateHelper.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/StorageImplCreateHelper.java new file mode 100644 index 00000000000..f9f6f8d5f68 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/StorageImplCreateHelper.java @@ -0,0 +1,34 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Allocator; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StorageImplCreateHelper extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public StorageImplCreateHelper(Pointer p) { + super(p); + } + + protected StorageImplCreateHelper() { + allocate(); + } + + private native void allocate(); + + public native @ByVal StorageImplPtr call( + @ByVal StorageImpl.use_byte_size_t arg0, + @ByVal SymInt size_bytes, + Allocator allocator, + @Cast("bool") boolean resizable); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorRefHook.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorRefHook.java new file mode 100644 index 00000000000..40f72a45f54 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorRefHook.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.TensorBase; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorTensorRefHook extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public TensorTensorRefHook(Pointer p) { + super(p); + } + + protected TensorTensorRefHook() { + allocate(); + } + + private native void allocate(); + + public native @ByRef TensorBase call(@Const @ByRef TensorBase a); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeParser.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeParser.java new file mode 100644 index 00000000000..200825857af --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeParser.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.BytePointer; +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.Type.TypePtr; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TypeParser extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public TypeParser(Pointer p) { + super(p); + } + + protected TypeParser() { + allocate(); + } + + private native void allocate(); + + // std::function + public native @ByVal TypePtr call(@Const @StdString BytePointer s); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypePrinter.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypePrinter.java new file mode 100644 index 00000000000..f83ce7b650f --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypePrinter.java @@ -0,0 +1,34 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.ByVal; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.StringOptional; +import org.bytedeco.pytorch.Type; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TypePrinter extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public TypePrinter(Pointer p) { + super(p); + } + + protected TypePrinter() { + allocate(); + } + + private native void allocate(); + + // std::function(const c10::Type&)> + public native @ByVal StringOptional call(@Const @ByRef Type type); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeResolver.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeResolver.java new file mode 100644 index 00000000000..629c0050107 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeResolver.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.QualifiedName; +import org.bytedeco.pytorch.StrongTypePtr; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TypeResolver extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public TypeResolver(Pointer p) { + super(p); + } + + protected TypeResolver() { + allocate(); + } + + private native void allocate(); + + // std::function + public native @ByVal StrongTypePtr call(@Const @ByRef QualifiedName name); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeSupplier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeSupplier.java new file mode 100644 index 00000000000..4d69ec28b5b --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeSupplier.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByVal; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.Type.TypePtr; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TypeSupplier extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public TypeSupplier(Pointer p) { + super(p); + } + + protected TypeSupplier() { + allocate(); + } + + private native void allocate(); + + public native @ByVal TypePtr call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 57a4f36e306..32a52ee1e08 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -56,17 +56,18 @@ value = { @Platform( value = {"linux", "macosx", "windows"}, - compiler = "cpp14", + compiler = "cpp17", define = {"SHARED_PTR_NAMESPACE std", "UNIQUE_PTR_NAMESPACE std"}, include = { "torch/torch.h", - "ATen/native/TensorShape.h", - "torch/csrc/jit/serialization/storage_context.h", - "torch/csrc/jit/serialization/import.h", + "torch/script.h", // For inclusion in JNI only, not parsed (compiler needs some complete definitions) "torch/csrc/jit/runtime/instruction.h", "torch/csrc/jit/serialization/source_range_serialization.h", + "torch/csrc/jit/frontend/resolver.h", + "torch/csrc/jit/frontend/tree_views.h", + "torch/csrc/jit/serialization/storage_context.h", "pytorch_adapters.h" }, @@ -76,10 +77,7 @@ @Platform( value = {"linux", "macosx", "windows"}, link = { "c10", "c10_cuda", "torch_cpu", "torch_cuda", "torch" }, - // If nvfuser_codegen is linked and not preloaded, and javacpp cache is empty, we get: - // Loading nvfuser library failed with: Error in dlopen: libtorch.so: Cannot open... (function LoadingNvfuserLibrary) - // The warning disappears once the cache is filled. Probably some obscure race condition. 
- preload = {"gomp@.1", "iomp5", "omp", "tbb@.2", "asmjit", "fbgemm", "cupti@.12", "nvfuser_codegen"}, + preload = {"gomp@.1", "iomp5", "omp", "tbb@.2", "asmjit", "fbgemm", "cupti@.12"}, includepath = {"/usr/local/cuda/include", "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/"}, preloadpath = { "/usr/local/cuda-12.1/lib64/", @@ -245,7 +243,10 @@ public static void sharedMap(InfoMap infoMap) { "ERROR_UNSUPPORTED_CAST", "LEGACY_CONTIGUOUS_MEMORY_FORMAT", "GFLAGS_DLL_DEFINE_FLAG", "GFLAGS_DLL_DECLARE_FLAG", "AT_X", "DEFINE_KEY", "C10_DISPATCHER_INLINE_UNLESS_MOBILE", "TH_DISALLOW_COPY_AND_ASSIGN", "__device__", "TORCH_DSA_KERNEL_ARGS", "TORCH_DSA_KERNEL_ARGS_PASS", - "C10_CUDA_API", "C10_CUDA_IMPORT", "C10_CUDA_EXPORT").cppTypes().annotations()) + "C10_CUDA_API", "C10_CUDA_IMPORT", "C10_CUDA_EXPORT", + "__ubsan_ignore_float_divide_by_zero__", "__ubsan_ignore_undefined__", + "__ubsan_ignore_signed_int_overflow__", "__ubsan_ignore_pointer_overflow__", + "__ubsan_ignore_function__").cppTypes().annotations()) .put(new Info("defined(__CUDACC__) || defined(__HIPCC__)", "defined(__CUDACC__) && !defined(USE_ROCM)", @@ -258,7 +259,8 @@ public static void sharedMap(InfoMap infoMap) { "defined(_MSC_VER)", "_WIN32", "defined(USE_ROCM)", "USE_ROCM", "SYCL_LANGUAGE_VERSION", "defined(CUDA_VERSION) && CUDA_VERSION >= 11000", - "defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE").define(false)) + "defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE", + "__OBJC__").define(false)) .put(new Info("C10_DEFINE_DEPRECATED_USING").cppText("#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy)").cppTypes()) .put(new Info("C10_DEPRECATED_MESSAGE").cppText("#define C10_DEPRECATED_MESSAGE() deprecated").cppTypes()) @@ -330,6 +332,8 @@ public void map(InfoMap infoMap) { .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)").cast().valueTypes("boolean").pointerTypes("BoolPointer")) .put(new 
Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Half>::t)").pointerTypes("Half")) .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::BFloat16>::t)").pointerTypes("BFloat16")) + .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e5m2>::t)").pointerTypes("Float8_e5m2")) + .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Float8_e4m3fn>::t)").pointerTypes("Float8_e4m3fn")) .put(new Info("c10::DataPtr", "at::DataPtr").valueTypes("@Cast({\"\", \"c10::DataPtr&&\"}) @StdMove DataPtr").pointerTypes("DataPtr")) .put(new Info("c10::Storage", "at::Storage").valueTypes("@Cast({\"\", \"c10::Storage&&\"}) @StdMove Storage").pointerTypes("Storage")) .put(new Info("c10::ClassType").purify().pointerTypes("ClassType")) // Issue #669 @@ -428,7 +432,6 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional").pointerTypes("ModuleInstanceInfoOptional").define()) .put(new Info("c10::optional").pointerTypes("SourceRangeOptional").define()) .put(new Info("c10::optional").pointerTypes("MethodOptional").define()) - .put(new Info("c10::optional").pointerTypes("OperatorOptional").define()) .put(new Info("c10::optional", "c10::optional").pointerTypes("NamedValueOptional").define()) .put(new Info("c10::optional").pointerTypes("ValueOptional").define()) .put(new Info("c10::optional >", @@ -445,6 +448,7 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional >", "c10::optional >").pointerTypes("T_TypePtrLong_TOptional").cast().define()) .put(new Info("c10::optional").pointerTypes("StringViewOptional").define()) .put(new Info("c10::optional >").pointerTypes("StringViewVectorOptional").define()) + .put(new Info("c10::optional >")/*.cast?*/.pointerTypes("PointerPairOptional").define()) ; @@ -586,6 +590,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::array").pointerTypes("PointerPointer")) .put(new Info("std::array").cast().pointerTypes("FunctionalityOffsetAndMask")) 
.put(new Info("std::array").pointerTypes("IntPointer").cast()) + .put(new Info("std::array >,at::COMPILE_TIME_MAX_DEVICE_TYPES>").pointerTypes("PointerPairOptional").cast()) ; @@ -622,7 +627,6 @@ public void map(InfoMap infoMap) { .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove TensorVector").pointerTypes("TensorVector").define()) .put(new Info("std::vector", "std::vector").pointerTypes("TensorIndexVector").define()) .put(new Info("std::vector >").pointerTypes("TensorOptionalVector").define()) - .put(new Info("std::vector >").pointerTypes("OperatorOptionalVector").define()) .put(new Info("std::vector >").pointerTypes("SharedFunctionPreVector").define()) .put(new Info("const std::vector >", "std::vector >").pointerTypes("FunctionPreHookVector").define()) @@ -631,14 +635,11 @@ public void map(InfoMap infoMap) { .put(new Info("const std::vector", "std::vector").pointerTypes("SavedVariableVector").define()) .put(new Info("const std::vector", "std::vector").pointerTypes("DefVector").define()) .put(new Info("const std::vector", "std::vector").pointerTypes("PropertyVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("InstructionVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("CompilationUnitVector").define()) .put(new Info("const std::vector", "std::vector").pointerTypes("OptimizerParamGroupVector").define()) .put(new Info("std::vector").pointerTypes("FunctionVector").define()) .put(new Info("std::vector >").pointerTypes("GraphVector").define()) .put(new Info("std::vector >").pointerTypes("OperatorVector").define()) .put(new Info("std::vector >", "std::vector").pointerTypes("ResolverVector").define()) - .put(new Info("std::vector").pointerTypes("StackEntryVector").define()) .put(new Info("std::vector", "std::vector").pointerTypes("ValueVector").define()) // Returned by inlineCallTo .put(new Info("std::vector").pointerTypes("JitNodeVector").define()) .put(new 
Info("std::vector").pointerTypes("ModuleVector").define()) @@ -781,9 +782,7 @@ public void map(InfoMap infoMap) { for (String[] t : new String[][]{ {"SymInt", "SymInt", "@ByVal SymInt", "c10::SymInt", "at::kDimVectorStaticSize", "at::SymDimVector", "SymDimVector"}, {"Long", "LongPointer", "long", "int64_t", "at::kDimVectorStaticSize", "at::DimVector", "DimVector"}, - {"Node", "Node", "@ByPtr Node", "torch::autograd::Node*", "4", null, "SmallNodeVector"}, - {"TreeRef", "TreeRef", "@ByVal TreeRef", "c10::intrusive_ptr", "4", null, "TreeList"} - + {"Node", "Node", "@ByPtr Node", "torch::autograd::Node*", "4", null, "SmallNodeVector"} }) { // Assume all have SmallVectorSizeType == uint32_t infoMap @@ -841,6 +840,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::unordered_set").pointerTypes("NodeSet").define()) .put(new Info("std::unordered_set").pointerTypes("StreamSet").define()) .put(new Info("std::unordered_set").pointerTypes("RecordScopeSet").define()) + .put(new Info("std::unordered_set").pointerTypes("DeviceTypeSet").define()) .put(new Info("std::set").pointerTypes("ActivityTypeSet").define()) ; @@ -946,22 +946,6 @@ public void map(InfoMap infoMap) { infoMap.put(new Info("torch::jit::TreeList::const_iterator").cast().pointerTypes("TreeRef")); - /* Not parsed anymore - List binaryOps = Arrays.asList("Add", "Sub", "Div", "Max", "Min", "Mul", "Mod", "Xor", "And", "Or", "Rshift", "Lshift"); - List exprOps = new ArrayList<>(); - exprOps.addAll(Arrays.asList("CharImm", "FloatImm", "BitCast", "Intrinsics", "Broadcast", "Cast")); - exprOps.addAll(binaryOps); - List bitwiseOps = Arrays.asList("Xor", "And", "Or", "Rshift", "Lshift"); - - for (String op : binaryOps) - infoMap.put(new Info("torch::jit::tensorexpr::BinaryOpNode").pointerTypes("BinaryOpNode" + op)); - for (String op : exprOps) - infoMap.put(new Info("torch::jit::tensorexpr::ExprNode").pointerTypes("ExprNode" + op)); - for (String op : bitwiseOps) - infoMap.put(new 
+ new PointerInfo("c10::BackendMeta").javaName("BackendMetaRef") // Warning: BackendMetaPtr is something different
infoMap) { "torch::jit::Maybe::map", "torch::jit::Maybe::map", "torch::jit::Maybe >::map", - "torch::jit::Maybe >::map").skip()) + "torch::jit::Maybe >::map" + ).skip()) /* Could be mapped if needed */ .put(new Info("torch::jit::Wrap").pointerTypes("BlockWrap")) .put(new Info("torch::jit::Wrap").pointerTypes("JitNodeWrap")) - .put(new Info("torch::jit::Wrap").pointerTypes("ValueWrap")); + .put(new Info("torch::jit::Wrap").pointerTypes("ValueWrap")) + ; //// Datasets @@ -1373,6 +1357,9 @@ public void map(InfoMap infoMap) { .put(new Info("torch::nn::ConstantPadOptions<1>").pointerTypes("ConstantPad1dOptions")) .put(new Info("torch::nn::ConstantPadOptions<2>").pointerTypes("ConstantPad2dOptions")) .put(new Info("torch::nn::ConstantPadOptions<3>").pointerTypes("ConstantPad3dOptions")) + .put(new Info("torch::nn::ZeroPadOptions<1>").pointerTypes("ZeroPad1dOptions")) + .put(new Info("torch::nn::ZeroPadOptions<2>").pointerTypes("ZeroPad2dOptions")) + .put(new Info("torch::nn::ZeroPadOptions<3>").pointerTypes("ZeroPad3dOptions")) .put(new Info("torch::nn::AvgPoolOptions<1>", "torch::nn::functional::AvgPool1dFuncOptions").pointerTypes("AvgPool1dOptions")) .put(new Info("torch::nn::AvgPoolOptions<2>", "torch::nn::functional::AvgPool2dFuncOptions").pointerTypes("AvgPool2dOptions")) .put(new Info("torch::nn::AvgPoolOptions<3>", "torch::nn::functional::AvgPool3dFuncOptions").pointerTypes("AvgPool3dOptions")) @@ -1478,10 +1465,7 @@ public void map(InfoMap infoMap) { mapModule(infoMap, "ReflectionPad" + i + "d", "torch::nn::ReflectionPadImpl<" + i + ",torch::nn::ReflectionPad" + i + "dImpl>"); mapModule(infoMap, "ReplicationPad" + i + "d", "torch::nn::ReplicationPadImpl<" + i + ",torch::nn::ReplicationPad" + i + "dImpl>"); mapModule(infoMap, "ConstantPad" + i + "d", "torch::nn::ConstantPadImpl<" + i + ",torch::nn::ConstantPad" + i + "dImpl>"); - if (i == 2) { - mapModule(infoMap, "ZeroPad" + i + "d"); - } - + mapModule(infoMap, "ZeroPad" + i + "d", "torch::nn::ZeroPadImpl<" + i + 
",torch::nn::ZeroPad" + i + "dImpl>"); mapModule(infoMap, "AvgPool" + i + "d", "torch::nn::AvgPoolImpl<" + i + ",torch::nn::AvgPool" + i + "dImpl>"); mapModule(infoMap, "MaxPool" + i + "d", "torch::nn::MaxPoolImpl<" + i + ",torch::nn::MaxPool" + i + "dImpl>"); mapModule(infoMap, "AdaptiveAvgPool" + i + "d", "torch::nn::AdaptiveAvgPoolImpl<" + i + ",torch::ExpandingArray" + (i > 1 ? "WithOptionalElem<" : "<") + i + ">,torch::nn::AdaptiveAvgPool" + i + "dImpl>"); @@ -1634,7 +1618,6 @@ public void map(InfoMap infoMap) { new PointerInfo("torch::jit::Graph"), new PointerInfo("torch::jit::Operator"), new PointerInfo("torch::jit::Resolver"), - new PointerInfo("torch::jit::tensorexpr::analysis::AccessInfo"), new PointerInfo("c10::ClassType"), new PointerInfo("c10::TensorType").otherCppNames("c10::TensorTypePtr", "at::TensorTypePtr", "torch::TensorTypePtr"), new PointerInfo("torch::autograd::FunctionPreHook"), @@ -1669,8 +1652,8 @@ public void map(InfoMap infoMap) { .valueTypes("@Cast({\"\", \"std::unique_ptr&&\"}) FunctionPostHook") .pointerTypes("FunctionPostHook")) .put(new Info("std::unique_ptr", "Ptr").annotations("@UniquePtr").pointerTypes("AttributeValue")) - ; + infoMap.put(new Info("torch::autograd::AutogradMeta::post_acc_grad_hooks_").annotations("@UniquePtr", "@Cast({\"\", \"\", \"std::unique_ptr&&\"})")); // See JavaCPP Issue #717 /* TODO: see how to map these, if needed and meant to be part of API */ infoMap.put(new Info("c10::MaybeOwnedTraitsGenericImpl >::assignBorrow", @@ -1718,7 +1701,17 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::jit::StackEntry::filename", "torch::jit::StackEntry::range", "torch::jit::Call::fn_name", - "torch::jit::Call::caller_range" + "torch::jit::Call::caller_range", + "c10::SymbolicShapeMeta::sizes_", + "c10::SymbolicShapeMeta::strides_", + "c10::SymbolicShapeMeta::numel_", + "c10::SymbolicShapeMeta::storage_offset_", + "c10::SymbolicShapeMeta::is_contiguous_", + 
"c10::SymbolicShapeMeta::is_channels_last_contiguous_", + "c10::SymbolicShapeMeta::is_channels_last_3d_contiguous_", + "c10::SymbolicShapeMeta::is_channels_last_", + "c10::SymbolicShapeMeta::is_channels_last_3d_", + "c10::SymbolicShapeMeta::is_non_overlapping_and_dense_" }) { Info i = infoMap.getFirst(n, false); if (i == null) { @@ -1807,7 +1800,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset ).skip()); - //// Avoiding name clashes or making them more explicit. + //// Avoiding name clashes by skipping or renaming infoMap.put(new Info("c10::ComplexType::get").javaNames("getComplexTypePtr")) .put(new Info("c10::FloatType::get").javaNames("getFloatTypePtr")) .put(new Info("c10::IntType::get").javaNames("getIntTypePtr")) @@ -1824,6 +1817,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("torch::jit::Node").pointerTypes("JitNode")) .put(new Info("torch::jit::Module").pointerTypes("JitModule")) .put(new Info("torch::jit::Object").pointerTypes("JitObject")) + .put(new Info("torch::jit::load").javaNames("jitLoad")) .put(new Info("torch::jit::String").pointerTypes("JitString")) ; @@ -1886,7 +1880,9 @@ We need either to put an annotation info on each member, or javaName("@NoOffset {"c10::complex", "ComplexFloat"}, {"c10::complex", "ComplexDouble"}, {"bool", "boolean"}, - {"at::BFloat16", "BFload16"} + {"at::BFloat16", "BFloat16"}, + {"at::Float8_e4m3fn", "Float8_e4m3fn"}, + {"at::Float8_e5m2", "Float8_e5m2"} }) { infoMap.put(new Info("c10::fetch_and_cast<" + t[0] + ">").javaNames("fetch_and_cast_to_" + t[1])) .put(new Info("c10::cast_and_store<" + t[0] + ">").javaNames("cast_and_store_from_" + t[1])); @@ -1940,6 +1936,10 @@ We need either to put an annotation info on each member, or javaName("@NoOffset infoMap.put(new Info("c10::ThreadPoolRegistry()", "c10::CUDAHooksRegistry()").skip()); + ///// Forward references and opaque classes + infoMap + .put(new 
+ //// std::function passed as generic Pointer because they are returned by some methods, so no mapping is possible.
+ // skip() is added when function pointers are parsed instead of std::function, to use the class in the package
+ // functions and prevent the creation of an automatic class in the main package.
+ // If a native function returns a std::function, there is no way to map it.
Info("std::function").pointerTypes("TensorIdGetter")) .put(new Info("std::function").pointerTypes("SizeTSupplier")) @@ -2256,16 +2269,32 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("std::function", "torch::nn::TripletMarginWithDistanceLossOptions::distance_function_t", "torch::nn::functional::TripletMarginWithDistanceLossFuncOptions::distance_function_t").pointerTypes("DistanceFunction")) + .put(new Info("std::function").pointerTypes("MemCopyFunction")) .put(new Info("std::function)>").pointerTypes("Pointer")) .put(new Info("at::TensorBase::register_hook >").javaNames("register_hook")) .put(new Info("at::TensorBase::register_hook >").javaNames("register_hook")) .put(new Info("std::function").pointerTypes("VoidTensorHook")) .put(new Info("std::function").pointerTypes("TensorTensorHook")) + .put(new Info("std::function").pointerTypes("TensorTensorRefHook")) .put(new Info("std::function").pointerTypes("TensorMapper")) .put(new Info("at::TensorBase::hook_return_void_t > ", "at::TensorBase::hook_return_void_t >").valueTypes("int")) + .put(new Info("std::function&)>").pointerTypes("MetadataLogger")) + .put(new Info("std::function", "torch::jit::TypeResolver").pointerTypes("TypeResolver")) + .put(new Info("std::function", "torch::jit::TypeParserT", + "c10::TypePtr (*)(const std::string&)", + "c10::Type::SingletonOrSharedTypePtr (*)(const std::string&)" + ).pointerTypes("TypeParser").skip()) + .put(new Info("c10::intrusive_ptr (*)(c10::StorageImpl::use_byte_size_t, c10::SymInt, c10::Allocator*, bool)").pointerTypes("StorageImplCreateHelper").skip()) + .put(new Info("std::function(const c10::Type&)>").pointerTypes("TypePrinter")) + .put(new Info("void (*)(void*, size_t)", "c10::PlacementDtor", "caffe2::TypeMeta::PlacementNew", "caffe2::TypeMeta::PlacementDelete").pointerTypes("PlacementConsumer").valueTypes("PlacementConsumer").skip()) + .put(new Info("void (*)(const void*, void*, size_t)", 
"caffe2::TypeMeta::Copy").pointerTypes("PlacementCopier").valueTypes("PlacementCopier").skip()) + .put(new Info("torch::jit::Operation (*)(const torch::jit::Node*)", "torch::jit::OperationCreator").pointerTypes("OperationCreator").valueTypes("OperationCreator").skip()) ; + + infoMap.put(new Info("caffe2::TypeMeta::deleteFn").javaText("public native @NoException(true) PointerConsumer deleteFn();")); // Parser picks up the wrong Delete + } private static String template(String t, String... args) { diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 68a574141cd..a15e1cf5232 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -139,7 +139,7 @@ public void map(InfoMap infoMap) { "cudaDeviceProp" ).pointerTypes("Pointer")) .put(new Info( // Pointers to opaque structs - "cudaStream_t", "cusparseHandle_t", "cublasHandle_t", "cusolverDnHandle_t", "cudnnHandle_t" + "cudaStream_t", "cusparseHandle_t", "cublasHandle_t", "cusolverDnHandle_t", "cudnnHandle_t", "cudaEvent_t" ).valueTypes("Pointer").cast()) .put(new Info( // Enums "cudnnActivationMode_t", "cudnnLossNormalizationMode_t", "cudnnRNNInputMode_t", diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 254741f1d0d..18079c8faf3 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -4,23 +4,23 @@ // ATen/cudnn/Descriptors.h // ATen/cudnn/Types.h // c10/cuda/CUDAGuard.h -#include "c10/cuda/CUDAStream.h" -#include "ATen/cuda/CUDAContext.h" #include "c10/core/impl/GPUTrace.h" -#include "c10/cuda/CUDADeviceAssertionHost.h" -#include "c10/cuda/CUDAMacros.h" #include 
"c10/cuda/impl/cuda_cmake_macros.h" -#include "c10/cuda/CUDAGraphsC10Utils.h" +#include "c10/cuda/CUDAMacros.h" +#include "c10/cuda/CUDADeviceAssertionHost.h" +// #include "c10/cuda/CUDAMiscFunctions.h", // Parsing error +// #include "c10/cuda/CUDAException.h", // Parsing error +// #include "c10/cuda/CUDAFunctions.h", // Parsing error +#include "c10/cuda/CUDAStream.h" #include "ATen/cuda/Exceptions.h" +#include "ATen/cuda/CUDAContext.h" #include "ATen/cudnn/cudnn-wrapper.h" -#include "ATen/cudnn/Utils.h" -#include "ATen/cudnn/Handle.h" #include "ATen/cuda/ATenCUDAGeneral.h" -// #include "c10/cuda/CUDAFunctions.h", // Parsing error -// #include "c10/cuda/CUDAException.h", // Parsing error -// #include "c10/cuda/CUDAMiscFunctions.h", // Parsing error +#include "ATen/cudnn/Handle.h" +#include "ATen/cudnn/Utils.h" +#include "c10/cuda/CUDAGraphsC10Utils.h" // #include "c10/cuda/CUDACachingAllocator.h", // If map needed, rename global symbols - +#include "c10/cuda/impl/CUDAGuardImpl.h" #include "ATen/cudnn/Descriptors.h" #include "ATen/cudnn/Types.h" -#include "c10/cuda/CUDAGuard.h" +#include "c10/cuda/CUDAGuard.h" \ No newline at end of file diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index ec3238af8d9..a1ba3d04897 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -1,5 +1,7 @@ -// All files included by #include -// as listed by g++ -M torch/csrc/api/include/torch/all.h (or -H) +// All files included by +// #include +// #include +// as listed by g++ -H torch/torch.h torch/script.h // Excluding: // - the ones that fill at::meta at::native and at::_ops namespaces // (ATen/ops/*_native.h ATen/ops/*_meta.h ATen/ops/*_ops.h) @@ -9,9 +11,8 @@ #include "c10/macros/cmake_macros.h" #include "c10/macros/Export.h" #include "torch/csrc/Export.h" -#include 
"c10/macros/Macros.h" // import c10 into at and others #include "c10/core/DeviceType.h" -#include "c10/util/Deprecated.h" +#include "c10/macros/Macros.h" // #include "c10/util/string_utils.h" // Android only // #include "c10/util/C++17.h" #include "c10/util/reverse_iterator.h" @@ -26,10 +27,12 @@ #include "c10/util/TypeTraits.h" #include "c10/util/TypeList.h" // #include "c10/util/Metaprogramming.h" // Not parseable +#include "c10/util/bit_cast.h" // #include "c10/util/llvmMathExtras.h" // Not parseable #include "c10/core/DispatchKeySet.h" #include "c10/core/Backend.h" #include "c10/core/Layout.h" +#include "c10/util/Deprecated.h" #include "c10/util/AlignOf.h" #include "c10/util/SmallVector.h" #include "c10/util/ArrayRef.h" @@ -37,68 +40,67 @@ #include "c10/core/QScheme.h" #include "c10/core/Stream.h" #include "c10/core/OptionalRef.h" -#include "c10/util/BFloat16.h" #include "c10/util/BFloat16-inl.h" +#include "c10/util/BFloat16.h" #include "c10/util/TypeSafeSignMath.h" +#include "c10/util/floating_point_utils.h" +#include "c10/util/Float8_e4m3fn-inl.h" +#include "c10/util/Float8_e4m3fn.h" #include "c10/util/complex_math.h" -#include "c10/util/Half.h" // Moved before complex.h because it overrides complex -#include "c10/util/Half-inl.h" #include "c10/util/complex_utils.h" +#include "c10/util/Half.h" // Moved before complex.h because it overrides complex #include "c10/util/complex.h" +#include "c10/util/Half-inl.h" +#include "c10/util/Float8_e5m2-inl.h" +#include "c10/util/Float8_e5m2.h" +#include "c10/util/bits.h" #include "c10/util/qint32.h" #include "c10/util/qint8.h" #include "c10/util/quint2x4.h" #include "c10/util/quint4x2.h" #include "c10/util/quint8.h" #include "c10/core/ScalarType.h" -#include "c10/util/ExclusivelyOwned.h" +// #include "c10/util/Optional.h" // Incompatible with declaration of c10::optional as basic container #include "c10/util/MaybeOwned.h" -// #include "c10/util/intrusive_ptr.h" Moved below #include "c10/core/SymNodeImpl.h" -#include 
"c10/core/SymFloat.h" #include "c10/core/SymBool.h" +#include "c10/core/SymFloat.h" #include "c10/core/SymInt.h" #include "c10/util/TypeCast.h" #include "c10/core/Scalar.h" -// #include "c10/util/Optional.h" // Incompatible with declaration of c10::optional as basic container -#include "c10/util/Backtrace.h" #include "c10/util/IdWrapper.h" -#include "c10/util/Type.h" #include "c10/util/ConstexprCrc.h" #include "c10/util/TypeIndex.h" -#include "c10/util/flat_hash_map.h" #include "c10/util/irange.h" #include "c10/util/typeid.h" #include "c10/core/ScalarTypeToTypeMeta.h" #include "c10/util/ThreadLocalDebugInfo.h" #include "c10/util/UniqueVoidPtr.h" #include "c10/core/Allocator.h" +#include "c10/core/impl/HermeticPyObjectTLS.h" +#include "c10/core/SymIntArrayRef.h" +#include "c10/util/python_stub.h" +#include "c10/core/impl/PyInterpreter.h" +#include "c10/core/impl/PyObjectSlot.h" #include "c10/core/StorageImpl.h" #include "c10/core/Storage.h" -#include "c10/core/CopyBytes.h" #include "c10/core/AutogradState.h" -#include "c10/core/GradMode.h" -#include "c10/util/Registry.h" -#include "c10/util/Flags.h" #include "c10/core/impl/LocalDispatchKeySet.h" #include "c10/core/InferenceMode.h" -#include "c10/core/SymIntArrayRef.h" -#include "c10/core/DefaultDtype.h" -#include "c10/core/TensorOptions.h" #include "c10/core/WrapDimMinimal.h" -#include "c10/core/impl/HermeticPyObjectTLS.h" -#include "c10/core/impl/PyInterpreter.h" -#include "c10/core/impl/PyObjectSlot.h" #include "c10/core/impl/SizesAndStrides.h" #include "c10/util/DimVector.h" -// #include "c10/util/logging_is_google_glog.h" // Not parseable -// #include "c10/util/logging_is_not_google_glog.h" // Not parseable -#include "c10/util/Logging.h" +#include "c10/util/Type.h" +#include "c10/util/Registry.h" +#include "c10/util/Flags.h" #include "c10/util/accumulate.h" #include "c10/util/safe_numerics.h" #include "c10/core/TensorImpl.h" #include "c10/core/UndefinedTensorImpl.h" +#include "c10/util/ExclusivelyOwned.h" // 
+#include "c10/util/intrusive_ptr.h" // Moved after the definitions of its template args
#include "ATen/core/boxing/OperatorKernel.h" -#include "ATen/core/boxing/BoxedKernel.h" #include "ATen/core/boxing/BoxedKernel_impl.h" +#include "ATen/core/boxing/BoxedKernel.h" #include "ATen/core/stack.h" #include "ATen/core/boxing/impl/boxing.h" #include "ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h" #include "ATen/core/boxing/impl/WrapFunctionIntoFunctor.h" #include "ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h" -#include "ATen/core/boxing/KernelFunction.h" #include "ATen/core/boxing/KernelFunction_impl.h" +#include "ATen/core/boxing/KernelFunction.h" #include "ATen/core/dispatch/CppSignature.h" #include "ATen/core/dispatch/RegistrationHandleRAII.h" #include "ATen/core/ATenOpList.h" #include "ATen/core/op_registration/op_registration.h" -#include "ATen/core/enum_tag.h" #include "ATen/core/function.h" // #include "ATen/core/builtin_function.h" // Not in API #include "ATen/core/class_type.h" @@ -192,7 +197,9 @@ // #include "torch/custom_class.h" // Not in API #include "torch/library.h" #include "torch/csrc/autograd/autograd_not_implemented_fallback.h" +#include "c10/util/flat_hash_map.h" #include "torch/csrc/autograd/anomaly_mode.h" +#include "c10/core/GradMode.h" #include "ATen/core/grad_mode.h" #include "torch/csrc/autograd/grad_mode.h" #include "ATen/FuncTorchTLS.h" @@ -200,6 +207,7 @@ #include "ATen/PythonTorchFunctionTLS.h" #include "ATen/SavedTensorHooks.h" #include "ATen/ThreadLocalPythonObjects.h" +#include "ATen/record_function.h" #include "c10/core/impl/PythonDispatcherTLS.h" #include "c10/core/impl/TorchDispatchModeTLS.h" #include "ATen/ThreadLocalState.h" @@ -215,7 +223,10 @@ #include "ATen/detail/CUDAHooksInterface.h" #include "ATen/detail/HIPHooksInterface.h" #include "ATen/detail/MPSHooksInterface.h" +#include "ATen/detail/MTIAHooksInterface.h" #include "ATen/detail/ORTHooksInterface.h" +#include "ATen/detail/PrivateUse1HooksInterface.h" +#include "ATen/detail/XPUHooksInterface.h" #include "c10/core/QEngine.h" #include 
"c10/util/CallOnce.h" #include "c10/util/env.h" @@ -230,6 +241,8 @@ #include "ATen/TensorUtils.h" #include "ATen/TracerMode.h" #include "ATen/core/Reduction.h" +#include "ATen/ops/from_blob.h" +#include "ATen/ops/tensor.h" #include "ATen/ops/abs.h" #include "ATen/ops/absolute.h" #include "ATen/ops/acos.h" @@ -439,6 +452,7 @@ #include "ATen/ops/embedding_sparse_backward.h" #include "ATen/ops/empty.h" #include "ATen/ops/empty_like.h" +#include "ATen/ops/empty_permuted.h" #include "ATen/ops/empty_quantized.h" #include "ATen/ops/empty_strided.h" #include "ATen/ops/eq.h" @@ -512,7 +526,6 @@ #include "ATen/ops/fractional_max_pool3d_backward.h" #include "ATen/ops/frexp.h" #include "ATen/ops/frobenius_norm.h" -#include "ATen/ops/from_blob.h" #include "ATen/ops/from_file.h" #include "ATen/ops/full.h" #include "ATen/ops/full_like.h" @@ -809,6 +822,7 @@ #include "ATen/ops/nll_loss_nd.h" #include "ATen/ops/nonzero.h" #include "ATen/ops/nonzero_numpy.h" +#include "ATen/ops/nonzero_static.h" #include "ATen/ops/norm.h" #include "ATen/ops/norm_except_dim.h" #include "ATen/ops/normal.h" @@ -859,6 +873,7 @@ #include "ATen/ops/quantized_lstm_cell.h" #include "ATen/ops/quantized_max_pool1d.h" #include "ATen/ops/quantized_max_pool2d.h" +#include "ATen/ops/quantized_max_pool3d.h" #include "ATen/ops/quantized_rnn_relu_cell.h" #include "ATen/ops/quantized_rnn_tanh_cell.h" #include "ATen/ops/rad2deg.h" @@ -1057,6 +1072,12 @@ #include "ATen/ops/svd.h" #include "ATen/ops/swapaxes.h" #include "ATen/ops/swapdims.h" +#include "ATen/ops/sym_constrain_range.h" +#include "ATen/ops/sym_constrain_range_for_size.h" +#include "ATen/ops/sym_numel.h" +#include "ATen/ops/sym_size.h" +#include "ATen/ops/sym_storage_offset.h" +#include "ATen/ops/sym_stride.h" #include "ATen/ops/t.h" #include "ATen/ops/t_copy.h" #include "ATen/ops/take.h" @@ -1065,7 +1086,6 @@ #include "ATen/ops/tanh.h" #include "ATen/ops/tanh_backward.h" #include "ATen/ops/tensor_split.h" -#include "ATen/ops/tensor.h" #include 
"ATen/ops/tensordot.h" #include "ATen/ops/thnn_conv2d.h" #include "ATen/ops/threshold.h" @@ -1162,9 +1182,9 @@ #include "ATen/SequenceNumber.h" #include "torch/csrc/autograd/function.h" #include "torch/csrc/autograd/custom_function.h" -#include "torch/autograd.h" -#include "torch/cuda.h" -#include "torch/arg.h" +#include "torch/csrc/api/include/torch/autograd.h" +#include "torch/csrc/api/include/torch/cuda.h" +#include "torch/csrc/api/include/torch/arg.h" #include "ATen/Device.h" #include "ATen/Dispatch.h" #include "ATen/ScalarOps.h" @@ -1190,26 +1210,26 @@ #include "c10/util/Synchronized.h" // #include "c10/util/LeftRight.h" // Not in API #include "ATen/core/dispatch/Dispatcher.h" -#include "torch/types.h" -#include "torch/data/dataloader_options.h" -#include "torch/data/detail/queue.h" -#include "torch/data/detail/data_shuttle.h" -#include "torch/data/detail/sequencers.h" -#include "torch/data/iterator.h" -#include "torch/data/samplers/base.h" -#include "torch/data/samplers/random.h" -#include "torch/data/worker_exception.h" +#include "torch/csrc/api/include/torch/types.h" +#include "torch/csrc/api/include/torch/data/dataloader_options.h" +#include "torch/csrc/api/include/torch/data/detail/queue.h" +#include "torch/csrc/api/include/torch/data/detail/data_shuttle.h" +#include "torch/csrc/api/include/torch/data/detail/sequencers.h" +#include "torch/csrc/api/include/torch/data/iterator.h" +#include "torch/csrc/api/include/torch/data/samplers/base.h" +#include "torch/csrc/api/include/torch/data/samplers/random.h" +#include "torch/csrc/api/include/torch/data/worker_exception.h" #include "torch/csrc/utils/memory.h" -#include "torch/data/dataloader/base.h" -#include "torch/data/dataloader/stateful.h" -#include "torch/data/dataloader/stateless.h" -#include "torch/data/dataloader.h" -#include "torch/data/example.h" -#include "torch/data/datasets/base.h" -#include "torch/data/datasets/stateful.h" -#include "torch/data/samplers/custom_batch_request.h" -#include 
"torch/data/samplers/distributed.h" -#include "torch/data/samplers/sequential.h" +#include "torch/csrc/api/include/torch/data/dataloader/base.h" +#include "torch/csrc/api/include/torch/data/dataloader/stateful.h" +#include "torch/csrc/api/include/torch/data/dataloader/stateless.h" +#include "torch/csrc/api/include/torch/data/dataloader.h" +#include "torch/csrc/api/include/torch/data/example.h" +#include "torch/csrc/api/include/torch/data/datasets/base.h" +#include "torch/csrc/api/include/torch/data/datasets/stateful.h" +#include "torch/csrc/api/include/torch/data/samplers/custom_batch_request.h" +#include "torch/csrc/api/include/torch/data/samplers/distributed.h" +#include "torch/csrc/api/include/torch/data/samplers/sequential.h" #include "torch/csrc/api/include/torch/imethod.h" #include "torch/csrc/jit/ir/attributes.h" #include "torch/csrc/jit/ir/graph_node_list.h" @@ -1236,46 +1256,43 @@ #include "torch/csrc/jit/frontend/name_mangler.h" #include "torch/csrc/jit/api/compilation_unit.h" #include "torch/csrc/jit/api/module.h" -#include "torch/serialize/input-archive.h" -#include "torch/serialize/output-archive.h" -#include "torch/serialize/archive.h" -#include "torch/data/samplers/serialize.h" -#include "torch/data/samplers/stream.h" -#include "torch/data/samplers.h" -#include "torch/serialize/tensor.h" -#include "torch/serialize.h" -#include "torch/data/datasets/chunk.h" -#include "torch/data/datasets/map.h" -#include "torch/data/datasets/mnist.h" -#include "torch/data/datasets/shared.h" -#include "torch/data/datasets/tensor.h" -#include "torch/data/datasets.h" -#include "torch/data/transforms/base.h" -#include "torch/data/transforms/lambda.h" -#include "torch/data/transforms/collate.h" -#include "torch/data/transforms/stack.h" -#include "torch/data/transforms/tensor.h" -#include "torch/data/transforms.h" -#include "torch/data.h" -#include "torch/enum.h" -#include "torch/fft.h" -#include "torch/jit.h" -#include "torch/linalg.h" -#include "torch/nested.h" -#include 
"torch/detail/static.h" +#include "torch/csrc/api/include/torch/serialize/input-archive.h" +#include "torch/csrc/api/include/torch/serialize/output-archive.h" +#include "torch/csrc/api/include/torch/serialize/archive.h" +#include "torch/csrc/api/include/torch/data/samplers/serialize.h" +#include "torch/csrc/api/include/torch/data/samplers/stream.h" +#include "torch/csrc/api/include/torch/data/samplers.h" +#include "torch/csrc/api/include/torch/serialize/tensor.h" +#include "torch/csrc/api/include/torch/serialize.h" +#include "torch/csrc/api/include/torch/data/datasets/chunk.h" +#include "torch/csrc/api/include/torch/data/datasets/map.h" +#include "torch/csrc/api/include/torch/data/datasets/mnist.h" +#include "torch/csrc/api/include/torch/data/datasets/shared.h" +#include "torch/csrc/api/include/torch/data/datasets/tensor.h" +#include "torch/csrc/api/include/torch/data/datasets.h" +#include "torch/csrc/api/include/torch/data/transforms/base.h" +#include "torch/csrc/api/include/torch/data/transforms/lambda.h" +#include "torch/csrc/api/include/torch/data/transforms/collate.h" +#include "torch/csrc/api/include/torch/data/transforms/stack.h" +#include "torch/csrc/api/include/torch/data/transforms/tensor.h" +#include "torch/csrc/api/include/torch/data/transforms.h" +#include "torch/csrc/api/include/torch/data.h" +#include "torch/csrc/api/include/torch/enum.h" +#include "torch/csrc/api/include/torch/fft.h" +#include "torch/csrc/api/include/torch/jit.h" +#include "torch/csrc/api/include/torch/linalg.h" +#include "torch/csrc/api/include/torch/mps.h" +#include "torch/csrc/api/include/torch/nested.h" +#include "torch/csrc/api/include/torch/detail/static.h" #include "torch/csrc/api/include/torch/nn/pimpl-inl.h" -#include "torch/nn/pimpl.h" -#include "torch/nn/modules/container/any_value.h" -#include "torch/nn/modules/container/any_module_holder.h" -#include "torch/ordered_dict.h" -#include "torch/nn/module.h" +#include "torch/csrc/api/include/torch/nn/pimpl.h" +#include 
"torch/csrc/api/include/torch/nn/modules/container/any_value.h" +#include "torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h" +#include "torch/csrc/api/include/torch/nn/module.h" #include "ATen/Config.h" // #include "ATen/ParallelOpenMP.h" // Internal only -// #include "ATen/ParallelNative.h" // Internal only -// #include "ATen/ParallelNativeTBB.h" // Internal only #include "ATen/Parallel-inl.h" #include "ATen/Parallel.h" -#include "torch/csrc/api/include/torch/types.h" #include "torch/csrc/profiler/orchestration/observer.h" #include "torch/csrc/profiler/api.h" #include "torch/csrc/profiler/events.h" @@ -1285,131 +1302,121 @@ #include "torch/csrc/autograd/profiler_kineto.h" // #include "torch/csrc/autograd/profiler_legacy.h" // Do not bother with legacy API #include "torch/csrc/autograd/profiler.h" -#include "torch/utils.h" -#include "torch/nn/cloneable.h" -#include "torch/nn/options/batchnorm.h" -#include "torch/nn/functional/batchnorm.h" -// #include "torch/expanding_array.h" // Mapped to *Pointer -#include "torch/nn/options/conv.h" -#include "torch/nn/functional/conv.h" -#include "torch/nn/options/distance.h" -#include "torch/nn/functional/distance.h" -#include "torch/nn/options/dropout.h" -#include "torch/nn/functional/dropout.h" -#include "torch/nn/options/embedding.h" -#include "torch/nn/functional/embedding.h" -#include "torch/nn/options/fold.h" -#include "torch/nn/functional/fold.h" -#include "torch/nn/options/instancenorm.h" -#include "torch/nn/functional/instancenorm.h" -#include "torch/nn/functional/linear.h" -#include "torch/nn/options/activation.h" -#include "torch/nn/options/linear.h" -#include "torch/nn/functional/activation.h" -#include "torch/nn/options/loss.h" -#include "torch/nn/functional/loss.h" +#include "torch/csrc/api/include/torch/utils.h" +#include "torch/csrc/api/include/torch/nn/cloneable.h" +#include "torch/csrc/api/include/torch/nn/options/batchnorm.h" +#include 
"torch/csrc/api/include/torch/nn/functional/batchnorm.h" +// #include "torch/csrc/api/include/torch/expanding_array.h" // Mapped to *Pointer +#include "torch/csrc/api/include/torch/nn/options/conv.h" +#include "torch/csrc/api/include/torch/nn/functional/conv.h" +#include "torch/csrc/api/include/torch/nn/options/distance.h" +#include "torch/csrc/api/include/torch/nn/functional/distance.h" +#include "torch/csrc/api/include/torch/nn/options/dropout.h" +#include "torch/csrc/api/include/torch/nn/functional/dropout.h" +#include "torch/csrc/api/include/torch/nn/options/embedding.h" +#include "torch/csrc/api/include/torch/nn/functional/embedding.h" +#include "torch/csrc/api/include/torch/nn/options/fold.h" +#include "torch/csrc/api/include/torch/nn/functional/fold.h" +#include "torch/csrc/api/include/torch/nn/options/instancenorm.h" +#include "torch/csrc/api/include/torch/nn/functional/instancenorm.h" +#include "torch/csrc/api/include/torch/nn/functional/linear.h" +#include "torch/csrc/api/include/torch/nn/options/activation.h" +#include "torch/csrc/api/include/torch/nn/options/linear.h" +#include "torch/csrc/api/include/torch/nn/functional/activation.h" +#include "torch/csrc/api/include/torch/nn/options/loss.h" +#include "torch/csrc/api/include/torch/nn/functional/loss.h" #include "ATen/PadNd.h" -#include "torch/nn/options/padding.h" -#include "torch/nn/functional/padding.h" -#include "torch/nn/modules/utils.h" -#include "torch/nn/options/pooling.h" -#include "torch/nn/functional/pooling.h" -#include "torch/nn/options/normalization.h" -#include "torch/nn/functional/normalization.h" -#include "torch/nn/options/pixelshuffle.h" -#include "torch/nn/functional/pixelshuffle.h" -#include "torch/nn/options/upsampling.h" -#include "torch/nn/functional/upsampling.h" -#include "torch/nn/options/vision.h" -#include "torch/nn/functional/vision.h" -#include "torch/nn/functional.h" -#include "torch/nn/init.h" -#include "torch/nn/modules/common.h" -#include 
"torch/nn/modules/container/any.h" -// #include "torch/nn/modules/container/functional.h" // Complex variadic templates non parseable -#include "torch/nn/modules/container/moduledict.h" -#include "torch/nn/modules/container/modulelist.h" -#include "torch/nn/modules/container/named_any.h" -#include "torch/nn/modules/container/parameterdict.h" -#include "torch/nn/modules/container/parameterlist.h" -#include "torch/nn/modules/container/sequential.h" -#include "torch/nn/modules/linear.h" -#include "torch/nn/modules/activation.h" -#include "torch/nn/options/adaptive.h" -#include "torch/nn/modules/adaptive.h" -#include "torch/nn/modules/batchnorm.h" +#include "torch/csrc/api/include/torch/nn/options/padding.h" +#include "torch/csrc/api/include/torch/nn/functional/padding.h" +#include "torch/csrc/api/include/torch/nn/modules/utils.h" +#include "torch/csrc/api/include/torch/nn/options/pooling.h" +#include "torch/csrc/api/include/torch/nn/functional/pooling.h" +#include "torch/csrc/api/include/torch/nn/options/normalization.h" +#include "torch/csrc/api/include/torch/nn/functional/normalization.h" +#include "torch/csrc/api/include/torch/nn/options/pixelshuffle.h" +#include "torch/csrc/api/include/torch/nn/functional/pixelshuffle.h" +#include "torch/csrc/api/include/torch/nn/options/upsampling.h" +#include "torch/csrc/api/include/torch/nn/functional/upsampling.h" +#include "torch/csrc/api/include/torch/nn/options/vision.h" +#include "torch/csrc/api/include/torch/nn/functional/vision.h" +#include "torch/csrc/api/include/torch/nn/functional.h" +#include "torch/csrc/api/include/torch/nn/init.h" +#include "torch/csrc/api/include/torch/nn/modules/common.h" +#include "torch/csrc/api/include/torch/nn/modules/container/any.h" +// #include "torch/csrc/api/include/torch/nn/modules/container/functional.h" // Complex variadic templates non parseable +#include "torch/csrc/api/include/torch/nn/modules/container/moduledict.h" +#include 
"torch/csrc/api/include/torch/nn/modules/container/modulelist.h" +#include "torch/csrc/api/include/torch/nn/modules/container/named_any.h" +#include "torch/csrc/api/include/torch/nn/modules/container/parameterdict.h" +#include "torch/csrc/api/include/torch/nn/modules/container/parameterlist.h" +#include "torch/csrc/api/include/torch/nn/modules/container/sequential.h" +#include "torch/csrc/api/include/torch/nn/modules/linear.h" +#include "torch/csrc/api/include/torch/nn/modules/activation.h" +#include "torch/csrc/api/include/torch/nn/options/adaptive.h" +#include "torch/csrc/api/include/torch/nn/modules/adaptive.h" +#include "torch/csrc/api/include/torch/nn/modules/batchnorm.h" // #include "c10/util/overloaded.h" // Non parseable -#include "torch/nn/modules/conv.h" -#include "torch/nn/modules/distance.h" -#include "torch/nn/modules/dropout.h" -#include "torch/nn/modules/embedding.h" -#include "torch/nn/modules/fold.h" -#include "torch/nn/modules/instancenorm.h" -#include "torch/nn/modules/loss.h" -#include "torch/nn/modules/_functions.h" -#include "torch/nn/modules/normalization.h" -#include "torch/nn/modules/padding.h" -#include "torch/nn/modules/pixelshuffle.h" -#include "torch/nn/modules/pooling.h" -#include "torch/nn/options/rnn.h" -#include "torch/nn/utils/rnn.h" -#include "torch/nn/modules/rnn.h" -#include "torch/nn/options/transformerlayer.h" -#include "torch/nn/options/transformer.h" -#include "torch/nn/modules/transformer.h" -#include "torch/nn/modules/transformerlayer.h" -#include "torch/nn/options/transformercoder.h" -#include "torch/nn/modules/transformercoder.h" -#include "torch/nn/modules/upsampling.h" -#include "torch/nn/modules.h" -#include "torch/nn/options.h" -#include "torch/nn/utils/clip_grad.h" -#include "torch/nn/utils/convert_parameters.h" -#include "torch/nn/utils.h" -#include "torch/nn.h" -#include "torch/optim/optimizer.h" -#include "torch/optim/serialize.h" -#include "torch/optim/adagrad.h" -#include "torch/optim/adam.h" -#include 
"torch/optim/adamw.h" -#include "torch/optim/lbfgs.h" -#include "torch/optim/rmsprop.h" -#include "torch/optim/sgd.h" -#include "torch/optim/schedulers/lr_scheduler.h" -#include "torch/optim/schedulers/step_lr.h" -#include "torch/optim.h" -#include "torch/sparse.h" -#include "torch/special.h" -#include "torch/version.h" -#include "torch/csrc/api/include/torch/all.h" - -// Included by -// ATen/native/TensorShape.h" -// torch/csrc/jit/serialization/storage_context.h" -// torch/csrc/jit/serialization/import.h" -#include "caffe2/serialize/inline_container.h" -#include "caffe2/serialize/istream_adapter.h" +#include "torch/csrc/api/include/torch/nn/modules/conv.h" +#include "torch/csrc/api/include/torch/nn/modules/distance.h" +#include "torch/csrc/api/include/torch/nn/modules/dropout.h" +#include "torch/csrc/api/include/torch/nn/modules/embedding.h" +#include "torch/csrc/api/include/torch/nn/modules/fold.h" +#include "torch/csrc/api/include/torch/nn/modules/instancenorm.h" +#include "torch/csrc/api/include/torch/nn/modules/loss.h" +#include "torch/csrc/api/include/torch/nn/modules/_functions.h" +#include "torch/csrc/api/include/torch/nn/modules/normalization.h" +#include "torch/csrc/api/include/torch/nn/modules/padding.h" +#include "torch/csrc/api/include/torch/nn/modules/pixelshuffle.h" +#include "torch/csrc/api/include/torch/nn/modules/pooling.h" +#include "torch/csrc/api/include/torch/nn/options/rnn.h" +#include "torch/csrc/api/include/torch/nn/utils/rnn.h" +#include "torch/csrc/api/include/torch/nn/modules/rnn.h" +#include "torch/csrc/api/include/torch/nn/options/transformerlayer.h" +#include "torch/csrc/api/include/torch/nn/options/transformer.h" +#include "torch/csrc/api/include/torch/nn/modules/transformer.h" +#include "torch/csrc/api/include/torch/nn/modules/transformerlayer.h" +#include "torch/csrc/api/include/torch/nn/options/transformercoder.h" +#include "torch/csrc/api/include/torch/nn/modules/transformercoder.h" +#include 
"torch/csrc/api/include/torch/nn/modules/upsampling.h" +#include "torch/csrc/api/include/torch/nn/modules.h" +#include "torch/csrc/api/include/torch/nn/options.h" +#include "torch/csrc/api/include/torch/nn/utils/clip_grad.h" +#include "torch/csrc/api/include/torch/nn/utils/convert_parameters.h" +#include "torch/csrc/api/include/torch/nn/utils.h" +#include "torch/csrc/api/include/torch/nn.h" +#include "torch/csrc/api/include/torch/optim/optimizer.h" +#include "torch/csrc/api/include/torch/optim/serialize.h" +#include "torch/csrc/api/include/torch/optim/adagrad.h" +#include "torch/csrc/api/include/torch/optim/adam.h" +#include "torch/csrc/api/include/torch/optim/adamw.h" +#include "torch/csrc/api/include/torch/optim/lbfgs.h" +#include "torch/csrc/api/include/torch/optim/rmsprop.h" +#include "torch/csrc/api/include/torch/optim/sgd.h" +#include "torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h" +#include "torch/csrc/api/include/torch/optim/schedulers/step_lr.h" +#include "torch/csrc/api/include/torch/optim.h" +#include "torch/csrc/api/include/torch/sparse.h" +#include "torch/csrc/api/include/torch/special.h" +#include "torch/csrc/api/include/torch/version.h" +#include "torch/csrc/autograd/InferenceMode.h" +// #include "torch/csrc/jit/runtime/custom_operator.h" // Name conflict with torch::RegisterOperator + little chance to have any use #include "caffe2/serialize/read_adapter_interface.h" +#include "caffe2/serialize/istream_adapter.h" #include "caffe2/serialize/versions.h" -#include "torch/csrc/jit/serialization/unpickler.h" -#include "torch/csrc/jit/frontend/script_type_parser.h" -#include "torch/csrc/jit/frontend/resolver.h" -#include "torch/csrc/jit/frontend/sugared_value.h" -#include "torch/csrc/jit/frontend/error_report.h" -#include "torch/csrc/jit/frontend/tree.h" -#include "torch/csrc/jit/frontend/lexer.h" +#include "caffe2/serialize/inline_container.h" +#include "torch/csrc/jit/serialization/import.h" +#include "c10/util/FbcodeMaps.h" +#include 
"torch/csrc/jit/serialization/pickler.h" #include "torch/csrc/jit/frontend/parser_constants.h" #include "torch/csrc/jit/frontend/strtod.h" +#include "torch/csrc/jit/frontend/lexer.h" +#include "torch/csrc/jit/frontend/tree.h" +#include "torch/csrc/jit/frontend/error_report.h" #include "torch/csrc/jit/frontend/schema_matching.h" #include "torch/csrc/jit/frontend/versioned_symbols.h" +#include "torch/csrc/jit/frontend/sugared_value.h" +#include "torch/csrc/jit/frontend/resolver.h" #include "torch/csrc/jit/frontend/tree_views.h" -#include "torch/csrc/jit/serialization/pickler.h" - -// Parsed and for inclusion in JNI -// See also https://github.com/pytorch/pytorch/blob/main/docs/cpp/source/Doxyfile -// for an approximation of what should be in API in addition to torch.h" -// torch/csrc/jit/runtime/custom_operator.h: Name conflict with torch::RegisterOperator + little chance to have any use -#include "torch/torch.h" -#include "ATen/native/TensorShape.h" -#include "torch/csrc/jit/serialization/storage_context.h" -#include "torch/csrc/jit/serialization/import.h" +#include "torch/csrc/jit/frontend/script_type_parser.h" +#include "torch/csrc/jit/serialization/unpickler.h" +#include "torch/csrc/jit/serialization/pickle.h" \ No newline at end of file From 7dfa27ef537282275049cd5ad1ede02c24eb93f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 16 Oct 2023 16:36:56 +0200 Subject: [PATCH 03/26] Remove useless classes --- .../pytorch/ActivityTraceWrapper.java | 26 --- .../org/bytedeco/pytorch/Bool2Vector.java | 90 -------- .../java/org/bytedeco/pytorch/CUevent_st.java | 27 --- .../pytorch/InterpreterStateImpl.java | 26 --- .../gen/java/org/bytedeco/pytorch/Lexer.java | 47 ----- .../org/bytedeco/pytorch/LongStringMap.java | 51 ----- .../bytedeco/pytorch/LongVectorArrayRef.java | 133 ------------ .../pytorch/MaybeOwnedTraitsTensor.java | 50 ----- .../org/bytedeco/pytorch/ModuleVector.java | 90 -------- .../pytorch/NamedJitModulePolicy.java | 48 ----- 
.../java/org/bytedeco/pytorch/NodeIntMap.java | 48 ----- .../bytedeco/pytorch/NodeSmallVectorBase.java | 29 --- .../pytorch/NodeSmallVectorCommon.java | 49 ----- .../bytedeco/pytorch/NodeSmallVectorImpl.java | 71 ------- .../pytorch/OptionalSingleElementType.java | 35 ---- .../java/org/bytedeco/pytorch/ReadyQueue.java | 26 --- .../RecordFunctionCallbackHandleVector.java | 35 ---- .../org/bytedeco/pytorch/RecordScopeSet.java | 46 ----- .../pytorch/RegistrationListenerList.java | 26 --- .../org/bytedeco/pytorch/SavedVariable.java | 83 -------- .../pytorch/SavedVariableArrayRef.java | 135 ------------ .../bytedeco/pytorch/SavedVariableVector.java | 47 ----- .../pytorch/SchemaRegistrationHandleRAII.java | 26 --- .../pytorch/SharedFunctionPreVector.java | 90 -------- .../org/bytedeco/pytorch/SmallNodeVector.java | 51 ----- .../java/org/bytedeco/pytorch/StreamSet.java | 46 ----- .../bytedeco/pytorch/StringFunctionMap.java | 48 ----- .../org/bytedeco/pytorch/StringIntMap.java | 48 ----- .../pytorch/StringLongStringMapMap.java | 48 ----- .../bytedeco/pytorch/StringModuleDict.java | 193 ------------------ .../pytorch/StringModuleDictItem.java | 53 ----- .../pytorch/StringModuleDictItemVector.java | 47 ----- .../bytedeco/pytorch/StringModulePair.java | 48 ----- .../bytedeco/pytorch/StringModuleVector.java | 58 ------ .../org/bytedeco/pytorch/StringTensorMap.java | 48 ----- .../org/bytedeco/pytorch/T_DoubleLong_T.java | 36 ---- .../org/bytedeco/pytorch/T_StringLong_T.java | 36 ---- .../T_TensorTensorLongLongTensor_T.java | 42 ---- .../T_TensorTensorTensorTensorLong_T.java | 42 ---- ...ensorTensorTensorTensorTensorTensor_T.java | 44 ---- ...TensorTensorsLongLongLongLongTensor_T.java | 50 ----- ...ensorVectorTensorVectorTensorVector_T.java | 42 ---- .../pytorch/T_TensorVectorTensor_T.java | 36 ---- .../pytorch/ThreadLocalStateOptional.java | 35 ---- .../org/bytedeco/pytorch/TypeMetaData.java | 118 ----------- .../pytorch/cuda/CUDAStreamOptional.java | 38 ---- 
.../pytorch/cuda/OptionalCUDAGuard.java | 90 -------- .../pytorch/cuda/OptionalCUDAStreamGuard.java | 86 -------- .../pytorch/fibonacci_hash_policy.java | 47 ----- .../org/bytedeco/pytorch/global/torch.java | 157 +------------- .../bytedeco/pytorch/global/torch_cuda.java | 11 +- .../org/bytedeco/pytorch/mz_zip_archive.java | 26 --- .../pytorch/power_of_two_hash_policy.java | 46 ----- .../org/bytedeco/pytorch/presets/torch.java | 60 ++---- .../bytedeco/pytorch/presets/torch_cuda.java | 13 +- 55 files changed, 39 insertions(+), 3038 deletions(-) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CUevent_st.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsTensor.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModulePolicy.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NodeIntMap.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorBase.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SharedFunctionPreVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleVector.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorLongLongTensor_T.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorLong_T.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensor_T.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/mz_zip_archive.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java deleted file mode 100644 index d85fe88b885..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - 
-@Namespace("torch::profiler::impl::kineto") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ActivityTraceWrapper extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public ActivityTraceWrapper() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ActivityTraceWrapper(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java deleted file mode 100644 index 1cd991dd051..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java +++ /dev/null @@ -1,90 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Bool2Vector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Bool2Vector(Pointer p) { super(p); } - public Bool2Vector(BoolPointer value) { this(1); put(0, value); } - public Bool2Vector(BoolPointer ... 
array) { this(array.length); put(array); } - public Bool2Vector() { allocate(); } - public Bool2Vector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef Bool2Vector put(@ByRef Bool2Vector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - public BoolPointer front() { return get(0); } - public BoolPointer back() { return get(size() - 1); } - @Index(function = "at") public native @Cast("std::array*") @ByRef BoolPointer get(@Cast("size_t") long i); - public native Bool2Vector put(@Cast("size_t") long i, BoolPointer value); - - public native @ByVal Iterator insert(@ByVal Iterator pos, @Cast("std::array*") @ByRef BoolPointer value); - public native @ByVal Iterator erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @Cast("std::array*") @ByRef @Const BoolPointer get(); - } - - public BoolPointer[] get() { - BoolPointer[] array = new BoolPointer[size() < Integer.MAX_VALUE ? 
(int)size() : Integer.MAX_VALUE]; - for (int i = 0; i < array.length; i++) { - array[i] = get(i); - } - return array; - } - @Override public String toString() { - return java.util.Arrays.toString(get()); - } - - public BoolPointer pop_back() { - long size = size(); - BoolPointer value = get(size - 1); - resize(size - 1); - return value; - } - public Bool2Vector push_back(BoolPointer value) { - long size = size(); - resize(size + 1); - return put(size, value); - } - public Bool2Vector put(BoolPointer value) { - if (size() != 1) { resize(1); } - return put(0, value); - } - public Bool2Vector put(BoolPointer ... array) { - if (size() != array.length) { resize(array.length); } - for (int i = 0; i < array.length; i++) { - put(i, array[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUevent_st.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUevent_st.java deleted file mode 100644 index d36882ccba1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUevent_st.java +++ /dev/null @@ -1,27 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CUevent_st extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public CUevent_st() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public CUevent_st(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java deleted file mode 100644 index 0307371678f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InterpreterStateImpl extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public InterpreterStateImpl() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public InterpreterStateImpl(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java deleted file mode 100644 index ff7b37f261f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Lexer extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Lexer(Pointer p) { super(p); } - - public Lexer(@SharedPtr Source source) { super((Pointer)null); allocate(source); } - private native void allocate(@SharedPtr Source source); - // Return the current token, and then move to the next one - public native @ByVal Token next(); - // Skip the current token if it matches the given kind - public native @Cast("bool") boolean nextIf(int kind); - - public native void reportError(@StdString BytePointer what); - public native void reportError(@StdString String what); - public native void reportError(@StdString BytePointer what, @Const @ByRef Token t); - public native void reportError(@StdString String what, @Const @ByRef Token t); - public native void expected(@StdString BytePointer what, @Const @ByRef Token t); - public native void expected(@StdString String what, @Const @ByRef Token t); - public native void expected(@StdString BytePointer what); - public native void expected(@StdString String what); - // Check that the current token has a given kind, return the current token, - // and advance to the next one. 
- public native @ByVal Token expect(int kind); - public native @ByRef Token lookahead(); - public native @ByRef Token cur(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java deleted file mode 100644 index 869e85d05ef..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java +++ /dev/null @@ -1,51 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::unordered_map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LongStringMap extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public LongStringMap(Pointer p) { super(p); } - public LongStringMap() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef LongStringMap put(@ByRef LongStringMap x); - - public boolean empty() { return size() == 0; } - public native long size(); - - public BytePointer front() { return get(0); } - public BytePointer back() { return get(size() - 1); } - @Index public native @StdString BytePointer get(@Cast("int64_t") long i); - public native LongStringMap put(@Cast("int64_t") long i, BytePointer value); - @ValueSetter @Index public native LongStringMap put(@Cast("int64_t") long i, @StdString String value); - - public native void erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *().first") @MemberGetter @Cast("int64_t") long first(); - public native @Name("operator *().second") @MemberGetter @StdString BytePointer second(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java deleted file mode 100644 index 75d2d27fa99..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java +++ /dev/null @@ -1,133 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static 
org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::ArrayRef >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LongVectorArrayRef extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LongVectorArrayRef(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public LongVectorArrayRef(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public LongVectorArrayRef position(long position) { - return (LongVectorArrayRef)super.position(position); - } - @Override public LongVectorArrayRef getPointer(long i) { - return new LongVectorArrayRef((Pointer)this).offsetAddress(i); - } - - /** \name Constructors - * \{ -

- * Construct an empty ArrayRef. */ - /* implicit */ public LongVectorArrayRef() { super((Pointer)null); allocate(); } -private native void allocate(); - - /** Construct an ArrayRef from a single element. */ - // TODO Make this explicit - - - /** Construct an ArrayRef from a pointer and length. */ - public LongVectorArrayRef(@Cast("const std::vector*") LongVector data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } - private native void allocate(@Cast("const std::vector*") LongVector data, @Cast("size_t") long length); - - /** Construct an ArrayRef from a range. */ - public LongVectorArrayRef(@Cast("const std::vector*") LongVector begin, @Cast("const std::vector*") LongVector end) { super((Pointer)null); allocate(begin, end); } - private native void allocate(@Cast("const std::vector*") LongVector begin, @Cast("const std::vector*") LongVector end); - - /** Construct an ArrayRef from a SmallVector. This is templated in order to - * avoid instantiating SmallVectorTemplateCommon whenever we - * copy-construct an ArrayRef. */ - - /** Construct an ArrayRef from a std::vector. */ - // The enable_if stuff here makes sure that this isn't used for - // std::vector, because ArrayRef can't work on a std::vector - // bitfield. - - /** Construct an ArrayRef from a std::array */ - - /** Construct an ArrayRef from a C array. */ - - /** Construct an ArrayRef from a std::initializer_list. */ - /* implicit */ - - /** \} - * \name Simple Operations - * \{ */ - - public native @Const @ByPtr LongVector begin(); - public native @Const @ByPtr LongVector end(); - - // These are actually the same as iterator, since ArrayRef only - // gives you const iterators. - public native @Const @ByPtr LongVector cbegin(); - public native @Const @ByPtr LongVector cend(); - - /** empty - Check if the array is empty. */ - public native @Cast("const bool") boolean empty(); - - public native @Cast("const std::vector*") LongVector data(); - - /** size - Get the array size. 
*/ - public native @Cast("const size_t") long size(); - - /** front - Get the first element. */ - public native @Cast("const std::vector*") @ByRef LongVector front(); - - /** back - Get the last element. */ - public native @Cast("const std::vector*") @ByRef LongVector back(); - - /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal LongVectorArrayRef RHS); - - /** slice(n, m) - Take M elements of the array starting at element N */ - public native @Const @ByVal LongVectorArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); - - /** slice(n) - Chop off the first N elements of the array. */ - public native @Const @ByVal LongVectorArrayRef slice(@Cast("size_t") long N); - - /** \} - * \name Operator Overloads - * \{ */ - public native @Cast("const std::vector*") @ByRef @Name("operator []") LongVector get(@Cast("size_t") long Index); - - /** Vector compatibility */ - - /// - public native @Cast("const std::vector*") @ByRef LongVector at(@Cast("size_t") long Index); - - /** Disallow accidental assignment from a temporary. - * - * The declaration here is extra complicated so that "arrayRef = {}" - * continues to select the move assignment operator. */ - - - /** Disallow accidental assignment from a temporary. - * - * The declaration here is extra complicated so that "arrayRef = {}" - * continues to select the move assignment operator. 
*/ - - - /** \} - * \name Expensive Operations - * \{ */ - public native @Cast("std::vector*") @StdVector LongVector vec(); - - /** \} */ -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsTensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsTensor.java deleted file mode 100644 index ea178decbfb..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsTensor.java +++ /dev/null @@ -1,50 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - // namespace at -@Name("c10::MaybeOwnedTraits") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaybeOwnedTraitsTensor extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public MaybeOwnedTraitsTensor() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MaybeOwnedTraitsTensor(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaybeOwnedTraitsTensor(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public MaybeOwnedTraitsTensor position(long position) { - return (MaybeOwnedTraitsTensor)super.position(position); - } - @Override public MaybeOwnedTraitsTensor getPointer(long i) { - return new MaybeOwnedTraitsTensor((Pointer)this).offsetAddress(i); - } - - - public static native @ByVal @Cast("c10::MaybeOwnedTraits::borrow_type*") TensorBase createBorrow(@Cast("const c10::MaybeOwnedTraits::owned_type*") @ByRef TensorBase from); - - public static native void assignBorrow(@Cast("c10::MaybeOwnedTraits::borrow_type*") @ByRef TensorBase lhs, @Cast("const c10::MaybeOwnedTraits::borrow_type*") @ByRef TensorBase rhs); - - public static native void destroyBorrow(@Cast("c10::MaybeOwnedTraits::borrow_type*") @ByRef TensorBase toDestroy); - - public static native @Cast("const c10::MaybeOwnedTraits::owned_type*") @ByRef TensorBase referenceFromBorrow(@Cast("const c10::MaybeOwnedTraits::borrow_type*") @ByRef TensorBase borrow); - - public static native @Cast("const c10::MaybeOwnedTraits::owned_type*") TensorBase pointerFromBorrow(@Cast("const c10::MaybeOwnedTraits::borrow_type*") @ByRef TensorBase borrow); - - public static native @Cast("bool") boolean debugBorrowIsValid(@Cast("const c10::MaybeOwnedTraits::borrow_type*") @ByRef TensorBase arg0); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java deleted file mode 100644 index 610b36f5e9f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java +++ /dev/null @@ -1,90 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import 
org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ModuleVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ModuleVector(Pointer p) { super(p); } - public ModuleVector(Module value) { this(1); put(0, value); } - public ModuleVector(Module ... array) { this(array.length); put(array); } - public ModuleVector() { allocate(); } - public ModuleVector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef ModuleVector put(@ByRef ModuleVector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - public Module front() { return get(0); } - public Module back() { return get(size() - 1); } - @Index(function = "at") public native @ByRef Module get(@Cast("size_t") long i); - public native ModuleVector put(@Cast("size_t") long i, Module value); - - public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef Module value); - public native @ByVal Iterator erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native 
@Name("operator *") @ByRef @Const Module get(); - } - - public Module[] get() { - Module[] array = new Module[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; - for (int i = 0; i < array.length; i++) { - array[i] = get(i); - } - return array; - } - @Override public String toString() { - return java.util.Arrays.toString(get()); - } - - public Module pop_back() { - long size = size(); - Module value = get(size - 1); - resize(size - 1); - return value; - } - public ModuleVector push_back(Module value) { - long size = size(); - resize(size + 1); - return put(size, value); - } - public ModuleVector put(Module value) { - if (size() != 1) { resize(1); } - return put(0, value); - } - public ModuleVector put(Module ... array) { - if (size() != array.length) { resize(array.length); } - for (int i = 0; i < array.length; i++) { - put(i, array[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModulePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModulePolicy.java deleted file mode 100644 index 555c0b766e9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModulePolicy.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// take a Policy object, and make a version of it that returns the slot. -// along with the fully qualified name of that slot. 
This is used for the named_ -// variants like named_parameters(). -@Name("torch::jit::detail::NamedPolicy") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedJitModulePolicy extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public NamedJitModulePolicy() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedJitModulePolicy(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NamedJitModulePolicy(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public NamedJitModulePolicy position(long position) { - return (NamedJitModulePolicy)super.position(position); - } - @Override public NamedJitModulePolicy getPointer(long i) { - return new NamedJitModulePolicy((Pointer)this).offsetAddress(i); - } - - public static native @ByVal @Cast("torch::jit::detail::NamedPolicy::value_type*") NamedJitModule create( - @StdVector SlotCursor cursors, - @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); - @MemberGetter public static native @Cast("const bool") boolean all_slots(); - public static final boolean all_slots = all_slots(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeIntMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeIntMap.java deleted file mode 100644 index ed7622fa1b7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeIntMap.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; 
-import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::unordered_map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NodeIntMap extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NodeIntMap(Pointer p) { super(p); } - public NodeIntMap() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef NodeIntMap put(@ByRef NodeIntMap x); - - public boolean empty() { return size() == 0; } - public native long size(); - - @Index public native int get(Node i); - public native NodeIntMap put(Node i, int value); - - public native void erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *().first") @MemberGetter @Const Node first(); - public native @Name("operator *().second") @MemberGetter int second(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorBase.java deleted file mode 100644 index 8fa0aa80cbb..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorBase.java +++ /dev/null @@ -1,29 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import 
org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVectorTemplateBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NodeSmallVectorBase extends NodeSmallVectorCommon { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NodeSmallVectorBase(Pointer p) { super(p); } - - public native void push_back(@ByPtrRef Node Elt); - - public native void pop_back(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java deleted file mode 100644 index 4cf04304a22..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java +++ /dev/null @@ -1,49 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVectorTemplateCommon") @Properties(inherit = 
org.bytedeco.pytorch.presets.torch.class) -public class NodeSmallVectorCommon extends IntSizedSmallVectorBase { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NodeSmallVectorCommon(Pointer p) { super(p); } - - - // forward iterator creation methods. - public native @ByVal @Cast("c10::SmallVectorTemplateCommon::iterator*") Node begin(); - public native @ByVal @Cast("c10::SmallVectorTemplateCommon::iterator*") Node end(); - - // reverse iterator creation methods. - - public native long size_in_bytes(); - public native long max_size(); - - public native @Cast("size_t") long capacity_in_bytes(); - - /** Return a pointer to the vector's buffer, even if empty(). */ - public native @ByVal @Cast("c10::SmallVectorTemplateCommon::pointer*") Node data(); - /** Return a pointer to the vector's buffer, even if empty(). */ - - // SmallVector::at is NOT from LLVM. - public native @ByPtr Node at(long idx); - public native @Name("operator []") @ByPtr Node get(long idx); - - public native @ByPtr Node front(); - - public native @ByPtr Node back(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java deleted file mode 100644 index 3e30e7be69d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java +++ /dev/null @@ -1,71 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static 
org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVectorImpl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NodeSmallVectorImpl extends NodeSmallVectorBase { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NodeSmallVectorImpl(Pointer p) { super(p); } - - - - public native void clear(); - public native void resize(long N); - - /** Like resize, but \ref T is POD, the new values won't be initialized. */ - public native void resize_for_overwrite(long N); - - public native void resize(long N, @ByPtr Node NV); - - public native void reserve(long N); - - public native void pop_back_n(long NumItems); - - public native Node pop_back_val(); - - public native void swap(@ByRef NodeSmallVectorImpl RHS); - - /** Add the specified range to the end of the SmallVector. */ - - /** Append \p NumInputs copies of \p Elt to the end. */ - public native void append(long NumInputs, @ByPtr Node Elt); - - public native void append(@Const @ByRef NodeSmallVectorImpl RHS); - - public native void assign(long NumElts, @ByPtr Node Elt); - - // FIXME: Consider assigning over existing elements, rather than clearing & - // re-initializing them - for all assign(...) variants. 
- - public native void assign(@Const @ByRef NodeSmallVectorImpl RHS); - - public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node erase(@ByVal @Cast("c10::SmallVectorImpl::const_iterator*") Node CI); - - public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node erase(@ByVal @Cast("c10::SmallVectorImpl::const_iterator*") Node CS, @ByVal @Cast("c10::SmallVectorImpl::const_iterator*") Node CE); - public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node insert(@ByVal @Cast("c10::SmallVectorImpl::iterator*") Node I, Node Elt); - - public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node insert(@ByVal @Cast("c10::SmallVectorImpl::iterator*") Node I, long NumToInsert, @ByPtr Node Elt); - - public native @ByRef @Name("operator =") NodeSmallVectorImpl put(@Const @ByRef NodeSmallVectorImpl RHS); - - public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NodeSmallVectorImpl RHS); - public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef NodeSmallVectorImpl RHS); - - public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef NodeSmallVectorImpl RHS); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java deleted file mode 100644 index 609e2b3b6b0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static 
org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SingleElementType") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OptionalSingleElementType extends SharedType { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OptionalSingleElementType(Pointer p) { super(p); } - - @MemberGetter public static native TypeKind Kind(); - - public native @Const @ByRef Type.TypePtr getElementType(); - - public native @Cast("bool") boolean hasFreeVariables(); - - public native @ByVal TypeArrayRef containedTypes(); - - public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java deleted file mode 100644 index eb6145b8fd2..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("torch::autograd") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReadyQueue extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ - public ReadyQueue() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReadyQueue(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java deleted file mode 100644 index ba394961e7c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RecordFunctionCallbackHandleVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public RecordFunctionCallbackHandleVector(Pointer p) { super(p); } - public RecordFunctionCallbackHandleVector() { allocate(); } - private native void allocate(); - - - public boolean empty() { return size() == 0; } - public native long size(); - - @Index(function = "at") public native @Cast("at::RecordFunctionCallback*") @ByRef Pointer first(@Cast("size_t") long i); public native RecordFunctionCallbackHandleVector first(@Cast("size_t") long i, Pointer first); - @Index(function = "at") public native @Cast("uint64_t") long second(@Cast("size_t") long i); public native RecordFunctionCallbackHandleVector second(@Cast("size_t") long i, long second); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java deleted file mode 100644 index f1f13f8161a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java +++ /dev/null @@ -1,46 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RecordScopeSet extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public RecordScopeSet(Pointer p) { super(p); } - public RecordScopeSet() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef RecordScopeSet put(@ByRef RecordScopeSet x); - - public boolean empty() { return size() == 0; } - public native long size(); - - public RecordScope front() { try (Iterator it = begin()) { return it.get(); } } - public native void insert(@ByRef RecordScope value); - public native void erase(@ByRef RecordScope value); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const RecordScope get(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java deleted file mode 100644 index 6aadfb8e0f6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("c10::detail") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) 
-public class RegistrationListenerList extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public RegistrationListenerList() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public RegistrationListenerList(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java deleted file mode 100644 index dd95c5fd443..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java +++ /dev/null @@ -1,83 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A snapshot of a variable at a certain version. A {@code SavedVariable} stores - * enough information to reconstruct a variable from a certain point in time. */ -@Namespace("torch::autograd") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SavedVariable extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SavedVariable(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public SavedVariable(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SavedVariable position(long position) { - return (SavedVariable)super.position(position); - } - @Override public SavedVariable getPointer(long i) { - return new SavedVariable((Pointer)this).offsetAddress(i); - } - - public SavedVariable() { super((Pointer)null); allocate(); } - private native void allocate(); - public SavedVariable( - @Cast("const torch::autograd::Variable*") @ByRef Tensor variable, - @Cast("bool") boolean is_output, - @Cast("bool") boolean is_inplace_on_view/*=false*/) { super((Pointer)null); allocate(variable, is_output, is_inplace_on_view); } - private native void allocate( - @Cast("const torch::autograd::Variable*") @ByRef Tensor variable, - @Cast("bool") boolean is_output, - @Cast("bool") boolean is_inplace_on_view/*=false*/); - public SavedVariable( - @Cast("const torch::autograd::Variable*") @ByRef Tensor variable, - @Cast("bool") boolean is_output) { super((Pointer)null); allocate(variable, is_output); } - private native void allocate( - @Cast("const torch::autograd::Variable*") @ByRef Tensor variable, - @Cast("bool") boolean is_output); - public SavedVariable( - @Const @ByRef TensorOptional variable, - @Cast("bool") boolean is_output, - @Cast("bool") boolean is_inplace_on_view/*=false*/) { super((Pointer)null); allocate(variable, is_output, is_inplace_on_view); } - private native void allocate( - @Const @ByRef TensorOptional variable, - @Cast("bool") boolean is_output, - @Cast("bool") boolean is_inplace_on_view/*=false*/); - public SavedVariable( - @Const @ByRef TensorOptional variable, - @Cast("bool") boolean is_output) { super((Pointer)null); allocate(variable, is_output); } - private native void allocate( - @Const @ByRef TensorOptional variable, - @Cast("bool") boolean is_output); - public SavedVariable(@ByRef(true) SavedVariable arg0) { super((Pointer)null); allocate(arg0); } - private 
native void allocate(@ByRef(true) SavedVariable arg0); - public native @ByRef @Name("operator =") SavedVariable put(@ByRef(true) SavedVariable arg0); - - /** Reconstructs the saved variable. Pass {@code saved_for} as the gradient - * function if constructing the {@code SavedVariable} with it would have caused a - * circular reference. */ - public native @ByVal @Cast("torch::autograd::Variable*") Tensor unpack(@SharedPtr Node saved_for/*=nullptr*/); - public native @ByVal @Cast("torch::autograd::Variable*") Tensor unpack(); - - public native void register_hooks(@UniquePtr SavedVariableHooks hooks); - - public native void reset_data(); - - public native @Cast("bool") boolean has_hooks(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java deleted file mode 100644 index d6376ec9ccf..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java +++ /dev/null @@ -1,135 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SavedVariableArrayRef extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SavedVariableArrayRef(Pointer p) { super(p); } - /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ - public SavedVariableArrayRef(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SavedVariableArrayRef position(long position) { - return (SavedVariableArrayRef)super.position(position); - } - @Override public SavedVariableArrayRef getPointer(long i) { - return new SavedVariableArrayRef((Pointer)this).offsetAddress(i); - } - - /** \name Constructors - * \{ -

- * Construct an empty ArrayRef. */ - /* implicit */ public SavedVariableArrayRef() { super((Pointer)null); allocate(); } -private native void allocate(); - - /** Construct an ArrayRef from a single element. */ - // TODO Make this explicit - - - /** Construct an ArrayRef from a pointer and length. */ - public SavedVariableArrayRef(@Const SavedVariable data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } - private native void allocate(@Const SavedVariable data, @Cast("size_t") long length); - - /** Construct an ArrayRef from a range. */ - public SavedVariableArrayRef(@Const SavedVariable begin, @Const SavedVariable end) { super((Pointer)null); allocate(begin, end); } - private native void allocate(@Const SavedVariable begin, @Const SavedVariable end); - - /** Construct an ArrayRef from a SmallVector. This is templated in order to - * avoid instantiating SmallVectorTemplateCommon whenever we - * copy-construct an ArrayRef. */ - - /** Construct an ArrayRef from a std::vector. */ - // The enable_if stuff here makes sure that this isn't used for - // std::vector, because ArrayRef can't work on a std::vector - // bitfield. - public SavedVariableArrayRef(@ByRef SavedVariableVector vec) { super((Pointer)null); allocate(vec); } - private native void allocate(@ByRef SavedVariableVector vec); - - /** Construct an ArrayRef from a std::array */ - - /** Construct an ArrayRef from a C array. */ - - /** Construct an ArrayRef from a std::initializer_list. */ - /* implicit */ - - /** \} - * \name Simple Operations - * \{ */ - - public native @Const @ByPtr SavedVariable begin(); - public native @Const @ByPtr SavedVariable end(); - - // These are actually the same as iterator, since ArrayRef only - // gives you const iterators. - public native @Const @ByPtr SavedVariable cbegin(); - public native @Const @ByPtr SavedVariable cend(); - - /** empty - Check if the array is empty. 
*/ - public native @Cast("const bool") boolean empty(); - - public native @Const SavedVariable data(); - - /** size - Get the array size. */ - public native @Cast("const size_t") long size(); - - /** front - Get the first element. */ - public native @Const @ByRef SavedVariable front(); - - /** back - Get the last element. */ - public native @Const @ByRef SavedVariable back(); - - /** equals - Check for element-wise equality. */ - - - /** slice(n, m) - Take M elements of the array starting at element N */ - public native @Const @ByVal SavedVariableArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); - - /** slice(n) - Chop off the first N elements of the array. */ - public native @Const @ByVal SavedVariableArrayRef slice(@Cast("size_t") long N); - - /** \} - * \name Operator Overloads - * \{ */ - public native @Const @ByRef @Name("operator []") SavedVariable get(@Cast("size_t") long Index); - - /** Vector compatibility */ - - /// - public native @Const @ByRef SavedVariable at(@Cast("size_t") long Index); - - /** Disallow accidental assignment from a temporary. - * - * The declaration here is extra complicated so that "arrayRef = {}" - * continues to select the move assignment operator. */ - - - /** Disallow accidental assignment from a temporary. - * - * The declaration here is extra complicated so that "arrayRef = {}" - * continues to select the move assignment operator. 
*/ - - - /** \} - * \name Expensive Operations - * \{ */ - - - /** \} */ -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java deleted file mode 100644 index 4ce52cd9aea..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SavedVariableVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public SavedVariableVector(Pointer p) { super(p); } - public SavedVariableVector() { allocate(); } - private native void allocate(); - - - public boolean empty() { return size() == 0; } - public native long size(); - - public SavedVariable front() { return get(0); } - public SavedVariable back() { return get(size() - 1); } - @Index(function = "at") public native @ByRef SavedVariable get(@Cast("size_t") long i); - - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const SavedVariable get(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java deleted file mode 100644 index fa207e4a8f7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("c10") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SchemaRegistrationHandleRAII extends Pointer { - /** 
Empty constructor. Calls {@code super((Pointer)null)}. */ - public SchemaRegistrationHandleRAII() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SchemaRegistrationHandleRAII(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedFunctionPreVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedFunctionPreVector.java deleted file mode 100644 index 03371d340f7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedFunctionPreVector.java +++ /dev/null @@ -1,90 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SharedFunctionPreVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SharedFunctionPreVector(Pointer p) { super(p); } - public SharedFunctionPreVector(FunctionPreHook value) { this(1); put(0, value); } - public SharedFunctionPreVector(FunctionPreHook ... 
array) { this(array.length); put(array); } - public SharedFunctionPreVector() { allocate(); } - public SharedFunctionPreVector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef SharedFunctionPreVector put(@ByRef SharedFunctionPreVector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - public FunctionPreHook front() { return get(0); } - public FunctionPreHook back() { return get(size() - 1); } - @Index(function = "at") public native @SharedPtr("torch::autograd::FunctionPreHook") FunctionPreHook get(@Cast("size_t") long i); - public native SharedFunctionPreVector put(@Cast("size_t") long i, FunctionPreHook value); - - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::autograd::FunctionPreHook") FunctionPreHook value); - public native @ByVal Iterator erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr("torch::autograd::FunctionPreHook") @Const FunctionPreHook get(); - } - - public FunctionPreHook[] get() { - FunctionPreHook[] array = new FunctionPreHook[size() < Integer.MAX_VALUE ? 
(int)size() : Integer.MAX_VALUE]; - for (int i = 0; i < array.length; i++) { - array[i] = get(i); - } - return array; - } - @Override public String toString() { - return java.util.Arrays.toString(get()); - } - - public FunctionPreHook pop_back() { - long size = size(); - FunctionPreHook value = get(size - 1); - resize(size - 1); - return value; - } - public SharedFunctionPreVector push_back(FunctionPreHook value) { - long size = size(); - resize(size + 1); - return put(size, value); - } - public SharedFunctionPreVector put(FunctionPreHook value) { - if (size() != 1) { resize(1); } - return put(0, value); - } - public SharedFunctionPreVector put(FunctionPreHook ... array) { - if (size() != array.length) { resize(array.length); } - for (int i = 0; i < array.length; i++) { - put(i, array[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java deleted file mode 100644 index 53174c93531..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java +++ /dev/null @@ -1,51 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("c10::SmallVector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SmallNodeVector extends NodeSmallVectorImpl { - static { Loader.load(); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public SmallNodeVector(Pointer p) { super(p); } - - public SmallNodeVector() { super((Pointer)null); allocate(); } - private native void allocate(); - - public SmallNodeVector(@Cast("size_t") long Size, @ByPtrRef Node Value/*=torch::autograd::Node*()*/) { super((Pointer)null); allocate(Size, Value); } - private native void allocate(@Cast("size_t") long Size, @ByPtrRef Node Value/*=torch::autograd::Node*()*/); - public SmallNodeVector(@Cast("size_t") long Size) { super((Pointer)null); allocate(Size); } - private native void allocate(@Cast("size_t") long Size); - - // note: The enable_if restricts Container to types that have a .begin() and - // .end() that return valid input iterators. - - public SmallNodeVector(@Const @ByRef SmallNodeVector RHS) { super((Pointer)null); allocate(RHS); } - private native void allocate(@Const @ByRef SmallNodeVector RHS); - - public native @ByRef @Name("operator =") SmallNodeVector put(@Const @ByRef SmallNodeVector RHS); - - // note: The enable_if restricts Container to types that have a .begin() and - // .end() that return valid input iterators. - - - - - - // note: The enable_if restricts Container to types that have a .begin() and - // .end() that return valid input iterators. 
-} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java deleted file mode 100644 index b720402cc9f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java +++ /dev/null @@ -1,46 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StreamSet extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public StreamSet(Pointer p) { super(p); } - public StreamSet() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef StreamSet put(@ByRef StreamSet x); - - public boolean empty() { return size() == 0; } - public native long size(); - - public Stream front() { try (Iterator it = begin()) { return it.get(); } } - public native void insert(@ByRef Stream value); - public native void erase(@ByRef Stream value); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const Stream get(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java deleted file mode 100644 index 29ea8a7d9a8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::unordered_map >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringFunctionMap extends Pointer { - static { 
Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringFunctionMap(Pointer p) { super(p); } - public StringFunctionMap() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef StringFunctionMap put(@ByRef StringFunctionMap x); - - public boolean empty() { return size() == 0; } - public native long size(); - - @Index public native @Cast("std::function*") @ByRef Pointer get(@StdString BytePointer i); - public native StringFunctionMap put(@StdString BytePointer i, Pointer value); - - public native void erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *().first") @MemberGetter @StdString BytePointer first(); - public native @Name("operator *().second") @MemberGetter @Cast("std::function*") @ByRef @Const Pointer second(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java deleted file mode 100644 index a5fa205b60b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static 
org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringIntMap extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringIntMap(Pointer p) { super(p); } - public StringIntMap() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef StringIntMap put(@ByRef StringIntMap x); - - public boolean empty() { return size() == 0; } - public native long size(); - - @Index public native int get(@StdString BytePointer i); - public native StringIntMap put(@StdString BytePointer i, int value); - - public native void erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *().first") @MemberGetter @StdString BytePointer first(); - public native @Name("operator *().second") @MemberGetter int second(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java deleted file mode 100644 index 1e4416435ad..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import 
org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::unordered_map >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringLongStringMapMap extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringLongStringMapMap(Pointer p) { super(p); } - public StringLongStringMapMap() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef StringLongStringMapMap put(@ByRef StringLongStringMapMap x); - - public boolean empty() { return size() == 0; } - public native long size(); - - @Index public native @ByRef LongStringMap get(@StdString BytePointer i); - public native StringLongStringMapMap put(@StdString BytePointer i, LongStringMap value); - - public native void erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *().first") @MemberGetter @StdString BytePointer first(); - public native @Name("operator *().second") @MemberGetter @ByRef @Const LongStringMap second(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java deleted file mode 100644 index ce135bb7599..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java +++ 
/dev/null @@ -1,193 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::OrderedDict") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringModuleDict extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringModuleDict(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public StringModuleDict(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public StringModuleDict position(long position) { - return (StringModuleDict)super.position(position); - } - @Override public StringModuleDict getPointer(long i) { - return new StringModuleDict((Pointer)this).offsetAddress(i); - } - - /** A (key, value) pair. */ - - // The lifetime of an iterator is bound to the lifetime of the `OrderedDict`. - // Further, any `insert()` operation may invalidate all iterators - // pointing into the vector. - - /** Constructs the {@code OrderedDict} with a short description of the kinds of keys - * stored in the {@code OrderedDict}. This description is used in error messages - * thrown by the {@code OrderedDict}. 
*/ - public StringModuleDict(@StdString BytePointer key_description/*="Key"*/) { super((Pointer)null); allocate(key_description); } - private native void allocate(@StdString BytePointer key_description/*="Key"*/); - public StringModuleDict() { super((Pointer)null); allocate(); } - private native void allocate(); - public StringModuleDict(@StdString String key_description/*="Key"*/) { super((Pointer)null); allocate(key_description); } - private native void allocate(@StdString String key_description/*="Key"*/); - - /** Copy constructs this {@code OrderedDict} from {@code other}. */ - public StringModuleDict(@Const @ByRef StringModuleDict other) { super((Pointer)null); allocate(other); } - private native void allocate(@Const @ByRef StringModuleDict other); - - /** Assigns items from {@code other} to this {@code OrderedDict}. */ - public native @ByRef @Name("operator =") StringModuleDict put(@Const @ByRef StringModuleDict other); - - // NB: Move works by default, because you can move-construct vectors of const - // values. I tried to make this noexcept (conditional on the move constructors - // of index_ and items_ being noexcept) but the obvious spelling didn't - // compile on Windows. - - /** Constructs a new {@code OrderedDict} and pre-populates it with the given - * {@code Item}s. */ - /*implicit */ - - /** Returns the key description string the {@code OrderedDict} was constructed with. */ - public native @StdString @NoException(true) BytePointer key_description(); - - // Element Access - - /** Returns the very first item in the {@code OrderedDict} and throws an exception - * if it is empty. */ - public native @ByRef StringModuleDictItem front(); - - /** Returns the very first item in the {@code OrderedDict} and throws an exception - * if it is empty. */ - - /** Returns the very last item in the {@code OrderedDict} and throws an exception - * if it is empty. 
*/ - public native @ByRef StringModuleDictItem back(); - - /** Returns the very last item in the {@code OrderedDict} and throws an exception - * if it is empty. */ - - /** Returns the item at the {@code index}-th position in the {@code OrderedDict}. Throws - * an exception if the index is out of bounds. */ - public native @ByRef @Name("operator []") StringModuleDictItem get(@Cast("size_t") long index); - - /** Returns the item at the {@code index}-th position in the {@code OrderedDict}. Throws - * an exception if the index is out of bounds. */ - - /** Returns the value associated with the given {@code key}. Throws an exception if - * no such key is stored in the {@code OrderedDict}. Use {@code find()} for a - * non-throwing way of accessing a value if it is present. */ - public native @ByRef @Name("operator []") Module get(@StdString BytePointer key); - public native @ByRef @Name("operator []") Module get(@StdString String key); - - /** Returns the value associated with the given {@code key}. Throws an exception if - * no such key is stored in the {@code OrderedDict}. Use {@code find()} for a - * non-throwing way of accessing a value if it is present. */ - - // Lookup - - /** Returns a pointer to the value associated with the given key, or a - * {@code nullptr} if no such key is stored in the {@code OrderedDict}. */ - public native @NoException(true) Module find(@StdString BytePointer key); - public native @NoException(true) Module find(@StdString String key); - - /** Returns a pointer to the value associated with the given key, or a - * {@code nullptr} if no such key is stored in the {@code OrderedDict}. */ - - /** Returns true if the key is present in the {@code OrderedDict}. */ - public native @Cast("bool") @NoException(true) boolean contains(@StdString BytePointer key); - public native @Cast("bool") @NoException(true) boolean contains(@StdString String key); - - // Iterators - - /** Returns an iterator to the first item in the {@code OrderedDict}. 
Iteration is - * ordered. */ - public native @ByVal @Cast("torch::OrderedDict::Iterator*") StringModuleDictItemVector.Iterator begin(); - - /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is - * ordered. */ - - /** Returns an iterator one past the last item in the {@code OrderedDict}. */ - public native @ByVal @Cast("torch::OrderedDict::Iterator*") StringModuleDictItemVector.Iterator end(); - - /** Returns an iterator one past the last item in the {@code OrderedDict}. */ - - // Capacity - - /** Returns the number of items currently stored in the {@code OrderedDict}. */ - public native @Cast("size_t") @NoException(true) long size(); - - /** Returns true if the {@code OrderedDict} contains no elements. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); - - /** Resizes internal storage to fit at least {@code requested_capacity} items - * without requiring reallocation. */ - public native void reserve(@Cast("size_t") long requested_capacity); - - // Modifiers - - /** Inserts a new {@code (key, value)} pair into the {@code OrderedDict}. Throws an - * exception if the key is already present. If insertion is successful, - * immediately returns a reference to the inserted value. */ - - /** Inserts a new {@code (key, value)} pair into the {@code OrderedDict}. Throws an - * exception if the key is already present. If insertion is successful, - * immediately returns a reference to the inserted value. */ - public Module insert(BytePointer key, Module value) { return _insert(key, value.asModule()); } - private native @ByRef @Name("insert") Module _insert(@StdString BytePointer key, @ByRef(true) Module value); - public Module insert(String key, Module value) { return _insert(key, value.asModule()); } - private native @ByRef @Name("insert") Module _insert(@StdString String key, @ByRef(true) Module value); - - /** Inserts all items from {@code other} into this {@code OrderedDict}. 
If any key from - * {@code other} is already present in this {@code OrderedDict}, an exception is thrown. */ - public native void update(@ByRef(true) StringModuleDict other); - - /** Inserts all items from {@code other} into this {@code OrderedDict}. If any key from - * {@code other} is already present in this {@code OrderedDict}, an exception is thrown. */ - - /** Removes the item that has {@code key} from this {@code OrderedDict} if exists and if - * it doesn't an exception is thrown. */ - public native void erase(@StdString BytePointer key); - public native void erase(@StdString String key); - - /** Removes all items from this {@code OrderedDict}. */ - public native void clear(); - - // Observers - - /** Returns the items stored in the {@code OrderedDict}. */ - public native @Const @ByRef @NoException(true) StringModuleDictItemVector items(); - - /** Returns a newly allocated vector and copies all keys from this - * {@code OrderedDict} into the vector. */ - public native @ByVal StringVector keys(); - - /** Returns a newly allocated vector and copies all values from this - * {@code OrderedDict} into the vector. */ - public native @ByVal ModuleVector values(); - - /** Returns a newly allocated vector and copies all keys and values from this - * {@code OrderedDict} into a vector of {@code std::pair}. */ - public native @ByVal StringModuleVector pairs(); - - /** Returns true if both dicts contain the same keys and values, in the same - * order. */ - - /** A mapping from a key to an index into the {@code items_} vector. 
*/ -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java deleted file mode 100644 index 825ef8f98b7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java +++ /dev/null @@ -1,53 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Name("torch::OrderedDict::Item") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringModuleDictItem extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringModuleDictItem(Pointer p) { super(p); } - - /** Constructs a new item. */ - public StringModuleDictItem(@StdString BytePointer key, @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } - private native void allocate(@StdString BytePointer key, @ByVal Module value); - public StringModuleDictItem(@StdString String key, @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } - private native void allocate(@StdString String key, @ByVal Module value); - - /** Returns a reference to the value. */ - public native @ByRef @Name("operator *") Module multiply(); - - /** Returns a reference to the value. */ - - /** Allows access to the value using the arrow operator. 
*/ - public native @Name("operator ->") Module access(); - - /** Allows access to the value using the arrow operator. */ - - /** Returns a reference to the key. */ - public native @StdString @NoException(true) BytePointer key(); - - /** Returns a reference to the value. */ - public native @ByRef @NoException(true) Module value(); - - /** Returns a reference to the value. */ - - /** Returns a {@code (key, value)} pair. */ - public native @Const @ByRef @NoException(true) StringModulePair pair(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java deleted file mode 100644 index 357e89a4d85..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector::Item>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringModuleDictItemVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public StringModuleDictItemVector(Pointer p) { super(p); } - public StringModuleDictItemVector() { allocate(); } - private native void allocate(); - - - public boolean empty() { return size() == 0; } - public native long size(); - - public StringModuleDictItem front() { return get(0); } - public StringModuleDictItem back() { return get(size() - 1); } - @Index(function = "at") public native @ByRef StringModuleDictItem get(@Cast("size_t") long i); - - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const StringModuleDictItem get(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java deleted file mode 100644 index f027d5deb09..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("std::pair") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringModulePair extends Pointer { - static 
{ Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringModulePair(Pointer p) { super(p); } - public StringModulePair(BytePointer firstValue, Module secondValue) { this(); put(firstValue, secondValue); } - public StringModulePair(String firstValue, Module secondValue) { this(); put(firstValue, secondValue); } - public StringModulePair() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef StringModulePair put(@ByRef StringModulePair x); - - - @MemberGetter public native @StdString BytePointer first(); public native StringModulePair first(BytePointer first); - @MemberGetter public native @ByRef Module second(); public native StringModulePair second(Module second); - @MemberSetter @Index public native StringModulePair first(@StdString String first); - - public StringModulePair put(BytePointer firstValue, Module secondValue) { - first(firstValue); - second(secondValue); - return this; - } - - public StringModulePair put(String firstValue, Module secondValue) { - first(firstValue); - second(secondValue); - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleVector.java deleted file mode 100644 index 1d22d80c23b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleVector.java +++ /dev/null @@ -1,58 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static 
org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringModuleVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StringModuleVector(Pointer p) { super(p); } - public StringModuleVector(BytePointer[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringModuleVector(String[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringModuleVector() { allocate(); } - public StringModuleVector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef StringModuleVector put(@ByRef StringModuleVector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringModuleVector first(@Cast("size_t") long i, BytePointer first); - @Index(function = "at") public native @ByRef Module second(@Cast("size_t") long i); public native StringModuleVector second(@Cast("size_t") long i, Module second); - @MemberSetter @Index(function = "at") public native StringModuleVector first(@Cast("size_t") long i, @StdString String first); - - public StringModuleVector put(BytePointer[] firstValue, Module[] secondValue) { - for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { - first(i, firstValue[i]); - second(i, secondValue[i]); - } - return this; - } - - public StringModuleVector put(String[] firstValue, Module[] secondValue) { - for (int i = 0; i < 
firstValue.length && i < secondValue.length; i++) { - first(i, firstValue[i]); - second(i, secondValue[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java deleted file mode 100644 index 5b7ea2e071c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringTensorMap extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public StringTensorMap(Pointer p) { super(p); } - public StringTensorMap() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef StringTensorMap put(@ByRef StringTensorMap x); - - public boolean empty() { return size() == 0; } - public native long size(); - - @Index public native @ByRef Tensor get(@StdString BytePointer i); - public native StringTensorMap put(@StdString BytePointer i, Tensor value); - - public native void erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *().first") @MemberGetter @StdString BytePointer first(); - public native @Name("operator *().second") @MemberGetter @ByRef @Const Tensor second(); - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java deleted file mode 100644 index 31f86db357c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset 
@Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class T_DoubleLong_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public T_DoubleLong_T(Pointer p) { super(p); } - public T_DoubleLong_T(double value0, @Cast("int64_t") long value1) { allocate(value0, value1); } - private native void allocate(double value0, @Cast("int64_t") long value1); - public T_DoubleLong_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_DoubleLong_T put(@ByRef T_DoubleLong_T x); - - public double get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native double get0(@ByRef T_DoubleLong_T container); - public @Cast("int64_t") long get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast("int64_t") long get1(@ByRef T_DoubleLong_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java deleted file mode 100644 index c178108ee3f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class 
T_StringLong_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public T_StringLong_T(Pointer p) { super(p); } - public T_StringLong_T(@StdString BytePointer value0, @Cast("uint64_t") long value1) { allocate(value0, value1); } - private native void allocate(@StdString BytePointer value0, @Cast("uint64_t") long value1); - public T_StringLong_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_StringLong_T put(@ByRef T_StringLong_T x); - - public @StdString BytePointer get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @StdString BytePointer get0(@ByRef T_StringLong_T container); - public @Cast("uint64_t") long get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast("uint64_t") long get1(@ByRef T_StringLong_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorLongLongTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorLongLongTensor_T.java deleted file mode 100644 index b19c0454932..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorLongLongTensor_T.java +++ /dev/null @@ -1,42 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("std::tuple") @Properties(inherit = 
org.bytedeco.pytorch.presets.torch.class) -public class T_TensorTensorLongLongTensor_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public T_TensorTensorLongLongTensor_T(Pointer p) { super(p); } - public T_TensorTensorLongLongTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @Cast("int64_t") long value2, @Cast("int64_t") long value3, @ByRef Tensor value4) { allocate(value0, value1, value2, value3, value4); } - private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @Cast("int64_t") long value2, @Cast("int64_t") long value3, @ByRef Tensor value4); - public T_TensorTensorLongLongTensor_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_TensorTensorLongLongTensor_T put(@ByRef T_TensorTensorLongLongTensor_T x); - - public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorLongLongTensor_T container); - public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorLongLongTensor_T container); - public @Cast("int64_t") long get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @Cast("int64_t") long get2(@ByRef T_TensorTensorLongLongTensor_T container); - public @Cast("int64_t") long get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @Cast("int64_t") long get3(@ByRef T_TensorTensorLongLongTensor_T container); - public @ByRef Tensor get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef T_TensorTensorLongLongTensor_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorLong_T.java deleted file mode 100644 index 
00eb302f91f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorLong_T.java +++ /dev/null @@ -1,42 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class T_TensorTensorTensorTensorLong_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public T_TensorTensorTensorTensorLong_T(Pointer p) { super(p); } - public T_TensorTensorTensorTensorLong_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4) { allocate(value0, value1, value2, value3, value4); } - private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4); - public T_TensorTensorTensorTensorLong_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_TensorTensorTensorTensorLong_T put(@ByRef T_TensorTensorTensorTensorLong_T x); - - public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorLong_T container); - public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorLong_T container); - public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorLong_T container); - public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensorLong_T container); - public @Cast("int64_t") long get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @Cast("int64_t") long get4(@ByRef T_TensorTensorTensorTensorLong_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensor_T.java deleted file mode 100644 index 3dbeda5f956..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensor_T.java +++ /dev/null @@ -1,44 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT 
EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class T_TensorTensorTensorTensorTensorTensor_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public T_TensorTensorTensorTensorTensorTensor_T(Pointer p) { super(p); } - public T_TensorTensorTensorTensorTensorTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5) { allocate(value0, value1, value2, value3, value4, value5); } - private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5); - public T_TensorTensorTensorTensorTensorTensor_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_TensorTensorTensorTensorTensorTensor_T put(@ByRef T_TensorTensorTensorTensorTensorTensor_T x); - - public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); - public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); - public @ByRef Tensor get2() { return 
get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); - public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); - public @ByRef Tensor get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); - public @ByRef Tensor get5() { return get5(this); } - @Namespace @Name("std::get<5>") public static native @ByRef Tensor get5(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java deleted file mode 100644 index 33f2bc4cb37..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java +++ /dev/null @@ -1,50 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class T_TensorTensorTensorTensorsLongLongLongLongTensor_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public T_TensorTensorTensorTensorsLongLongLongLongTensor_T(Pointer p) { super(p); } - public T_TensorTensorTensorTensorsLongLongLongLongTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4, @Cast("int64_t") long value5, @Cast("int64_t") long value6, @Cast("int64_t") long value7, @ByRef Tensor value8) { allocate(value0, value1, value2, value3, value4, value5, value6, value7, value8); } - private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4, @Cast("int64_t") long value5, @Cast("int64_t") long value6, @Cast("int64_t") long value7, @ByRef Tensor value8); - public T_TensorTensorTensorTensorsLongLongLongLongTensor_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T put(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T x); - - public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @Cast("int64_t") long get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @Cast("int64_t") long get4(@ByRef 
T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @Cast("int64_t") long get5() { return get5(this); } - @Namespace @Name("std::get<5>") public static native @Cast("int64_t") long get5(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @Cast("int64_t") long get6() { return get6(this); } - @Namespace @Name("std::get<6>") public static native @Cast("int64_t") long get6(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @Cast("int64_t") long get7() { return get7(this); } - @Namespace @Name("std::get<7>") public static native @Cast("int64_t") long get7(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); - public @ByRef Tensor get8() { return get8(this); } - @Namespace @Name("std::get<8>") public static native @ByRef Tensor get8(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java deleted file mode 100644 index 53779948f33..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java +++ /dev/null @@ -1,42 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - 
-@NoOffset @Name("std::tuple,std::vector,std::vector,std::vector,std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T(Pointer p) { super(p); } - public T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3, @Cast({"", "std::vector"}) @StdMove TensorVector value4) { allocate(value0, value1, value2, value3, value4); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3, @Cast({"", "std::vector"}) @StdMove TensorVector value4); - public T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T put(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T x); - - public @Cast({"", "std::vector"}) @StdMove TensorVector get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get0(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get1(@ByRef 
T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get2(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get3(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); - public @Cast({"", "std::vector"}) @StdMove TensorVector get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get4(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java deleted file mode 100644 index 8aec7f95641..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("std::tuple,torch::Tensor>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public 
class T_TensorVectorTensor_T extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public T_TensorVectorTensor_T(Pointer p) { super(p); } - public T_TensorVectorTensor_T(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @ByRef Tensor value1) { allocate(value0, value1); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @ByRef Tensor value1); - public T_TensorVectorTensor_T() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef T_TensorVectorTensor_T put(@ByRef T_TensorVectorTensor_T x); - - public @Cast({"", "std::vector"}) @StdMove TensorVector get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get0(@ByRef T_TensorVectorTensor_T container); - public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorVectorTensor_T container); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java deleted file mode 100644 index d6ff895ddc8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static 
org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ThreadLocalStateOptional extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ThreadLocalStateOptional(Pointer p) { super(p); } - public ThreadLocalStateOptional(ThreadLocalState value) { this(); put(value); } - public ThreadLocalStateOptional() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef ThreadLocalStateOptional put(@ByRef ThreadLocalStateOptional x); - - public native boolean has_value(); - public native void reset(); - public native @Name("value") @ByRef ThreadLocalState get(); - @ValueSetter public native ThreadLocalStateOptional put(@ByRef ThreadLocalState value); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java deleted file mode 100644 index 6f7a2d409f3..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java +++ /dev/null @@ -1,118 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// This struct holds the actual type information. There will be -// one allocated per type. TypeMeta objects will then point to the struct -// instance for the type they're configured for. 
-@Namespace("caffe2::detail") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TypeMetaData extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TypeMetaData(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public TypeMetaData(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public TypeMetaData position(long position) { - return (TypeMetaData)super.position(position); - } - @Override public TypeMetaData getPointer(long i) { - return new TypeMetaData((Pointer)this).offsetAddress(i); - } - - @Opaque public static class New extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public New() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public New(Pointer p) { super(p); } - } - @Opaque public static class PlacementNew extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public PlacementNew() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PlacementNew(Pointer p) { super(p); } - } - @Opaque public static class Copy extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public Copy() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Copy(Pointer p) { super(p); } - } - @Opaque public static class PlacementDelete extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public PlacementDelete() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PlacementDelete(Pointer p) { super(p); } - } - @Opaque public static class Delete extends Pointer { - /** Empty constructor. 
Calls {@code super((Pointer)null)}. */ - public Delete() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Delete(Pointer p) { super(p); } - } - - public TypeMetaData() { super((Pointer)null); allocate(); } - @NoException(true) private native void allocate(); - - public TypeMetaData( - @Cast("size_t") long itemsize, - New newFn, - PlacementNew placementNew, - Copy copy, - PlacementDelete placementDelete, - Delete deleteFn, - @ByVal TypeIdentifier id, - @StringView BytePointer name) { super((Pointer)null); allocate(itemsize, newFn, placementNew, copy, placementDelete, deleteFn, id, name); } - @NoException(true) private native void allocate( - @Cast("size_t") long itemsize, - New newFn, - PlacementNew placementNew, - Copy copy, - PlacementDelete placementDelete, - Delete deleteFn, - @ByVal TypeIdentifier id, - @StringView BytePointer name); - public TypeMetaData( - @Cast("size_t") long itemsize, - New newFn, - PlacementNew placementNew, - Copy copy, - PlacementDelete placementDelete, - Delete deleteFn, - @ByVal TypeIdentifier id, - @StringView String name) { super((Pointer)null); allocate(itemsize, newFn, placementNew, copy, placementDelete, deleteFn, id, name); } - @NoException(true) private native void allocate( - @Cast("size_t") long itemsize, - New newFn, - PlacementNew placementNew, - Copy copy, - PlacementDelete placementDelete, - Delete deleteFn, - @ByVal TypeIdentifier id, - @StringView String name); - - public native @Cast("size_t") long itemsize_(); public native TypeMetaData itemsize_(long setter); - public native New new_(); public native TypeMetaData new_(New setter); - public native PlacementNew placementNew_(); public native TypeMetaData placementNew_(PlacementNew setter); - public native Copy copy_(); public native TypeMetaData copy_(Copy setter); - public native PlacementDelete placementDelete_(); public native TypeMetaData placementDelete_(PlacementDelete setter); - public native Delete 
delete_(); public native TypeMetaData delete_(Delete setter); - public native @ByRef TypeIdentifier id_(); public native TypeMetaData id_(TypeIdentifier setter); - public native @StringView BytePointer name_(); public native TypeMetaData name_(BytePointer setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java deleted file mode 100644 index 97dee56b551..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java +++ /dev/null @@ -1,38 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch.cuda; - -import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.Error; -import org.bytedeco.pytorch.global.torch.DeviceType; -import org.bytedeco.pytorch.global.torch.ScalarType; -import org.bytedeco.pytorch.global.torch.MemoryFormat; -import org.bytedeco.pytorch.Allocator; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; -import org.bytedeco.pytorch.*; -import static org.bytedeco.pytorch.global.torch.*; - -import static org.bytedeco.pytorch.global.torch_cuda.*; - -@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) -public class CUDAStreamOptional extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public CUDAStreamOptional(Pointer p) { super(p); } - public CUDAStreamOptional(CUDAStream value) { this(); put(value); } - public CUDAStreamOptional() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef CUDAStreamOptional put(@ByRef CUDAStreamOptional x); - - public native boolean has_value(); - public native void reset(); - public native @Name("value") @ByRef CUDAStream get(); - @ValueSetter public native CUDAStreamOptional put(@ByRef CUDAStream value); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java deleted file mode 100644 index 4a7897fa10c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java +++ /dev/null @@ -1,90 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch.cuda; - -import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.Error; -import org.bytedeco.pytorch.global.torch.DeviceType; -import org.bytedeco.pytorch.global.torch.ScalarType; -import org.bytedeco.pytorch.global.torch.MemoryFormat; -import org.bytedeco.pytorch.Allocator; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; -import org.bytedeco.pytorch.*; -import static org.bytedeco.pytorch.global.torch.*; - -import static org.bytedeco.pytorch.global.torch_cuda.*; - - -/** A variant of OptionalDeviceGuard that is specialized for CUDA. See - * CUDAGuard for when you can use this. */ -@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) -public class OptionalCUDAGuard extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public OptionalCUDAGuard(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public OptionalCUDAGuard(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public OptionalCUDAGuard position(long position) { - return (OptionalCUDAGuard)super.position(position); - } - @Override public OptionalCUDAGuard getPointer(long i) { - return new OptionalCUDAGuard((Pointer)this).offsetAddress(i); - } - - /** Create an uninitialized OptionalCUDAGuard. */ - public OptionalCUDAGuard() { super((Pointer)null); allocate(); } - private native void allocate(); - - /** Set the current CUDA device to the passed Device, if it is not nullopt. */ - public OptionalCUDAGuard(@ByVal DeviceOptional device_opt) { super((Pointer)null); allocate(device_opt); } - private native void allocate(@ByVal DeviceOptional device_opt); - - /** Set the current CUDA device to the passed device index, if it is not - * nullopt */ - public OptionalCUDAGuard(@ByVal ByteOptional device_index_opt) { super((Pointer)null); allocate(device_index_opt); } - private native void allocate(@ByVal ByteOptional device_index_opt); - - // Copy is not allowed - - - - // See Note [Move construction for RAII guards is tricky] - - - // See Note [Move assignment for RAII guards is tricky] - - - /** Sets the CUDA device to the given device, initializing the guard if it - * is not already initialized. Errors if the given device is not a CUDA - * device. */ - public native void set_device(@ByVal Device device); - - /** Sets the CUDA device to the given device, initializing the guard if it is - * not already initialized. Errors if the given device is not a CUDA device. - * (This method is provided for uniformity with OptionalDeviceGuard). 
*/ - public native void reset_device(@ByVal Device device); - - /** Sets the CUDA device to the given device index, initializing the guard if - * it is not already initialized. */ - public native void set_index(byte device_index); - - /** Returns the device that was set immediately prior to initialization of the - * guard, or nullopt if the guard is uninitialized. */ - public native @ByVal DeviceOptional original_device(); - - /** Returns the most recent device that was set using this device guard, - * either from construction, or via set_device, if the guard is initialized, - * or nullopt if the guard is uninitialized. */ - public native @ByVal DeviceOptional current_device(); - - /** Restore the original CUDA device, resetting this guard to uninitialized - * state. */ - public native void reset(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java deleted file mode 100644 index 32472e73e3b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java +++ /dev/null @@ -1,86 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch.cuda; - -import org.bytedeco.pytorch.*; -import org.bytedeco.pytorch.Error; -import org.bytedeco.pytorch.global.torch.DeviceType; -import org.bytedeco.pytorch.global.torch.ScalarType; -import org.bytedeco.pytorch.global.torch.MemoryFormat; -import org.bytedeco.pytorch.Allocator; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; -import org.bytedeco.pytorch.*; -import static org.bytedeco.pytorch.global.torch.*; - -import static org.bytedeco.pytorch.global.torch_cuda.*; - - -/** A variant of OptionalStreamGuard that is 
specialized for CUDA. See - * CUDAGuard for when you can use this. */ -@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) -public class OptionalCUDAStreamGuard extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OptionalCUDAStreamGuard(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public OptionalCUDAStreamGuard(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public OptionalCUDAStreamGuard position(long position) { - return (OptionalCUDAStreamGuard)super.position(position); - } - @Override public OptionalCUDAStreamGuard getPointer(long i) { - return new OptionalCUDAStreamGuard((Pointer)this).offsetAddress(i); - } - - /** Create an uninitialized guard. */ - public OptionalCUDAStreamGuard() { super((Pointer)null); allocate(); } - private native void allocate(); - - /** Set the current CUDA device to the device associated with the passed - * stream, and set the current CUDA stream on that device to the passed - * stream. Errors if the Stream is not a CUDA stream. */ - public OptionalCUDAStreamGuard(@ByVal Stream stream) { super((Pointer)null); allocate(stream); } - private native void allocate(@ByVal Stream stream); - - /** Set the current device to the device associated with the passed stream, - * and set the current stream on that device to the passed stream, - * if the passed stream is not nullopt. 
*/ - public OptionalCUDAStreamGuard(@ByVal StreamOptional stream_opt) { super((Pointer)null); allocate(stream_opt); } - private native void allocate(@ByVal StreamOptional stream_opt); - - /** Copy is disallowed */ - - - - // See Note [Move construction for RAII guards is tricky] - - - // See Note [Move assignment for RAII guards is tricky] - - - /** Resets the currently set CUDA stream to the original stream and - * the currently set device to the original device. Then, - * set the current device to the device associated with the passed stream, - * and set the current stream on that device to the passed stream. - * Initializes the guard if it was not previously initialized. */ - public native void reset_stream(@ByVal Stream stream); - - /** Returns the CUDA stream that was set at the time the guard was most - * recently initialized, or nullopt if the guard is uninitialized. */ - public native @ByVal CUDAStreamOptional original_stream(); - - /** Returns the most recent CUDA stream that was set using this stream guard, - * either from construction, or via reset_stream, if the guard is - * initialized, or nullopt if the guard is uninitialized. */ - public native @ByVal CUDAStreamOptional current_stream(); - - /** Restore the original CUDA device and stream, resetting this guard to - * uninitialized state. 
*/ - public native void reset(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java deleted file mode 100644 index d2ccf7a4556..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("ska_ordered") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class fibonacci_hash_policy extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public fibonacci_hash_policy() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public fibonacci_hash_policy(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public fibonacci_hash_policy(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public fibonacci_hash_policy position(long position) { - return (fibonacci_hash_policy)super.position(position); - } - @Override public fibonacci_hash_policy getPointer(long i) { - return new fibonacci_hash_policy((Pointer)this).offsetAddress(i); - } - - public native @Cast("uint64_t") long index_for_hash(@Cast("uint64_t") long hash, @Cast("uint64_t") long arg1); - public native @Cast("uint64_t") long keep_in_range(@Cast("uint64_t") long index, @Cast("uint64_t") long num_slots_minus_one); - - public native byte next_size_over(@Cast("uint64_t*") @ByRef LongPointer size); - public native byte next_size_over(@Cast("uint64_t*") @ByRef LongBuffer size); - public native byte next_size_over(@Cast("uint64_t*") @ByRef long[] size); - public native void commit(byte shift_); - public native void reset(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index f4c93ef6955..5cbb27119aa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -158,9 +158,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../TensorArrayRefOptional.java -// Targeting ../ThreadLocalStateOptional.java - - // Targeting ../TypeMetaOptional.java @@ -281,21 +278,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringStringMap.java -// Targeting ../StringIntMap.java - - // Targeting ../StringLongMap.java -// Targeting ../StringTensorMap.java - - // Targeting ../ActivityTypeSet.java -// Targeting ../RecordFunctionCallbackHandleVector.java - - // Targeting ../DimnameVector.java @@ -305,9 +293,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../FunctionPostHookVector.java -// Targeting 
../SavedVariableVector.java - - // Targeting ../DefVector.java @@ -323,9 +308,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringTensorDictItemVector.java -// Targeting ../StringModuleDictItemVector.java - - // Targeting ../StringAnyModuleDictItemVector.java @@ -335,9 +317,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../WeakStorageVector.java -// Targeting ../Bool2Vector.java - - // Targeting ../BoolVector.java @@ -407,9 +386,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../TensorOptionalVector.java -// Targeting ../SharedFunctionPreVector.java - - // Targeting ../FunctionVector.java @@ -428,9 +404,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../JitNodeVector.java -// Targeting ../ModuleVector.java - - // Targeting ../AnyModuleVector.java @@ -440,9 +413,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringTensorVector.java -// Targeting ../StringModuleVector.java - - // Targeting ../StringAnyModuleVector.java @@ -470,9 +440,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringTensorPair.java -// Targeting ../StringModulePair.java - - // Targeting ../StringAnyModulePair.java @@ -497,9 +464,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../T_LongLong_T.java -// Targeting ../T_DoubleLong_T.java - - // Targeting ../T_TensorTensor_T.java @@ -512,18 +476,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../T_TensorTensorTensorTensorTensor_T.java -// Targeting ../T_TensorTensorTensorTensorTensorTensor_T.java - - // Targeting ../T_TensorTensorTensorTensorTensorTensorTensor_T.java // Targeting ../T_TensorTensorTensorTensorVector_T.java -// Targeting ../T_TensorTensorTensorTensorLong_T.java - - // Targeting ../T_TensorTensorDoubleLong_T.java @@ -545,39 +503,18 @@ public class torch 
extends org.bytedeco.pytorch.presets.torch { // Targeting ../T_StringSizeTSizeT_T.java -// Targeting ../T_StringLong_T.java - - // Targeting ../T_TensorTensorVector_T.java -// Targeting ../T_TensorVectorTensor_T.java - - -// Targeting ../T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java - - // Targeting ../T_TensorTensorVectorTensorVector_T.java -// Targeting ../T_TensorTensorLongLongTensor_T.java - - -// Targeting ../T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java - - // Targeting ../T_TypePtrLong_T.java -// Targeting ../NodeIntMap.java - - // Targeting ../HashAliasedIValueMap.java -// Targeting ../LongStringMap.java - - // Targeting ../StringBoolMap.java @@ -593,15 +530,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringIValueMap.java -// Targeting ../StringFunctionMap.java - - // Targeting ../StringValueMap.java -// Targeting ../StringLongStringMapMap.java - - // Targeting ../ValueValueMap.java @@ -623,18 +554,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../TensorImplSet.java -// Targeting ../RecordScopeSet.java - - // Targeting ../NodeSet.java -// Targeting ../StreamSet.java - - -// Targeting ../RecordScopeSet.java - - // Targeting ../DeviceTypeSet.java @@ -3529,18 +3451,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../LongSmallVectorCommon.java -// Targeting ../NodeSmallVectorCommon.java - - // Targeting ../SymIntSmallVectorBase.java // Targeting ../LongSmallVectorBase.java -// Targeting ../NodeSmallVectorBase.java - - // Define this out-of-line to dissuade the C++ compiler from inlining it. 
@@ -3561,9 +3477,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../LongSmallVectorImpl.java -// Targeting ../NodeSmallVectorImpl.java - - @@ -3593,9 +3506,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../DimVector.java -// Targeting ../SmallNodeVector.java - - /** Given a range of type R, iterate the entire range and return a * SmallVector with elements of the vector. This is useful, for example, @@ -3695,15 +3605,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../LongOptionalArrayRef.java -// Targeting ../LongVectorArrayRef.java - - // Targeting ../NamedValueArrayRef.java -// Targeting ../SavedVariableArrayRef.java - - // Targeting ../ScalarArrayRef.java @@ -6113,9 +6017,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace caffe2 -// Targeting ../TypeMetaData.java - +// This struct holds the actual type information. There will be +// one allocated per type. TypeMeta objects will then point to the struct +// instance for the type they're configured for. // Mechanism for throwing errors which can't be prevented at compile time // due to type erasure. E.g. 
somebody calling TypeMeta::copy() for @@ -8259,7 +8164,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native DispatchKey legacyExtractDispatchKey(@Const @ByRef TensorBase t); -// Targeting ../MaybeOwnedTraitsTensor.java +// Targeting ../MaybeOwnedTraits.java // namespace c10 @@ -12075,12 +11980,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Implementation taken from http://en.cppreference.com/w/cpp/types/void_t // (it takes CWG1558 into account and also works for older compilers) // namespace detailv3 -// Targeting ../power_of_two_hash_policy.java - - -// Targeting ../fibonacci_hash_policy.java - - // namespace ska_ordered @@ -12266,9 +12165,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../FutureSingleElementType.java -// Targeting ../OptionalSingleElementType.java - - // Targeting ../UnionType.java @@ -17800,9 +17696,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// Targeting ../ReadyQueue.java - - @Namespace("torch::autograd") @MemberGetter public static native int NO_DEVICE(); public static final int NO_DEVICE = NO_DEVICE(); @@ -58612,9 +58505,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include @Namespace("torch::autograd") public static native @Cast("const char*") BytePointer ERR_BACKWARD_TWICE(); public static native void ERR_BACKWARD_TWICE(BytePointer setter); -// Targeting ../SavedVariable.java - +/** A snapshot of a variable at a certain version. A {@code SavedVariable} stores + * enough information to reconstruct a variable from a certain point in time. */ // namespace autograd // namespace torch @@ -58768,9 +58661,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /** Return true if any of the variables in the list require a gradient. 
*/ @Namespace("torch::autograd") public static native @Cast("bool") boolean any_variable_requires_grad(@Cast({"", "std::vector"}) @StdMove TensorVector variables); -// Targeting ../TypeAndSize.java - +/** Return the next edges of all the given variables, or tuples of variables. */ // namespace autograd // namespace torch @@ -62508,12 +62400,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../OpRegistrationListener.java -// Targeting ../RegistrationListenerList.java - - - -// Targeting ../SchemaRegistrationHandleRAII.java - // Targeting ../Dispatcher.java @@ -63807,9 +63693,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Targeting ../InterpreterStateImpl.java - - // Targeting ../Instruction.java @@ -64009,9 +63892,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringTensorDict.java -// Targeting ../StringModuleDict.java - - // Targeting ../StringAnyModuleDict.java @@ -64021,9 +63901,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringTensorDictItem.java -// Targeting ../StringModuleDictItem.java - - // Targeting ../StringAnyModuleDictItem.java @@ -64293,9 +64170,10 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../AttributePolicy.java -// Targeting ../NamedJitModulePolicy.java - +// take a Policy object, and make a version of it that returns the slot. +// along with the fully qualified name of that slot. This is used for the named_ +// variants like named_parameters(). 
// namespace detail @@ -66795,16 +66673,10 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include -// Targeting ../CUevent_st.java - - // ---------------------------------------------------------------------------- // -- Annotation -------------------------------------------------------------- // ---------------------------------------------------------------------------- -// Targeting ../ProfilerVoidEventStub.java - - // namespace impl // namespace profiler @@ -66971,9 +66843,6 @@ scalar_t sf(scalar_t x, scalar_t y) // Targeting ../Result.java -// Targeting ../ActivityTraceWrapper.java - - // namespace kineto // namespace impl // namespace profiler @@ -76715,9 +76584,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #include "caffe2/serialize/istream_adapter.h" // #include "caffe2/serialize/read_adapter_interface.h" // #include "caffe2/serialize/versions.h" -// Targeting ../mz_zip_archive.java - - // PyTorch containers are a special zip archive with the following layout // archive_name.zip contains: @@ -77529,9 +77395,6 @@ scalar_t sf(scalar_t x, scalar_t y) // Targeting ../Token.java -// Targeting ../Lexer.java - - // namespace jit // namespace torch diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index 1a94768b09f..ea0bf0de087 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -26,9 +26,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { @Namespace("at") public static native @ByVal @Name("make_generator") Generator make_generator_cuda(@Cast("int8_t&&") byte device_index); -// Targeting ../cuda/CUDAStreamOptional.java - - // Targeting ../cuda/DeviceAssertionsDataVector.java @@ -797,15 +794,15 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // Targeting ../cuda/CUDAGuard.java -// Targeting 
../cuda/OptionalCUDAGuard.java - +/** A variant of OptionalDeviceGuard that is specialized for CUDA. See + * CUDAGuard for when you can use this. */ // Targeting ../cuda/CUDAStreamGuard.java -// Targeting ../cuda/OptionalCUDAStreamGuard.java - +/** A variant of OptionalStreamGuard that is specialized for CUDA. See + * CUDAGuard for when you can use this. */ // Targeting ../cuda/CUDAMultiStreamGuard.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/mz_zip_archive.java b/pytorch/src/gen/java/org/bytedeco/pytorch/mz_zip_archive.java deleted file mode 100644 index c3641c41837..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/mz_zip_archive.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class mz_zip_archive extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public mz_zip_archive() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public mz_zip_archive(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java deleted file mode 100644 index 4c3d397c33a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java +++ /dev/null @@ -1,46 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("ska_ordered") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class power_of_two_hash_policy extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public power_of_two_hash_policy() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public power_of_two_hash_policy(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public power_of_two_hash_policy(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public power_of_two_hash_policy position(long position) { - return (power_of_two_hash_policy)super.position(position); - } - @Override public power_of_two_hash_policy getPointer(long i) { - return new power_of_two_hash_policy((Pointer)this).offsetAddress(i); - } - - public native @Cast("uint64_t") long index_for_hash(@Cast("uint64_t") long hash, @Cast("uint64_t") long num_slots_minus_one); - public native @Cast("uint64_t") long keep_in_range(@Cast("uint64_t") long index, @Cast("uint64_t") long num_slots_minus_one); - public native byte next_size_over(@Cast("uint64_t*") @ByRef LongPointer size); - public native byte next_size_over(@Cast("uint64_t*") @ByRef LongBuffer size); - public native byte next_size_over(@Cast("uint64_t*") @ByRef long[] size); - public native void commit(byte arg0); - public native void reset(); -} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 32a52ee1e08..022f9926ba7 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -345,7 +345,6 @@ public void map(InfoMap infoMap) { .pointerTypes("FunctionSchema")) .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorMaybeOwned").pointerTypes("TensorMaybeOwned")) .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorBaseMaybeOwned").pointerTypes("TensorBaseMaybeOwned")) - .put(new Info("c10::MaybeOwnedTraits").pointerTypes("MaybeOwnedTraitsTensor")) .put(new Info("at::InferExpandGeometryResult").pointerTypes("DimVectorInferExpandGeometryResult")) .put(new Info("at::namedinference::TensorName").valueTypes("@Cast({\"\", \"at::namedinference::TensorName&&\"}) @StdMove 
TensorName").pointerTypes("TensorName")) .put(new Info("c10::remove_symint::type").valueTypes("long")) @@ -421,7 +420,6 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional").pointerTypes("GeneratorOptional").define()) .put(new Info("c10::optional", "c10::optional", "c10::optional", "c10::optional", "c10::optional").pointerTypes("TensorOptional").define()) .put(new Info("c10::optional", "c10::optional").pointerTypes("TensorArrayRefOptional").define()) - .put(new Info("c10::optional").pointerTypes("ThreadLocalStateOptional").define()) .put(new Info("c10::optional").pointerTypes("TypeMetaOptional").define()) .put(new Info("c10::optional").pointerTypes("ExecutorExecutionModeOptional").define()) .put(new Info("c10::optional::operator ->").skip()) // Returns a pointer to ExecutorExecutionMode, which is an enum @@ -596,7 +594,6 @@ public void map(InfoMap infoMap) { //// std::vector infoMap - .put(new Info("std::vector >").pointerTypes("Bool2Vector").define()) .put(new Info("std::vector").pointerTypes("BoolVector").define()) .put(new Info("std::vector").pointerTypes("BytePointerVector").define()) .put(new Info("std::vector", "std::tuple,std::vector >").cast().pointerTypes("LongVector").define()) @@ -605,8 +602,6 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector").pointerTypes("StringVector").define()) .put(new Info("std::vector").pointerTypes("StringViewVector").define()) .put(new Info("std::vector >").pointerTypes("StringLongVector").define()) - .put(new Info("const std::vector >", - "std::vector >").pointerTypes("RecordFunctionCallbackHandleVector").define()) .put(new Info("std::vector", "torch::jit::Stack").pointerTypes("IValueVector").define()) .put(new Info("std::vector::const_iterator", "torch::jit::Stack::const_iterator").pointerTypes("IValueVector.Iterator")) .put(new Info("std::vector", "std::vector").pointerTypes("QEngineVector").define()) @@ -627,12 +622,10 @@ public void map(InfoMap infoMap) { .valueTypes("@Cast({\"\", 
\"std::vector\"}) @StdMove TensorVector").pointerTypes("TensorVector").define()) .put(new Info("std::vector", "std::vector").pointerTypes("TensorIndexVector").define()) .put(new Info("std::vector >").pointerTypes("TensorOptionalVector").define()) - .put(new Info("std::vector >").pointerTypes("SharedFunctionPreVector").define()) .put(new Info("const std::vector >", "std::vector >").pointerTypes("FunctionPreHookVector").define()) .put(new Info("const std::vector >", "std::vector >").pointerTypes("FunctionPostHookVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("SavedVariableVector").define()) .put(new Info("const std::vector", "std::vector").pointerTypes("DefVector").define()) .put(new Info("const std::vector", "std::vector").pointerTypes("PropertyVector").define()) .put(new Info("const std::vector", "std::vector").pointerTypes("OptimizerParamGroupVector").define()) @@ -642,14 +635,12 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector >", "std::vector").pointerTypes("ResolverVector").define()) .put(new Info("std::vector", "std::vector").pointerTypes("ValueVector").define()) // Returned by inlineCallTo .put(new Info("std::vector").pointerTypes("JitNodeVector").define()) - .put(new Info("std::vector").pointerTypes("ModuleVector").define()) .put(new Info("std::vector::iterator").pointerTypes("ModuleVector.Iterator")) .put(new Info("std::vector").pointerTypes("AnyModuleVector").define()) .put(new Info("std::vector::iterator").pointerTypes("AnyModuleVector.Iterator")) .put(new Info("std::vector >").pointerTypes("SharedModuleVector").define()) .put(new Info("std::vector >::iterator").pointerTypes("SharedModuleVector.Iterator")) .put(new Info("std::vector >").pointerTypes("StringTensorVector").define()) - .put(new Info("std::vector >").pointerTypes("StringModuleVector").define()) .put(new Info("std::vector >").pointerTypes("StringAnyModuleVector").define()) .put(new Info("std::vector > 
>").pointerTypes("StringSharedModuleVector").define()) .put(new Info("std::vector >", "torch::jit::FusionStrategy").pointerTypes("FusionStrategy").define()) @@ -688,9 +679,7 @@ public void map(InfoMap infoMap) { .elementTypes("int64_t", "jlong") // Order is important, since ArrayRef and ArrayRef are incompatible, even though long == long long. And jlong is long long. .elementValueType("long"), new ArrayInfo("LongOptional").elementTypes("c10::optional"), - new ArrayInfo("LongVector").elementTypes("std::vector"), new ArrayInfo("NamedValue").elementTypes("torch::jit::NamedValue"), - new ArrayInfo("SavedVariable").elementTypes("torch::autograd::SavedVariable"), new ArrayInfo("Scalar").elementTypes("at::Scalar"), new ArrayInfo("ScalarType").itPointerType("@Cast(\"c10::ScalarType*\") BytePointer").elementTypes("c10::ScalarType", "at::ScalarType"), new ArrayInfo("Short").itPointerType("ShortPointer").elementTypes("jshort", "int16_t", "uint16_t").elementValueType("short"), @@ -781,8 +770,7 @@ public void map(InfoMap infoMap) { infoMap.put(new Info("c10::SymInt").pointerTypes("SymInt")); // Since SymInt is defined after SmallVector.h for (String[] t : new String[][]{ {"SymInt", "SymInt", "@ByVal SymInt", "c10::SymInt", "at::kDimVectorStaticSize", "at::SymDimVector", "SymDimVector"}, - {"Long", "LongPointer", "long", "int64_t", "at::kDimVectorStaticSize", "at::DimVector", "DimVector"}, - {"Node", "Node", "@ByPtr Node", "torch::autograd::Node*", "4", null, "SmallNodeVector"} + {"Long", "LongPointer", "long", "int64_t", "at::kDimVectorStaticSize", "at::DimVector", "DimVector"} }) { // Assume all have SmallVectorSizeType == uint32_t infoMap @@ -824,9 +812,7 @@ public void map(InfoMap infoMap) { //// std::map infoMap .put(new Info("std::map").pointerTypes("StringStringMap").define()) - .put(new Info("std::map").pointerTypes("StringIntMap").define()) .put(new Info("std::map").pointerTypes("StringLongMap").define()) - .put(new 
Info("std::map").pointerTypes("StringTensorMap").define()) ; @@ -836,10 +822,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::unordered_set").pointerTypes("HashAliasedIValues").define()) .put(new Info("std::unordered_set").pointerTypes("SymbolSet").define()) .put(new Info("std::unordered_set", "std::unordered_set").pointerTypes("TensorImplSet").define()) - .put(new Info("std::unordered_set >").pointerTypes("RecordScopeSet").define()) .put(new Info("std::unordered_set").pointerTypes("NodeSet").define()) - .put(new Info("std::unordered_set").pointerTypes("StreamSet").define()) - .put(new Info("std::unordered_set").pointerTypes("RecordScopeSet").define()) .put(new Info("std::unordered_set").pointerTypes("DeviceTypeSet").define()) .put(new Info("std::set").pointerTypes("ActivityTypeSet").define()) ; @@ -847,17 +830,13 @@ public void map(InfoMap infoMap) { //// std::unordered_map infoMap - .put(new Info("std::unordered_map").pointerTypes("NodeIntMap").define()) .put(new Info("std::unordered_map").pointerTypes("HashAliasedIValueMap").define()) - .put(new Info("std::unordered_map").pointerTypes("LongStringMap").define()) .put(new Info("std::unordered_map").pointerTypes("StringBoolMap").define()) .put(new Info("std::unordered_map").pointerTypes("StringSizeTMap").define()) .put(new Info("std::unordered_map").pointerTypes("ExtraFilesMap").define()) .put(new Info("std::unordered_map").pointerTypes("TypeEnv").define()) .put(new Info("std::unordered_map", "std::unordered_map").pointerTypes("StringIValueMap").define()) - .put(new Info("std::unordered_map >").pointerTypes("StringFunctionMap").define()) .put(new Info("std::unordered_map").pointerTypes("StringValueMap").define()) - .put(new Info("std::unordered_map >").pointerTypes("StringLongStringMapMap").define()) .put(new Info("std::unordered_map").pointerTypes("ValueValueMap").define()) .put(new Info("std::unordered_map").pointerTypes("ArgumentSpecExecutionPlanMap").define()) .put(new 
Info("std::unordered_map").pointerTypes("TreeRefStringMap").define()) @@ -874,18 +853,14 @@ public void map(InfoMap infoMap) { //// std::tuple infoMap - .put(new Info("std::tuple").pointerTypes("T_IntInt_T").define()) + .put(new Info("std::tuple").pointerTypes("T_IntInt_T").define()) // Needed for CUDAStream .put(new Info("std::tuple").pointerTypes("T_LongLong_T").define()) - .put(new Info("std::tuple").pointerTypes("T_DoubleLong_T").define()) - //.put(new Info("std::tuple").pointerTypes("TensorTuple").define()) .put(new Info("std::tuple", "std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensor_T").define()) .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensor_T").define()) .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensor_T").define()) .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorTensor_T").define()) - .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorTensorTensor_T").define()) .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorTensorTensorTensor_T").define()) .put(new Info("std::tuple >", "std::tuple >").pointerTypes("T_TensorTensorTensorTensorVector_T").define()) - .put(new Info("std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorLong_T").define()) .put(new Info("std::tuple", "std::tuple").pointerTypes("T_TensorTensorDoubleLong_T").define()) .put(new Info("std::tuple >").pointerTypes("T_TensorT_TensorTensor_T_T").define()) .put(new Info("std::tuple,c10::MaybeOwned >") @@ -902,16 +877,8 @@ public void map(InfoMap infoMap) { "std::tuple" ).cast().pointerTypes("PointerPointer")) .put(new Info("std::tuple").pointerTypes("T_StringSizeTSizeT_T").define()) - .put(new Info("std::tuple").pointerTypes("T_StringLong_T").define()) .put(new Info("std::tuple >", "std::tuple 
>").pointerTypes("T_TensorTensorVector_T").define()) - .put(new Info("std::tuple,torch::Tensor>", "std::tuple,at::Tensor>").pointerTypes("T_TensorVectorTensor_T").define()) - .put(new Info( - "std::tuple,std::vector,std::vector,std::vector,std::vector >", - "std::tuple,std::vector,std::vector,std::vector,std::vector >") - .pointerTypes("T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T").define()) .put(new Info("std::tuple,std::vector >", "std::tuple,std::vector >").pointerTypes("T_TensorTensorVectorTensorVector_T").define()) - .put(new Info("std::tuple", "std::tuple").pointerTypes("T_TensorTensorLongLongTensor_T").define()) - .put(new Info("std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorsLongLongLongLongTensor_T").define()) .put(new Info("const std::tuple", "std::tuple").pointerTypes("T_DataPtrSizeT_T").define()) .put(new Info("std::tuple", "std::pair").pointerTypes("T_TypePtrLong_T").define()) // Parse this pair as tuple because Parser doesn't generate valid code for optional ; @@ -973,7 +940,6 @@ public void map(InfoMap infoMap) { //// torch::OrderedDict for (String[] o: new String[][] { { "std::string", "torch::Tensor", "StringTensor" }, - { "std::string", "torch::nn::Module", "StringModule" }, { "std::string", "torch::nn::AnyModule", "StringAnyModule" }, { "std::string", "std::shared_ptr", "StringSharedModule" } }) { @@ -994,7 +960,6 @@ public void map(InfoMap infoMap) { // Parser doesn't generate iterators for vector of pairs, so function returning such iterators, like ParameterListImpl::begin() // must be mapped to returning item instead. Issue #673. Change when issue resolved. 
.put(new Info("std::pair", "std::pair").cast().pointerTypes("StringTensorPair").define()) - .put(new Info("std::pair").pointerTypes("StringModulePair").define()) .put(new Info("std::pair").pointerTypes("StringAnyModulePair").define()) .put(new Info("std::pair >").pointerTypes("StringSharedModulePair").define()) .put(new Info("std::pair").pointerTypes("RecordFunctionHandleIntPair").define()) @@ -1131,7 +1096,6 @@ public void map(InfoMap infoMap) { .put(new Info("c10::SingleElementType").pointerTypes("ListSingleElementType")) .put(new Info("c10::SingleElementType").pointerTypes("RRefSingleElementType")) .put(new Info("c10::SingleElementType").pointerTypes("FutureSingleElementType")) - .put(new Info("c10::SingleElementType").pointerTypes("OptionalSingleElementType")) .put(new Info("c10::SingleElementType").pointerTypes("AwaitSingleElementType")) ; @@ -1163,7 +1127,6 @@ public void map(InfoMap infoMap) { .put(new Info("torch::jit::slot_iterator_impl").pointerTypes(t[0].toLowerCase() + "_iterator")) .put(new Info("torch::jit::slot_iterator_impl::value_type").pointerTypes(t[1])) .put(new Info("torch::jit::Named<" + t[2] + ">").pointerTypes("Named" + t[1])) - .put(new Info("torch::jit::detail::NamedPolicy").pointerTypes("Named" + t[1] + "Policy")) .put(new Info( "torch::jit::slot_list_impl >", "torch::jit::named_" + t[0].toLowerCase() + "_list").pointerTypes("named_" + t[0].toLowerCase() + "_list")) @@ -1790,8 +1753,6 @@ We need either to put an annotation info on each member, or javaName("@NoOffset //"std::vector >::put(std::vector >)", "c10::ArrayRef::equals", "c10::ArrayRef::equals", - "c10::ArrayRef::equals", - "c10::ArrayRef::vec", "c10::ArrayRef::equals", "c10::ArrayRef::equals", "c10::ArrayRef::equals", @@ -1941,10 +1902,12 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("c10::Argument").pointerTypes("Argument")) // Ref in function_schema_inl.h, defined in function_schema.h ; - /* Classes that are not part of API (no 
TORCH_API nor C10_API) and are not argument nor return type of API methods. + /* Classes that are not part of the API (no TORCH_API nor C10_API) and are not argument nor return type of API methods. * Consider manual exclusion of all at::meta, at::native and caffe2 namespaces (but TypeMeta, that should * be moved to c10 one day). */ infoMap.put(new Info( + "CUevent_st", + "mz_zip_archive", "ModuleHolderIndicator", "at::ObserverContext", "at::Range", @@ -1987,6 +1950,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::Registry,at::HIPHooksArgs>", "c10::Registry,at::MPSHooksArgs>", "c10::Registry,at::ORTHooksArgs>", + "c10::SchemaRegistrationHandleRAII", "c10::Scalar::v_t", "c10::StreamGuard", "c10::Type::SingletonOrSharedTypePtr::Repr", @@ -2023,6 +1987,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::detail::infer_schema::createReturns", "c10::detail::infer_schema::createReturns,void>", // Parsing error ? "c10::detail::ivalue_to_const_ref_overload_return", + "c10::detail::RegistrationListenerList", "c10::either::", "c10::either", "c10::either::", @@ -2057,16 +2022,18 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::static_cast_with_inter_type,c10::BFloat16>", "c10::trivial_init_t", "caffe2::detail::_Uninitialized", - "caffe2::TypeMetaData", + "caffe2::detail::TypeMetaData", "ska::detailv3::sherwood_v3_entry::", "ska::detailv3::sherwood_v3_table::convertible_to_iterator", "ska::fibonacci_hash_policy", "ska::power_of_two_hash_policy", "ska::prime_number_hash_policy", + "ska_ordered::fibonacci_hash_policy", "ska_ordered::prime_number_hash_policy", "ska_ordered::detailv3::sherwood_v3_entry::", "ska_ordered::detailv3::sherwood_v3_table::convertible_to_iterator", "ska_ordered::order_preserving_flat_hash_map::convertible_to_value", + "ska_ordered::power_of_two_hash_policy", "std::hash", "std::hash", "std::hash", @@ -2086,7 +2053,10 @@ We need either to 
put an annotation info on each member, or javaName("@NoOffset "torch::autograd::InputBuffer", "torch::autograd::InputMetadata", "torch::autograd::NodeGuard", + "torch::autograd::ReadyQueue", "torch::autograd::TraceableFunction", + "torch::autograd::TypeAndSize", + "torch::autograd::SavedVariable", "torch::autograd::VariableHooks", "torch::data::DataLoaderBase::Job", "torch::data::DataLoaderBase::QuitWorker", @@ -2107,6 +2077,8 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::jit::IRAttributeError", "torch::jit::InterpreterContinuation", "torch::jit::InterpreterState", + "torch::jit::InterpreterStateImpl", + "torch::jit::Lexer", "torch::jit::Operator::C10Operator", "torch::jit::Operator::JitOnlyOperator", "torch::jit::Operator::UnparsedFunctionSchema", @@ -2126,6 +2098,8 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::nn::NamedAnyModule", "torch::nn::functions::CrossMapLRN2d", "torch::profiler::impl::HashCombine", + "torch::profiler::impl::kineto::ActivityTraceWrapper", + "torch::profiler::impl::ProfilerVoidEventStub", "torch::autograd::_jvp_fn_t", "torch::autograd::profiler::post_process_t", "at::StringView" // Confusion with string_view and @StringView, and doesn't seem to be of any use in API diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index a15e1cf5232..a3c2a504c7f 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -23,14 +23,11 @@ import org.bytedeco.javacpp.ClassProperties; import org.bytedeco.javacpp.LoadEnabled; -import org.bytedeco.javacpp.Loader; import org.bytedeco.javacpp.annotation.*; import org.bytedeco.javacpp.tools.Info; import org.bytedeco.javacpp.tools.InfoMap; import org.bytedeco.javacpp.tools.InfoMapper; -import java.util.List; - /** * @author Hervé 
Guillemet */ @@ -93,8 +90,6 @@ public void map(InfoMap infoMap) { "at::CUDAGeneratorImpl" ).skip()) - .put(new Info("c10::optional").pointerTypes("CUDAStreamOptional").define()) - //// Already defined in main torch .put(new Info("c10::Stream").pointerTypes("Stream")) .put(new Info("c10::optional").pointerTypes("StreamOptional")) @@ -149,5 +144,13 @@ public void map(InfoMap infoMap) { ; new torch.ArrayInfo("CUDAStream").elementTypes("c10::cuda::CUDAStream").mapArrayRef(infoMap); + + // Classes that are not part of the API (no TORCH_API nor C10_API) and are not argument nor return type of API methods. + infoMap.put(new Info( + "c10::cuda::OptionalCUDAGuard", + "c10::cuda::OptionalCUDAStreamGuard" + ).skip()) + ; + } } From 672cdfa6d467e140aaa641db83f07d2f44c993ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 16 Oct 2023 23:35:23 +0200 Subject: [PATCH 04/26] Add missing gen classes. Remove useless classes. --- .../org/bytedeco/pytorch/BackendMeta.java | 49 ++++++ .../org/bytedeco/pytorch/BackendMetaRef.java | 150 ++++++++++++++++++ .../bytedeco/pytorch/CompiledNodeArgs.java | 26 +++ .../java/org/bytedeco/pytorch/DLDevice_.java | 30 ++++ .../org/bytedeco/pytorch/DeviceTypeSet.java | 46 ++++++ .../org/bytedeco/pytorch/Float8_e4m3fn.java | 46 ++++++ .../org/bytedeco/pytorch/Float8_e5m2.java | 47 ++++++ .../org/bytedeco/pytorch/GatheredContext.java | 40 +++++ .../bytedeco/pytorch/MTIAHooksInterface.java | 45 ++++++ .../pytorch/OpaqueOptionalTensorRef.java | 46 ------ .../bytedeco/pytorch/OptionalTensorRef.java | 55 ------- .../org/bytedeco/pytorch/PointerPair.java | 40 +++++ .../bytedeco/pytorch/PointerPairOptional.java | 35 ++++ .../pytorch/PostAccumulateGradHook.java | 31 ++++ .../pytorch/PrivateUse1HooksArgs.java | 27 ++++ .../pytorch/PrivateUse1HooksInterface.java | 42 +++++ .../bytedeco/pytorch/SwapSavedVariables.java | 26 +++ .../org/bytedeco/pytorch/SymBoolType.java | 33 ++++ .../bytedeco/pytorch/SymbolicShapeMeta.java | 49 ++++++ 
.../pytorch/VariableHooksInterface.java | 45 ++++++ .../org/bytedeco/pytorch/WeakStorage.java | 2 - .../org/bytedeco/pytorch/XPUHooksArgs.java | 27 ++++ .../bytedeco/pytorch/XPUHooksInterface.java | 54 +++++++ .../org/bytedeco/pytorch/ZeroPad1dImpl.java | 38 +++++ .../bytedeco/pytorch/ZeroPad1dImplBase.java | 47 ++++++ .../pytorch/ZeroPad1dImplCloneable.java | 42 +++++ .../bytedeco/pytorch/ZeroPad1dOptions.java | 32 ++++ .../bytedeco/pytorch/ZeroPad2dImplBase.java | 43 +++++ .../org/bytedeco/pytorch/ZeroPad3dImpl.java | 38 +++++ .../bytedeco/pytorch/ZeroPad3dImplBase.java | 43 +++++ .../pytorch/ZeroPad3dImplCloneable.java | 42 +++++ .../bytedeco/pytorch/ZeroPad3dOptions.java | 30 ++++ .../gen/java/org/bytedeco/pytorch/bits16.java | 45 ++++++ .../java/org/bytedeco/pytorch/bits1x8.java | 45 ++++++ .../java/org/bytedeco/pytorch/bits2x4.java | 45 ++++++ .../java/org/bytedeco/pytorch/bits4x2.java | 45 ++++++ .../gen/java/org/bytedeco/pytorch/bits8.java | 45 ++++++ .../org/bytedeco/pytorch/global/torch.java | 34 ++-- .../org/bytedeco/pytorch/presets/torch.java | 17 +- 39 files changed, 1491 insertions(+), 131 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java create mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java new file mode 100644 index 00000000000..708b845b5e5 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// For ease of copy pasting +// #if 0 +// #endif + +/** + * This structure is intended to hold additional metadata of the specific device + * backend. + **/ +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class BackendMeta extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public BackendMeta() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public BackendMeta(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public BackendMeta(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public BackendMeta position(long position) { + return (BackendMeta)super.position(position); + } + @Override public BackendMeta getPointer(long i) { + return new BackendMeta((Pointer)this).offsetAddress(i); + } + + public native @ByVal BackendMetaRef clone( + @Const @ByRef BackendMetaRef ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java new file mode 100644 index 00000000000..8cf29659d24 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class BackendMetaRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public BackendMetaRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public BackendMetaRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public BackendMetaRef position(long position) { + return (BackendMetaRef)super.position(position); + } + @Override public BackendMetaRef getPointer(long i) { + return new BackendMetaRef((Pointer)this).offsetAddress(i); + } + + + public BackendMetaRef() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public BackendMetaRef(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public BackendMetaRef(BackendMeta target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(BackendMeta target, @ByVal DontIncreaseRefcount arg1); + + + + public BackendMetaRef(@ByRef(true) BackendMetaRef rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) BackendMetaRef rhs); + + public native @ByRef @Name("operator =") @NoException(true) BackendMetaRef put(@ByRef(true) BackendMetaRef rhs); + + public native @NoException(true) BackendMeta get(); + + public native @ByRef @Name("operator *") @NoException(true) BackendMeta multiply(); + + public native @Name("operator ->") @NoException(true) BackendMeta access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef BackendMetaRef rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. 
+ public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) BackendMeta release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal BackendMetaRef reclaim(BackendMeta owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal BackendMetaRef reclaim_copy(BackendMeta owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). 
+ */ + public static native @ByVal BackendMetaRef unsafe_steal_from_new(BackendMeta raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal BackendMetaRef unsafe_adapt_non_heap_allocated( + BackendMeta raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. 
+ * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal BackendMetaRef unsafe_reclaim_from_nonowning(BackendMeta raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java new file mode 100644 index 00000000000..780ba34f781 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java @@ -0,0 +1,26 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Namespace("torch::dynamo::autograd") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class CompiledNodeArgs extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public CompiledNodeArgs() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public CompiledNodeArgs(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java new file mode 100644 index 00000000000..ae08e542c96 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DLDevice_.java @@ -0,0 +1,30 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// We use forward declaration here instead of #include to avoid +// leaking DLPack implementation detail to every project that includes `ATen/Context.h`, which in turn +// would lead to a conflict when linked with another project using DLPack (for example TVM) +@Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DLDevice_ extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public DLDevice_() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public DLDevice_(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java new file mode 100644 index 00000000000..3373402df36 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeSet.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DeviceTypeSet extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public DeviceTypeSet(Pointer p) { super(p); } + public DeviceTypeSet() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef DeviceTypeSet put(@ByRef DeviceTypeSet x); + + public boolean empty() { return size() == 0; } + public native long size(); + + public DeviceType front() { try (Iterator it = begin()) { return it.get(); } } + public native void insert(@ByRef DeviceType value); + public native void erase(@ByRef DeviceType value); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const DeviceType get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java new file mode 100644 index 00000000000..876b2466492 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e4m3fn.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace detail + +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Float8_e4m3fn 
extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Float8_e4m3fn(Pointer p) { super(p); } + + public native @Cast("uint8_t") byte x(); public native Float8_e4m3fn x(byte setter); + + @Opaque public static class from_bits_t extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public from_bits_t() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public from_bits_t(Pointer p) { super(p); } + } + public static native @Const @ByVal from_bits_t from_bits(); + + public Float8_e4m3fn() { super((Pointer)null); allocate(); } + private native void allocate(); + + public Float8_e4m3fn(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); } + private native void allocate(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1); + public Float8_e4m3fn(float value) { super((Pointer)null); allocate(value); } + private native void allocate(float value); + public native @Name("operator float") float asFloat(); + public native @Cast("bool") boolean isnan(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java new file mode 100644 index 00000000000..02104b9c12d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Float8_e5m2.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static 
org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace detail + +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Float8_e5m2 extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Float8_e5m2(Pointer p) { super(p); } + + public native @Cast("uint8_t") byte x(); public native Float8_e5m2 x(byte setter); + + @Opaque public static class from_bits_t extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public from_bits_t() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public from_bits_t(Pointer p) { super(p); } + } + public static native @Const @ByVal from_bits_t from_bits(); + + public Float8_e5m2() { super((Pointer)null); allocate(); } + private native void allocate(); + + public Float8_e5m2(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); } + private native void allocate(@Cast("uint8_t") byte bits, @ByVal from_bits_t arg1); + public Float8_e5m2(float value) { super((Pointer)null); allocate(value); } + private native void allocate(float value); + public native @Name("operator float") float asFloat(); + public native @Cast("bool") boolean isnan(); + public native @Cast("bool") boolean isinf(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java new file mode 100644 index 00000000000..4795c37a2f2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GatheredContext.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; 
+import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// used to hold traceback information in allocators +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GatheredContext extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public GatheredContext() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public GatheredContext(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public GatheredContext(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public GatheredContext position(long position) { + return (GatheredContext)super.position(position); + } + @Override public GatheredContext getPointer(long i) { + return new GatheredContext((Pointer)this).offsetAddress(i); + } + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java new file mode 100644 index 00000000000..e6889274576 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import 
org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MTIAHooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public MTIAHooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public MTIAHooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public MTIAHooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public MTIAHooksInterface position(long position) { + return (MTIAHooksInterface)super.position(position); + } + @Override public MTIAHooksInterface getPointer(long i) { + return new MTIAHooksInterface((Pointer)this).offsetAddress(i); + } + + + public native void initMTIA(); + + public native @Cast("bool") boolean hasMTIA(); + + public native @StdString BytePointer showConfig(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java deleted file mode 100644 index ccb53cba6a8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java +++ /dev/null @@ -1,46 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import 
org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Storage for a non-owning Tensor, without needing to include Tensor.h -@Namespace("at::internal") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OpaqueOptionalTensorRef extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OpaqueOptionalTensorRef(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public OpaqueOptionalTensorRef(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public OpaqueOptionalTensorRef position(long position) { - return (OpaqueOptionalTensorRef)super.position(position); - } - @Override public OpaqueOptionalTensorRef getPointer(long i) { - return new OpaqueOptionalTensorRef((Pointer)this).offsetAddress(i); - } - - public OpaqueOptionalTensorRef() { super((Pointer)null); allocate(); } - private native void allocate(); - - public native OptionalTensorRef get(); - - public native @ByRef @Name("operator *") OptionalTensorRef multiply(); - public native @Name("operator ->") OptionalTensorRef access(); - - public native @Const @ByRef Tensor getTensor(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java deleted file mode 100644 index c8bde6fc129..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import 
org.bytedeco.pytorch.functions.*; -import org.bytedeco.pytorch.Module; -import org.bytedeco.javacpp.annotation.Cast; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OptionalTensorRef extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OptionalTensorRef(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public OptionalTensorRef(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public OptionalTensorRef position(long position) { - return (OptionalTensorRef)super.position(position); - } - @Override public OptionalTensorRef getPointer(long i) { - return new OptionalTensorRef((Pointer)this).offsetAddress(i); - } - - public OptionalTensorRef() { super((Pointer)null); allocate(); } - private native void allocate(); - - public OptionalTensorRef(@Const @ByRef TensorBase src) { super((Pointer)null); allocate(src); } - private native void allocate(@Const @ByRef TensorBase src); - - public OptionalTensorRef(@Const @ByRef OptionalTensorRef rhs) { super((Pointer)null); allocate(rhs); } - private native void allocate(@Const @ByRef OptionalTensorRef rhs); - - public native @ByRef @Name("operator =") OptionalTensorRef put(@ByVal OptionalTensorRef rhs); - - public native @Cast("bool") boolean has_value(); - - public native @Const @ByRef Tensor getTensorRef(); - - public native @Const @ByRef @Name("operator *") Tensor multiply(); - - public native @Const @Name("operator ->") Tensor access(); - - public 
native @Cast("bool") @Name("operator bool") boolean asBoolean(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java new file mode 100644 index 00000000000..3c82ba7b308 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPair.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::pair") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PointerPair extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PointerPair(Pointer p) { super(p); } + public PointerPair(Pointer firstValue, Pointer secondValue) { this(); put(firstValue, secondValue); } + public PointerPair() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef PointerPair put(@ByRef PointerPair x); + + + @MemberGetter public native Pointer first(); public native PointerPair first(Pointer first); + @MemberGetter public native Pointer second(); public native PointerPair second(Pointer second); + + public PointerPair put(Pointer firstValue, Pointer secondValue) { + first(firstValue); + second(secondValue); + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java new file mode 100644 index 00000000000..4295b5cd213 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PointerPairOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PointerPairOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PointerPairOptional(Pointer p) { super(p); } + public PointerPairOptional(PointerPair value) { this(); put(value); } + public PointerPairOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef PointerPairOptional put(@ByRef PointerPairOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @ByRef PointerPair get(); + @ValueSetter public native PointerPairOptional put(@ByRef PointerPair value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java new file mode 100644 index 00000000000..ff8e318da7a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PostAccumulateGradHook.java @@ -0,0 +1,31 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::autograd") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PostAccumulateGradHook extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PostAccumulateGradHook(Pointer p) { super(p); } + + public native @Name("operator ()") void apply(@Cast("const torch::autograd::Variable*") @ByRef Tensor tensor); + // only implemented for python hooks on nodes, registers hook with compiled + // autograd + public native void compiled_args(@ByRef CompiledNodeArgs args); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java new file mode 100644 index 00000000000..1dcb562e341 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksArgs.java @@ -0,0 +1,27 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PrivateUse1HooksArgs extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public PrivateUse1HooksArgs() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PrivateUse1HooksArgs(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java new file mode 100644 index 00000000000..a9bfdaf533f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PrivateUse1HooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public PrivateUse1HooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public PrivateUse1HooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PrivateUse1HooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public PrivateUse1HooksInterface position(long position) { + return (PrivateUse1HooksInterface)super.position(position); + } + @Override public PrivateUse1HooksInterface getPointer(long i) { + return new PrivateUse1HooksInterface((Pointer)this).offsetAddress(i); + } + + public native @Const @ByRef Generator getDefaultGenerator(@Cast("c10::DeviceIndex") byte device_index); + + public native @ByVal Device getDeviceFromPtr(Pointer data); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java new file mode 100644 index 00000000000..a22bdcdb139 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java @@ -0,0 +1,26 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Namespace("torch::dynamo::autograd") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SwapSavedVariables extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public SwapSavedVariables() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public SwapSavedVariables(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java new file mode 100644 index 00000000000..a889f3c993e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java @@ -0,0 +1,33 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SymBoolType extends Type { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public SymBoolType(Pointer p) { super(p); } + + public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); + public native @StdString BytePointer str(); + public native @StdString BytePointer annotation_str_impl(@ByVal(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); + public native @StdString BytePointer annotation_str_impl(); + @MemberGetter public static native TypeKind Kind(); + // global singleton + public static native @ByVal @Cast("c10::SymBoolTypePtr*") SingletonTypePtr get(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java new file mode 100644 index 00000000000..479d5459760 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SymbolicShapeMeta extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public SymbolicShapeMeta() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SymbolicShapeMeta(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public SymbolicShapeMeta(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public SymbolicShapeMeta position(long position) { + return (SymbolicShapeMeta)super.position(position); + } + @Override public SymbolicShapeMeta getPointer(long i) { + return new SymbolicShapeMeta((Pointer)this).offsetAddress(i); + } + + public native @ByRef @NoOffset SymDimVector sizes_(); public native SymbolicShapeMeta sizes_(SymDimVector setter); + public native @ByRef @NoOffset SymDimVector strides_(); public native SymbolicShapeMeta strides_(SymDimVector setter); + public native @ByRef @NoOffset SymInt numel_(); public native SymbolicShapeMeta numel_(SymInt setter); + public native @ByRef @NoOffset SymInt storage_offset_(); public native SymbolicShapeMeta storage_offset_(SymInt setter); + public native @ByRef @NoOffset SymBool is_contiguous_(); public native SymbolicShapeMeta is_contiguous_(SymBool setter); + public native @ByRef @NoOffset SymBool is_channels_last_contiguous_(); public native SymbolicShapeMeta is_channels_last_contiguous_(SymBool setter); + public native @ByRef @NoOffset SymBool is_channels_last_3d_contiguous_(); public native SymbolicShapeMeta is_channels_last_3d_contiguous_(SymBool setter); + public native @ByRef @NoOffset SymBool is_channels_last_(); public native SymbolicShapeMeta is_channels_last_(SymBool setter); + public native @ByRef @NoOffset SymBool is_channels_last_3d_(); public native SymbolicShapeMeta is_channels_last_3d_(SymBool setter); + public native @ByRef @NoOffset SymBool is_non_overlapping_and_dense_(); public native SymbolicShapeMeta is_non_overlapping_and_dense_(SymBool setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java new file mode 100644 index 00000000000..1d9d16c2218 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java 
@@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace torch::autograd + +@Namespace("at::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class VariableHooksInterface extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public VariableHooksInterface(Pointer p) { super(p); } + + public native @ByVal TensorBase tensor_data(@Const @ByRef TensorBase arg0); + public native @ByVal TensorBase variable_data(@Const @ByRef TensorBase arg0); + public native @SharedPtr Node grad_fn(@Const @ByRef TensorBase arg0); + + public native void remove_hook(@Const @ByRef TensorBase arg0, @Cast("unsigned") int pos); + public native @Cast("bool") boolean is_view(@Const @ByRef TensorBase arg0); + public native @Const @ByRef TensorBase base(@Const @ByRef TensorBase arg0); + public native @StdString BytePointer name(@Const @ByRef TensorBase arg0); + public native @Cast("bool") boolean is_leaf(@Const @ByRef TensorBase arg0); + public native @Cast("int64_t") long output_nr(@Const @ByRef TensorBase arg0); + public native void set_data(@Const @ByRef TensorBase arg0, @Const @ByRef TensorBase arg1); + public native @ByVal TensorBase data(@Const @ByRef TensorBase arg0); + public native @Cast("int64_t") long _version(@Const @ByRef TensorBase arg0); + public native void retain_grad(@Const @ByRef 
TensorBase arg0); + public native @Cast("bool") boolean retains_grad(@Const @ByRef TensorBase arg0); + public native void _backward(@Const @ByRef Tensor arg0, @ByVal @Cast("at::TensorList*") TensorArrayRef arg1, @Const @ByRef TensorOptional arg2, @ByVal BoolOptional arg3, @Cast("bool") boolean arg4); + public native void requires_grad_(@Const @ByRef TensorBase arg0, @Cast("bool") boolean arg1); + public native void basic_autograd_not_implemented_fallback(@Const @ByRef OperatorHandle op, @ByVal DispatchKeySet dispatch_keys, IValueVector stack); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java index 70f785f79e7..790ad83d148 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java @@ -18,8 +18,6 @@ import static org.bytedeco.pytorch.global.torch.*; -// To allow intrusive_ptr inside std::map or std::set, we need operator< - @Name("c10::weak_intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class WeakStorage extends Pointer { static { Loader.load(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java new file mode 100644 index 00000000000..ebfc2122f37 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksArgs.java @@ -0,0 +1,27 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import 
static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class XPUHooksArgs extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public XPUHooksArgs() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public XPUHooksArgs(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java new file mode 100644 index 00000000000..4ca00697fc2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java @@ -0,0 +1,54 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class XPUHooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public XPUHooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public XPUHooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public XPUHooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public XPUHooksInterface position(long position) { + return (XPUHooksInterface)super.position(position); + } + @Override public XPUHooksInterface getPointer(long i) { + return new XPUHooksInterface((Pointer)this).offsetAddress(i); + } + + + public native void initXPU(); + + public native @Cast("bool") boolean hasXPU(); + + public native @StdString BytePointer showConfig(); + + public native @ByVal Device getATenDeviceFromDLPackDevice( + @Const @ByRef DLDevice_ dl_device, + Pointer data); + + public native @ByRef DLDevice_ getDLPackDeviceFromATenDevice( + @ByRef DLDevice_ dl_device, + @Const @ByRef Device aten_device, + Pointer data); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java new file mode 100644 index 00000000000..4aa29b693e1 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImpl.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Applies ZeroPad over a 1-D input. 
+@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad1dImpl extends ZeroPad1dImplBase { + static { Loader.load(); } + + + public ZeroPad1dImpl(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); + public ZeroPad1dImpl(@Const @ByRef ZeroPad1dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@Const @ByRef ZeroPad1dOptions options_); + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad1dImpl(Pointer p) { super(p); } + /** Downcast constructor. */ + public ZeroPad1dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java new file mode 100644 index 00000000000..548d01aa4c5 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplBase.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// ============================================================================ + +/** 
Base class for all (dimension-specialized) ZeroPad modules. */ +@Name("torch::nn::ZeroPadImpl<1,torch::nn::ZeroPad1dImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad1dImplBase extends ZeroPad1dImplCloneable { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad1dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ZeroPad1dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + + public ZeroPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); + public ZeroPad1dImplBase(@Const @ByRef ZeroPad1dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@Const @ByRef ZeroPad1dOptions options_); + + public native void reset(); + + public native @ByVal Tensor forward(@Const @ByRef Tensor input); + + /** Pretty prints the {@code ZeroPad{1,2}d} module into the given {@code stream}. */ + public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); + + /** The options with which this {@code Module} was constructed. 
*/ + public native @ByRef ZeroPad1dOptions options(); public native ZeroPad1dImplBase options(ZeroPad1dOptions setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java new file mode 100644 index 00000000000..0997e9ab359 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::nn::Cloneable") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad1dImplCloneable extends Module { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad1dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ZeroPad1dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad1dImplCloneable pointer); + + + /** {@code reset()} must perform initialization of all members with reference + * semantics, most importantly parameters, buffers and submodules. */ + public native void reset(); + + /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters + * and submodules in the cloned module are different from those in the + * original module. */ + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java new file mode 100644 index 00000000000..a054ee42159 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dOptions.java @@ -0,0 +1,32 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; 
+ + +// ============================================================================ + +@Name("torch::nn::ZeroPadOptions<1>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad1dOptions extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad1dOptions(Pointer p) { super(p); } + + public ZeroPad1dOptions(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); + public native @Cast("torch::ExpandingArray<1*2>*") @ByRef @NoException(true) LongPointer padding(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java new file mode 100644 index 00000000000..8228aca0330 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplBase.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::nn::ZeroPadImpl<2,torch::nn::ZeroPad2dImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad2dImplBase extends ZeroPad2dImplCloneable { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ZeroPad2dImplBase(Pointer p) { super(p); } + /** Downcast constructor. */ + public ZeroPad2dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + + public ZeroPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); + public ZeroPad2dImplBase(@Const @ByRef ZeroPad2dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@Const @ByRef ZeroPad2dOptions options_); + + public native void reset(); + + public native @ByVal Tensor forward(@Const @ByRef Tensor input); + + /** Pretty prints the {@code ZeroPad{1,2}d} module into the given {@code stream}. */ + public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); + + /** The options with which this {@code Module} was constructed. 
*/ + public native @ByRef ZeroPad2dOptions options(); public native ZeroPad2dImplBase options(ZeroPad2dOptions setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java new file mode 100644 index 00000000000..c8ed1b20d82 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImpl.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ZeroPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Applies ZeroPad over a 3-D input. +@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad3dImpl extends ZeroPad3dImplBase { + static { Loader.load(); } + + + public ZeroPad3dImpl(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); + public ZeroPad3dImpl(@Const @ByRef ZeroPad3dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@Const @ByRef ZeroPad3dOptions options_); + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad3dImpl(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ZeroPad3dImpl(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast") void allocate(@SharedPtr Module pointer); + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java new file mode 100644 index 00000000000..7b2beab3f78 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplBase.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::nn::ZeroPadImpl<3,torch::nn::ZeroPad3dImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad3dImplBase extends ZeroPad3dImplCloneable { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad3dImplBase(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ZeroPad3dImplBase(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + + public ZeroPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); + public ZeroPad3dImplBase(@Const @ByRef ZeroPad3dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@Const @ByRef ZeroPad3dOptions options_); + + public native void reset(); + + public native @ByVal Tensor forward(@Const @ByRef Tensor input); + + /** Pretty prints the {@code ZeroPad{1,2}d} module into the given {@code stream}. */ + public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); + + /** The options with which this {@code Module} was constructed. 
*/ + public native @ByRef ZeroPad3dOptions options(); public native ZeroPad3dImplBase options(ZeroPad3dOptions setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java new file mode 100644 index 00000000000..66a472e9ad5 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::nn::Cloneable") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad3dImplCloneable extends Module { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad3dImplCloneable(Pointer p) { super(p); } + /** Downcast constructor. 
*/ + public ZeroPad3dImplCloneable(Module pointer) { super((Pointer)null); allocate(pointer); } + @Namespace private native @SharedPtr @Name("SHARED_PTR_NAMESPACE::dynamic_pointer_cast, torch::nn::Module>") void allocate(@SharedPtr Module pointer); + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad3dImplCloneable pointer); + + + /** {@code reset()} must perform initialization of all members with reference + * semantics, most importantly parameters, buffers and submodules. */ + public native void reset(); + + /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters + * and submodules in the cloned module are different from those in the + * original module. */ + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java new file mode 100644 index 00000000000..386b8836ff8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dOptions.java @@ -0,0 +1,30 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; 
+ + +@Name("torch::nn::ZeroPadOptions<3>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ZeroPad3dOptions extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ZeroPad3dOptions(Pointer p) { super(p); } + + public ZeroPad3dOptions(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); + public native @Cast("torch::ExpandingArray<3*2>*") @ByRef @NoException(true) LongPointer padding(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java new file mode 100644 index 00000000000..abeaf27356f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits16.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * bits16 is an uninterpreted dtype of a tensor with 16 bits, without any + * semantics defined. + */ +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class bits16 extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public bits16(Pointer p) { super(p); } + /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ + public bits16(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public bits16 position(long position) { + return (bits16)super.position(position); + } + @Override public bits16 getPointer(long i) { + return new bits16((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint16_t") short val_(); public native bits16 val_(short setter); + public bits16() { super((Pointer)null); allocate(); } + private native void allocate(); + public bits16(@Cast("uint16_t") short val) { super((Pointer)null); allocate(val); } + private native void allocate(@Cast("uint16_t") short val); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java new file mode 100644 index 00000000000..5287dcb83cb --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits1x8.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * bits1x8 is an uninterpreted dtype of a tensor with 1 bit (packed to byte + * boundary), without any semantics defined. + */ +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class bits1x8 extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public bits1x8(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public bits1x8(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public bits1x8 position(long position) { + return (bits1x8)super.position(position); + } + @Override public bits1x8 getPointer(long i) { + return new bits1x8((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint8_t") byte val_(); public native bits1x8 val_(byte setter); + public bits1x8() { super((Pointer)null); allocate(); } + private native void allocate(); + public bits1x8(@Cast("uint8_t") byte val) { super((Pointer)null); allocate(val); } + private native void allocate(@Cast("uint8_t") byte val); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java new file mode 100644 index 00000000000..3200e81f615 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits2x4.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * bits2x4 is an uninterpreted dtype of a tensor with 2 bits (packed to byte + * boundary), without any semantics defined. 
+ */ +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class bits2x4 extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public bits2x4(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public bits2x4(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public bits2x4 position(long position) { + return (bits2x4)super.position(position); + } + @Override public bits2x4 getPointer(long i) { + return new bits2x4((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint8_t") byte val_(); public native bits2x4 val_(byte setter); + public bits2x4() { super((Pointer)null); allocate(); } + private native void allocate(); + public bits2x4(@Cast("uint8_t") byte val) { super((Pointer)null); allocate(val); } + private native void allocate(@Cast("uint8_t") byte val); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java new file mode 100644 index 00000000000..a3e2a43ad51 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits4x2.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * bits4x2 is an uninterpreted dtype of a tensor with 4 bits 
(packed to byte + * boundary), without any semantics defined. + */ +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class bits4x2 extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public bits4x2(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public bits4x2(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public bits4x2 position(long position) { + return (bits4x2)super.position(position); + } + @Override public bits4x2 getPointer(long i) { + return new bits4x2((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint8_t") byte val_(); public native bits4x2 val_(byte setter); + public bits4x2() { super((Pointer)null); allocate(); } + private native void allocate(); + public bits4x2(@Cast("uint8_t") byte val) { super((Pointer)null); allocate(val); } + private native void allocate(@Cast("uint8_t") byte val); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java new file mode 100644 index 00000000000..9770762725d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bits8.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * bits8 
is an uninterpreted dtype of a tensor with 8 bits, without any + * semantics defined. + */ +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class bits8 extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public bits8(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public bits8(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public bits8 position(long position) { + return (bits8)super.position(position); + } + @Override public bits8 getPointer(long i) { + return new bits8((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint8_t") byte val_(); public native bits8 val_(byte setter); + public bits8() { super((Pointer)null); allocate(); } + private native void allocate(); + public bits8(@Cast("uint8_t") byte val) { super((Pointer)null); allocate(val); } + private native void allocate(@Cast("uint8_t") byte val); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 5cbb27119aa..b4ba6b043fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -8163,10 +8163,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native DispatchKey legacyExtractDispatchKey(@Const @ByRef TensorBase t); - -// Targeting ../MaybeOwnedTraits.java - - + // namespace at // namespace c10 @@ -11166,12 +11163,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// Targeting ../OptionalTensorRef.java - - -// Targeting ../TensorRef.java - +// Use to convert a TensorBase (that may be undefined) to an at::Tensor +// without bumping refcount. 
@@ -13300,6 +13294,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../BackendMetaRef.java + +// To allow intrusive_ptr inside std::map or std::set, we need operator< // Targeting ../WeakStorage.java @@ -14821,9 +14817,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::impl") public static native void SetVariableHooks(VariableHooksInterface hooks); @Namespace("at::impl") public static native VariableHooksInterface GetVariableHooks(); @Namespace("at::impl") public static native @Cast("bool") boolean HasVariableHooks(); -// Targeting ../VariableHooksRegisterer.java - - // namespace at::impl @@ -17950,9 +17943,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../MTIAHooksInterface.java -// Targeting ../MTIAHooksArgs.java - - // #define REGISTER_MTIA_HOOKS(clsname) // C10_REGISTER_CLASS(MTIAHooksRegistry, clsname, clsname) @Namespace("at::detail") public static native @Const @ByRef MTIAHooksInterface getMTIAHooks(); @@ -59888,9 +59878,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // no parallel algorithm (such as parallel_reduce) should split work into // smaller than GRAIN_SIZE chunks. @Namespace("at::internal") @MemberGetter public static native @Cast("const int64_t") long GRAIN_SIZE(); -// Targeting ../OpaqueOptionalTensorRef.java - +// Storage for a non-owning Tensor, without needing to include Tensor.h // Targeting ../OperandInfo.java @@ -66064,12 +66053,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include // #include -// Targeting ../MTLCommandBuffer_t.java - - -// Targeting ../DispatchQueue_t.java - +// #ifdef __OBJC__ +// #else // #endif /** Returns true if MPS device is available. 
*/ @@ -66087,11 +66073,11 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::mps") public static native void commit(); /** Get the current command buffer to encode the Metal commands. */ -@Namespace("torch::mps") public static native MTLCommandBuffer_t get_command_buffer(); +@Namespace("torch::mps") public static native Pointer get_command_buffer(); /** Get the dispatch_queue_t to synchronize encoding the custom kernels * with the PyTorch MPS backend. */ -@Namespace("torch::mps") public static native DispatchQueue_t get_dispatch_queue(); +@Namespace("torch::mps") public static native Pointer get_dispatch_queue(); // namespace mps // namespace torch diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 022f9926ba7..04ff00811f9 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -446,7 +446,7 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional >", "c10::optional >").pointerTypes("T_TypePtrLong_TOptional").cast().define()) .put(new Info("c10::optional").pointerTypes("StringViewOptional").define()) .put(new Info("c10::optional >").pointerTypes("StringViewVectorOptional").define()) - .put(new Info("c10::optional >")/*.cast?*/.pointerTypes("PointerPairOptional").define()) + .put(new Info("c10::optional >", "c10::optional >")/*.cast?*/.pointerTypes("PointerPairOptional").define()) ; @@ -963,7 +963,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::pair").pointerTypes("StringAnyModulePair").define()) .put(new Info("std::pair >").pointerTypes("StringSharedModulePair").define()) .put(new Info("std::pair").pointerTypes("RecordFunctionHandleIntPair").define()) - .put(new Info("std::pair").pointerTypes("PointerPair").define()) + .put(new Info("std::pair", "std::pair").pointerTypes("PointerPair").define()) .put(new 
Info("std::pair").pointerTypes("SizeTMatchedSchemaPair").define()) ; @@ -1909,10 +1909,15 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "CUevent_st", "mz_zip_archive", "ModuleHolderIndicator", + "at::MTIAHooksArgs", "at::ObserverContext", "at::Range", "at::StepCallbacks::StartEndPair", "at::TensorBase::unsafe_borrow_t", + "at::internal::OpaqueOptionalTensorRef", + "at::impl::VariableHooksRegisterer", // TORCH_API but unused ? + "at::TensorRef", + "at::OptionalTensorRef", //"at::mt19937_data_pod", //"at::mt19937_engine", "at::tracer::impl::NoTracerDispatchMode", @@ -1939,6 +1944,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::IValue::Payload", "c10::IValue::Payload::TriviallyCopyablePayload", "c10::IValue::Payload::TriviallyCopyablePayload::", + "c10::MaybeOwnedTraits", "c10::MultiStreamGuard", "c10::OpTableOffsetAndMask", "c10::OperatorNameView", @@ -2114,6 +2120,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::detail::makeBaseType", "torch::detail::constructSchemaOrName", "at::operator <<(std::ostream&, at::Range&)", + "at::impl::VariableHooksInterface::_register_hook", "caffe2::serialize::detail::getPadding", "at::assert_no_partial_overlap(c10::TensorImpl*, c10::TensorImpl*)", "at::TensorIteratorBase::apply_perm_and_mul", @@ -2175,7 +2182,9 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "std::enable_shared_from_this", "std::enable_shared_from_this", "std::enable_shared_from_this", "std::enable_shared_from_this", "std::enable_shared_from_this", "std::enable_shared_from_this" - ).pointerTypes("Pointer").cast()); + ).pointerTypes("Pointer").cast()) + .put(new Info("MTLCommandBuffer_t", "DispatchQueue_t").valueTypes("Pointer").pointerTypes("PointerPointer").skip()); + ///// Special cases needing javaText @@ -2207,7 +2216,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset 
infoMap.put(new Info("at::TensorIteratorBase").purify()); - //// Callback functions + //// Function pointers // skip() is added when function pointer are parsed instead of std::function to use the class in package // functions and prevent the creation of an automatic class in main package. // If a native function returns a std::function, no way to map it. From 424835500e94d3657dfcbe638441972e9e66fd3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 17 Oct 2023 12:27:26 +0200 Subject: [PATCH 05/26] Add CUDACachingAllocator --- .../org/bytedeco/pytorch/DataPtrVector.java | 47 ++++++ .../bytedeco/pytorch/cuda/AllocatorState.java | 33 ++++ .../org/bytedeco/pytorch/cuda/BlockInfo.java | 50 ++++++ .../bytedeco/pytorch/cuda/CUDAAllocator.java | 89 ++++++++++ .../pytorch/cuda/CheckpointDelta.java | 47 ++++++ .../bytedeco/pytorch/cuda/DeviceStats.java | 79 +++++++++ .../org/bytedeco/pytorch/cuda/PointerSet.java | 49 ++++++ .../bytedeco/pytorch/cuda/SegmentInfo.java | 55 ++++++ .../bytedeco/pytorch/cuda/SnapshotInfo.java | 44 +++++ .../java/org/bytedeco/pytorch/cuda/Stat.java | 59 +++++++ .../org/bytedeco/pytorch/cuda/TraceEntry.java | 101 +++++++++++ .../pytorch/cuda/TraceEntryVector.java | 50 ++++++ .../org/bytedeco/pytorch/global/torch.java | 3 + .../bytedeco/pytorch/global/torch_cuda.java | 159 +++++++++++++++++- .../org/bytedeco/pytorch/presets/torch.java | 35 ++-- .../bytedeco/pytorch/presets/torch_cuda.java | 48 +++++- .../pytorch/presets/torch_cuda_include.h | 2 +- 17 files changed, 925 insertions(+), 25 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java create mode 
100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java new file mode 100644 index 00000000000..30279d21e40 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtrVector.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DataPtrVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public DataPtrVector(Pointer p) { super(p); } + public DataPtrVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public DataPtr front() { return get(0); } + public DataPtr back() { return get(size() - 1); } + @Index(function = "at") public native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java new file mode 100644 index 00000000000..b5ceff70d2a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java @@ -0,0 +1,33 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + 
+@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class AllocatorState extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public AllocatorState() { super((Pointer)null); allocate(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AllocatorState(Pointer p) { super(p); } + @SharedPtr @Name("std::make_shared") private native void allocate(); + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java new file mode 100644 index 00000000000..117338acc13 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java @@ -0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// Struct containing info of an allocation block (i.e. a fractional part of a +// cudaMalloc).. +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class BlockInfo extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public BlockInfo() { super((Pointer)null); allocate(); } + /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ + public BlockInfo(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public BlockInfo(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public BlockInfo position(long position) { + return (BlockInfo)super.position(position); + } + @Override public BlockInfo getPointer(long i) { + return new BlockInfo((Pointer)this).offsetAddress(i); + } + + public native @Cast("int64_t") long size(); public native BlockInfo size(long setter); + public native @Cast("int64_t") long requested_size(); public native BlockInfo requested_size(long setter); + public native int gc_counter(); public native BlockInfo gc_counter(int setter); + public native @Cast("bool") boolean allocated(); public native BlockInfo allocated(boolean setter); + public native @Cast("bool") boolean active(); public native BlockInfo active(boolean setter); + public native @SharedPtr GatheredContext context_when_allocated(); public native BlockInfo context_when_allocated(GatheredContext setter); // per-watcher context +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java new file mode 100644 index 00000000000..8803fdd9386 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java @@ -0,0 +1,89 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static 
org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAAllocator extends Allocator { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CUDAAllocator(Pointer p) { super(p); } + + public native Pointer raw_alloc(@Cast("size_t") long nbytes); + public native Pointer raw_alloc_with_stream(@Cast("size_t") long nbytes, @Cast("cudaStream_t") Pointer stream); + public native void raw_delete(Pointer ptr); + public native void init(int device_count); + public native @Cast("bool") boolean initialized(); + public native void setMemoryFraction(double fraction, int device); + public native void emptyCache(); + public native void cacheInfo(int dev_id, @Cast("size_t*") SizeTPointer largestBlock); + public native Pointer getBaseAllocation(Pointer ptr, @Cast("size_t*") SizeTPointer size); + public native void recordStream(@Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr arg0, @ByVal CUDAStream stream); + public native @ByVal DeviceStats getDeviceStats(int device); + public native void resetAccumulatedStats(int device); + public native void resetPeakStats(int device); + public native @ByVal SnapshotInfo snapshot(); + public native void beginAllocateStreamToPool( + int device, + @Cast("cudaStream_t") Pointer stream, + @ByVal @Cast("c10::cuda::MempoolId_t*") DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair mempool_id); + public native void endAllocateStreamToPool(int device, @Cast("cudaStream_t") Pointer stream); + public native void releasePool(int device, @ByVal @Cast("c10::cuda::MempoolId_t*") 
DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair mempool_id); + // returns true if the allocated blocks are equal to expected live allocations + public native @Cast("bool") boolean checkPoolLiveAllocations( + int device, + @ByVal @Cast("c10::cuda::MempoolId_t*") DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair mempool_id, + @Const @ByRef PointerSet expected_live_allocations); + public native @SharedPtr Pointer getIpcDevPtr(@StdString BytePointer handle); + public native @SharedPtr Pointer getIpcDevPtr(@StdString String handle); + public native @Cast("bool") boolean isHistoryEnabled(); + + public native void attachOutOfMemoryObserver(@ByVal @Cast("c10::cuda::CUDACachingAllocator::OutOfMemoryObserver*") Pointer observer); + + public native void enablePeerAccess(int dev, int dev_to_access); + + // memory not allocated from cudaMalloc cannot be copied + // across devices using cudaMemcpyAsync if peer to peer access is disabled. + // instead it requires cudaMemcpyAsyncPeer + // with P2P Enabled, all combinations work + // with P2P Disabled: + // cudaMalloc cudaMallocAsync/cuMemMap + // cudaMemcpyAsyncPeer works works + // cudaMemcpyAsync works error + + // This function chooses to use the Peer version of + // memcpy if required, based on where the allocator put dst/src. 
+ public native @Cast("cudaError_t") int memcpyAsync( + Pointer dst, + int dstDevice, + @Const Pointer src, + int srcDevice, + @Cast("size_t") long count, + @Cast("cudaStream_t") Pointer stream, + @Cast("bool") boolean p2p_enabled); + public native @SharedPtr("c10::cuda::CUDACachingAllocator::AllocatorState") @ByVal AllocatorState getCheckpointState( + int device, + @ByVal @Cast("c10::cuda::MempoolId_t*") DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair id); + public native @ByVal CheckpointDelta setCheckpointPoolState( + int device, + @SharedPtr("c10::cuda::CUDACachingAllocator::AllocatorState") @ByVal AllocatorState pps); + public native @StdString BytePointer name(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java new file mode 100644 index 00000000000..98695704165 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// returns the pointers freed in the pool +// and the pointers allocated. 
Note: a pointer +// may appear in both freed and allocated +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CheckpointDelta extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public CheckpointDelta() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public CheckpointDelta(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CheckpointDelta(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public CheckpointDelta position(long position) { + return (CheckpointDelta)super.position(position); + } + @Override public CheckpointDelta getPointer(long i) { + return new CheckpointDelta((Pointer)this).offsetAddress(i); + } + + @MemberGetter public native @Cast("void**") @StdVector PointerPointer ptrs_freed(); + @MemberGetter public native @ByRef DataPtrVector dataptrs_allocd(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java new file mode 100644 index 00000000000..1f128e2650f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java @@ -0,0 +1,79 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static 
org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// Struct containing memory allocator summary statistics for a device. +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class DeviceStats extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public DeviceStats() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public DeviceStats(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DeviceStats(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public DeviceStats position(long position) { + return (DeviceStats)super.position(position); + } + @Override public DeviceStats getPointer(long i) { + return new DeviceStats((Pointer)this).offsetAddress(i); + } + + // COUNT: allocations requested by client code + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer allocation(); public native DeviceStats allocation(BoolPointer setter); + // COUNT: number of allocated segments from cudaMalloc(). 
+ public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer segment(); public native DeviceStats segment(BoolPointer setter); + // COUNT: number of active memory blocks (allocated or used by stream) + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer active(); public native DeviceStats active(BoolPointer setter); + // COUNT: number of inactive, split memory blocks (unallocated but can't be + // released via cudaFree) + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer inactive_split(); public native DeviceStats inactive_split(BoolPointer setter); + + // SUM: bytes allocated by this memory allocator + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer allocated_bytes(); public native DeviceStats allocated_bytes(BoolPointer setter); + // SUM: bytes reserved by this memory allocator (both free and used) + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer reserved_bytes(); public native DeviceStats reserved_bytes(BoolPointer setter); + // SUM: bytes within active memory blocks + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer active_bytes(); public native DeviceStats active_bytes(BoolPointer setter); + // SUM: bytes within inactive, split memory blocks + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer inactive_split_bytes(); public native DeviceStats inactive_split_bytes(BoolPointer setter); + // SUM: bytes requested by client code + public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") BoolPointer requested_bytes(); public native DeviceStats requested_bytes(BoolPointer setter); + + // COUNT: total number of failed calls to CUDA malloc necessitating cache + // flushes. 
+ public native @Cast("int64_t") long num_alloc_retries(); public native DeviceStats num_alloc_retries(long setter); + + // COUNT: total number of OOMs (i.e. failed calls to CUDA after cache flush) + public native @Cast("int64_t") long num_ooms(); public native DeviceStats num_ooms(long setter); + + // COUNT: total number of oversize blocks allocated from pool + public native @ByRef Stat oversize_allocations(); public native DeviceStats oversize_allocations(Stat setter); + + // COUNT: total number of oversize blocks requiring malloc + public native @ByRef Stat oversize_segments(); public native DeviceStats oversize_segments(Stat setter); + + // SIZE: maximum block size that is allowed to be split. + public native @Cast("int64_t") long max_split_size(); public native DeviceStats max_split_size(long setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java new file mode 100644 index 00000000000..8667ea71341 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + +@Name("std::unordered_set") @Properties(inherit = 
org.bytedeco.pytorch.presets.torch_cuda.class) +public class PointerSet extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public PointerSet(Pointer p) { super(p); } + public PointerSet() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef PointerSet put(@ByRef PointerSet x); + + public boolean empty() { return size() == 0; } + public native long size(); + + public Pointer front() { try (Iterator it = begin()) { return it.get(); } } + public native void insert(Pointer value); + public native void erase(Pointer value); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @Const Pointer get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java new file mode 100644 index 00000000000..48b43691a11 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static 
org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// Struct containing info of a memory segment (i.e. one contiguous cudaMalloc). +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class SegmentInfo extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public SegmentInfo() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SegmentInfo(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SegmentInfo(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public SegmentInfo position(long position) { + return (SegmentInfo)super.position(position); + } + @Override public SegmentInfo getPointer(long i) { + return new SegmentInfo((Pointer)this).offsetAddress(i); + } + + public native @Cast("int64_t") long device(); public native SegmentInfo device(long setter); + public native @Cast("int64_t") @Name("address") long _address(); public native SegmentInfo _address(long setter); + public native @Cast("int64_t") long total_size(); public native SegmentInfo total_size(long setter); + public native @Cast("int64_t") long requested_size(); public native SegmentInfo requested_size(long setter); // unrounded, actually requested size + public native @Cast("int64_t") long allocated_size(); public native SegmentInfo allocated_size(long setter); + public native @Cast("int64_t") long active_size(); public native SegmentInfo active_size(long setter); + public native @Cast("cudaStream_t") Pointer stream(); public native SegmentInfo stream(Pointer setter); + public native @Cast("bool") boolean is_large(); public native 
SegmentInfo is_large(boolean setter); + public native @Cast("bool") boolean is_expandable(); public native SegmentInfo is_expandable(boolean setter); + public native @ByRef @Cast("c10::cuda::MempoolId_t*") DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair owner_private_pool_id(); public native SegmentInfo owner_private_pool_id(DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair setter); + public native @StdVector BlockInfo blocks(); public native SegmentInfo blocks(BlockInfo setter); + public native @SharedPtr GatheredContext context_when_allocated(); public native SegmentInfo context_when_allocated(GatheredContext setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java new file mode 100644 index 00000000000..2d9f5616251 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java @@ -0,0 +1,44 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class SnapshotInfo extends Pointer { + static { Loader.load(); } + /** Default native constructor. 
*/ + public SnapshotInfo() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SnapshotInfo(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SnapshotInfo(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public SnapshotInfo position(long position) { + return (SnapshotInfo)super.position(position); + } + @Override public SnapshotInfo getPointer(long i) { + return new SnapshotInfo((Pointer)this).offsetAddress(i); + } + + public native @StdVector SegmentInfo segments(); public native SnapshotInfo segments(SegmentInfo setter); + public native @StdVector TraceEntryVector device_traces(); public native SnapshotInfo device_traces(TraceEntryVector setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java new file mode 100644 index 00000000000..1bf97d1470c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java @@ -0,0 +1,59 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// TODO: Turn this into 
an honest to goodness class. I briefly attempted to do +// this, but it was a bit irritating to figure out how to also correctly +// apply pimpl pattern so I didn't have to leak any internal implementation +// details in the header (CUDACachingAllocator could be made a pimpl, but +// you also need to appropriately define a class which is a subclass +// of Allocator. Not impossible, but required a bit more surgery than +// I wanted to do at the time.) +// +// Why is this using a namespace rather than old-style THCCachingAllocator_ +// prefix? Mostly because it made the HIPify rules easier to write; _ is +// not counted as a word boundary, so you would otherwise have to list each +// of these functions. + +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class Stat extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public Stat() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public Stat(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Stat(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public Stat position(long position) { + return (Stat)super.position(position); + } + @Override public Stat getPointer(long i) { + return new Stat((Pointer)this).offsetAddress(i); + } + + public native @Cast("int64_t") long current(); public native Stat current(long setter); + public native @Cast("int64_t") long peak(); public native Stat peak(long setter); + public native @Cast("int64_t") long allocated(); public native Stat allocated(long setter); + public native @Cast("int64_t") long freed(); public native Stat freed(long setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java new file mode 100644 index 00000000000..1791fc3b6ae --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java @@ -0,0 +1,101 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("c10::cuda::CUDACachingAllocator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class TraceEntry extends Pointer { + static { Loader.load(); } + /** Pointer cast 
constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TraceEntry(Pointer p) { super(p); } + + public enum Action { + ALLOC(0), // API made to the caching allocator for new memory + FREE_REQUESTED(1), // API call made to the caching allocator to free memory + FREE_COMPLETED(2), // The allocator might have to delay a free because + // it is still in use on another stream via record_stream + // This event is generated when a free actually completes. + SEGMENT_ALLOC(3), // a call to cudaMalloc to get more memory from the OS + SEGMENT_FREE(4), // a call to cudaFree to return memory to the OS (e.g. to + // defragment or empty_caches) + SEGMENT_MAP(5), // a call to cuMemMap (used with expandable_segments) + SEGMENT_UNMAP(6), // unmap part of a segment (used with expandable segments) + SNAPSHOT(7), // a call to snapshot, used to correlate memory snapshots to trace + // events + OOM(8);// the allocator threw an OutOfMemoryError (addr_ is the amount of free + // bytes reported by cuda) + + public final int value; + private Action(int v) { this.value = v; } + private Action(Action e) { this.value = e.value; } + public Action intern() { for (Action e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } + } + public TraceEntry( + Action action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream, + @SharedPtr GatheredContext context/*=nullptr*/) { super((Pointer)null); allocate(action, addr, size, stream, context); } + private native void allocate( + Action action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream, + @SharedPtr GatheredContext context/*=nullptr*/); + public TraceEntry( + Action action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream) { super((Pointer)null); allocate(action, addr, size, stream); } + private native void allocate( + Action 
action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream); + public TraceEntry( + @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream, + @SharedPtr GatheredContext context/*=nullptr*/) { super((Pointer)null); allocate(action, addr, size, stream, context); } + private native void allocate( + @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream, + @SharedPtr GatheredContext context/*=nullptr*/); + public TraceEntry( + @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream) { super((Pointer)null); allocate(action, addr, size, stream); } + private native void allocate( + @Cast("c10::cuda::CUDACachingAllocator::TraceEntry::Action") int action, + @Cast("int64_t") long addr, + @Cast("size_t") long size, + @Cast("cudaStream_t") Pointer stream); + public native Action action_(); public native TraceEntry action_(Action setter); + public native @Cast("int64_t") long addr_(); public native TraceEntry addr_(long setter); // for OOM, this is the amount of free bytes reported by cuda + public native @SharedPtr GatheredContext context_(); public native TraceEntry context_(GatheredContext setter); + public native @Cast("cudaStream_t") Pointer stream_(); public native TraceEntry stream_(Pointer setter); + public native @Cast("int64_t") long size_(); public native TraceEntry size_(long setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java new file mode 100644 index 00000000000..fe5d7b7b9ea --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java @@ 
-0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + +@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class TraceEntryVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public TraceEntryVector(Pointer p) { super(p); } + public TraceEntryVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public TraceEntry front() { return get(0); } + public TraceEntry back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef TraceEntry get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const TraceEntry get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index b4ba6b043fd..7d570b807fa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -305,6 +305,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../FunctionSchemaVector.java +// Targeting ../DataPtrVector.java + + // Targeting ../StringTensorDictItemVector.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index ea0bf0de087..00e479ea652 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -26,6 +26,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { @Namespace("at") public static native @ByVal @Name("make_generator") Generator make_generator_cuda(@Cast("int8_t&&") byte device_index); +// Targeting ../cuda/TraceEntryVector.java + + // Targeting 
../cuda/DeviceAssertionsDataVector.java @@ -35,6 +38,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // Targeting ../cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java +// Targeting ../cuda/PointerSet.java + + // Parsed from c10/util/ArrayRef.h //===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===// @@ -667,6 +673,156 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 +// Parsed from c10/cuda/CUDACachingAllocator.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + +// Caching allocator will execute every registered callback if it unable to find +// block inside of already allocated area. + + +// #define REGISTER_FREE_MEMORY_CALLBACK(name, ...) +// C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__); +// Targeting ../cuda/Stat.java + + + +@Namespace("c10::cuda::CUDACachingAllocator") public enum StatType { + AGGREGATE(0), + SMALL_POOL(1), + LARGE_POOL(2), + NUM_TYPES(3);// remember to update this whenever a new stat type is added + + public final long value; + private StatType(long v) { this.value = v; } + private StatType(StatType e) { this.value = e.value; } + public StatType intern() { for (StatType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../cuda/DeviceStats.java + + +// Targeting ../cuda/BlockInfo.java + + +// Targeting ../cuda/SegmentInfo.java + + +// Targeting ../cuda/AllocatorState.java + + +// Targeting ../cuda/TraceEntry.java + + +// Targeting ../cuda/SnapshotInfo.java + + +// Targeting ../cuda/CheckpointDelta.java + + + +@Namespace("c10::cuda::CUDACachingAllocator") public enum RecordContext { + NEVER(0), + STATE(1), // only keep stacks for active allocations + ALLOC(2), // additionally keep stacks for allocations in the 
trace history + ALL(3);// additionally record stacks for when something is freed + + public final int value; + private RecordContext(int v) { this.value = v; } + private RecordContext(RecordContext e) { this.value = e.value; } + public RecordContext intern() { for (RecordContext e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +@Namespace("c10::cuda::CUDACachingAllocator") public static native void setAllocatorSettings(@StdString BytePointer env); +@Namespace("c10::cuda::CUDACachingAllocator") public static native void setAllocatorSettings(@StdString String env); + +// Size pretty-printer +@Namespace("c10::cuda::CUDACachingAllocator") public static native @StdString BytePointer format_size(@Cast("uint64_t") long size); +// Targeting ../cuda/CUDAAllocator.java + + + +// Allocator object, statically initialized +// See BackendInitializer in CUDACachingAllocator.cpp. +// Atomic loads on x86 are just normal loads, +// (atomic stores are different), so reading this value +// is no different than loading a pointer. + + +@Namespace("c10::cuda::CUDACachingAllocator") public static native @Name("get") CUDAAllocator getAllocator(); + +// Called directly by clients. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +// CUDAGraph interactions + + + + + + + + + + + + + +// Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE + + + + + + + + + // namespace CUDACachingAllocator + // namespace cuda + // namespace c10 + + // Parsed from c10/cuda/impl/CUDAGuardImpl.h // #pragma once @@ -683,9 +839,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #include // #include -// Targeting ../cuda/CUDAGuardImpl.java - - // namespace impl // namespace cuda diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 04ff00811f9..d2246eb7f9d 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -647,6 +647,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector").pointerTypes("SymIntVector").define()) .put(new Info("std::vector >").pointerTypes("SharedSugaredValueVector").define()) .put(new Info("const std::vector").pointerTypes("FunctionSchemaVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("DataPtrVector").define()) // Used from cuda only ; @@ -1589,20 +1590,7 @@ public void map(InfoMap infoMap) { new PointerInfo("const torch::jit::CompilationUnit"), new PointerInfo("torch::jit::SugaredValue") }) { - // See issue #670 - String[] cppNames = new String[pi.argumentNames.length + pi.otherCppNames.length]; - int i = 0; - for (String n : pi.argumentNames) cppNames[i++] = template("std::shared_ptr", n); - for (String n : pi.otherCppNames) cppNames[i++] = n; - // Specifying the parameter of the annotation allows to disambiguate cases where a class can store either a - // std::shared_ptr or std::shared_ptr (like CompilationUnit) - // .valueTypes("@Cast(\"const torch::jit::CompilationUnit*\") CompilationUnit") seems to work too but for obscure reason - infoMap.put(new 
Info(cppNames).annotations("@SharedPtr(\"" + pi.argumentNames[0] + "\")").pointerTypes(pi.javaBaseName)); - - // Also annotate constructor of target class to ensure only one shared_ptr exists for each instance - String n = pi.argumentNames[0].substring(pi.argumentNames[0].lastIndexOf(' ') + 1); // Remove possible const - String n2 = n.equals("torch::nn::Module") ? "JavaCPP_torch_0003a_0003ann_0003a_0003aModule" : n; - infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@SharedPtr", "@Name(\"std::make_shared<" + n2 + ">\")")); + pi.makeShared(infoMap); } @@ -2425,7 +2413,7 @@ else if (!baseJavaName.equals("Future")) } } - private static class PointerInfo { + static class PointerInfo { String javaBaseName; String javaName; final String[] argumentNames; @@ -2450,6 +2438,23 @@ PointerInfo javaName(String jn) { javaName = jn; return this; } + + void makeShared(InfoMap infoMap) { + // See issue #670 + String[] cppNames = new String[argumentNames.length + otherCppNames.length]; + int i = 0; + for (String n : argumentNames) cppNames[i++] = template("std::shared_ptr", n); + for (String n : otherCppNames) cppNames[i++] = n; + // Specifying the parameter of the annotation allows to disambiguate cases where a class can store either a + // std::shared_ptr or std::shared_ptr (like CompilationUnit) + // .valueTypes("@Cast(\"const torch::jit::CompilationUnit*\") CompilationUnit") seems to work too but for obscure reason + infoMap.put(new Info(cppNames).annotations("@SharedPtr(\"" + argumentNames[0] + "\")").pointerTypes(javaBaseName)); + + // Also annotate constructor of target class to ensure only one shared_ptr exists for each instance + String n = argumentNames[0].substring(argumentNames[0].lastIndexOf(' ') + 1); // Remove possible const + String n2 = n.equals("torch::nn::Module") ? 
"JavaCPP_torch_0003a_0003ann_0003a_0003aModule" : n; + infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@SharedPtr", "@Name(\"std::make_shared<" + n2 + ">\")")); + } } @Namespace("std") public static native @MemberGetter @ByRef @Cast("std::istream*") Pointer cin(); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index a3c2a504c7f..bc0843ecd55 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -90,7 +90,39 @@ public void map(InfoMap infoMap) { "at::CUDAGeneratorImpl" ).skip()) - //// Already defined in main torch + + //// std::unordered_set + .put(new Info("std::unordered_set").pointerTypes("PointerSet").define()) + + //// std::atomic + .put(new Info("c10::cuda::CUDACachingAllocator::allocator").skip()) // Relies on CUDACachingAllocator.get() + .put(new Info("std::atomic").cast().pointerTypes("PyInterpreter")) + + //// std::vector + .put(new Info("std::vector").pointerTypes("DeviceAssertionsDataVector").define()) + .put(new Info("std::vector").pointerTypes("CUDAKernelLaunchInfoVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("TraceEntryVector").define()) + + //// Function pointers + .put(new Info("std::shared_ptr (*)()", "c10::cuda::CUDACachingAllocator::CreateContextFn").pointerTypes("GatheredContextSupplier").valueTypes("GatheredContextSupplier").skip()) + .put(new Info("c10::cuda::CUDACachingAllocator::CUDAAllocator::recordHistory", "c10::cuda::CUDACachingAllocator::recordHistory").skip()) // Until #720 solved + ; + + //// Avoiding name clashes by skipping or renaming + // Keep the instance methods of CUDAAllocator only, to not pollute global class + infoMap.put(new Info("c10::cuda::CUDACachingAllocator::get").javaNames("getAllocator")); + for (String s : new String[]{"get", "raw_alloc", 
"raw_alloc_with_stream", "raw_delete", "init", + "setMemoryFraction", "emptyCache", "cacheInfo", "getBaseAllocation", "recordStream", "getDeviceStats", + "resetAccumulatedStats", "resetPeakStats", "snapshot", "getCheckpointState", "setCheckpointPoolState", + "beginAllocateStreamToPool", "endAllocateStreamToPool", "isHistoryEnabled", "recordHistory", + "checkPoolLiveAllocations", "attachOutOfMemoryObserver", "releasePool", "getIpcDevPtr", "name", + "memcpyAsync", "enablePeerAccess"}) { + infoMap.put(new Info("c10::cuda::CUDACachingAllocator::CUDAAllocator::" + s)); // Necessary or the ns qualifying algorithm of Parser will pick c10::cuda::CUDACachingAllocator instead + infoMap.put(new Info("c10::cuda::CUDACachingAllocator::" + s).skip()); + } + + //// Already defined in main torch + infoMap .put(new Info("c10::Stream").pointerTypes("Stream")) .put(new Info("c10::optional").pointerTypes("StreamOptional")) .put(new Info("c10::optional").pointerTypes("DeviceOptional")) @@ -99,14 +131,12 @@ public void map(InfoMap infoMap) { .put(new Info("std::tuple").pointerTypes("T_IntInt_T")) .put(new Info("c10::optional").pointerTypes("ByteOptional")) .put(new Info("c10::IntArrayRef", "at::IntArrayRef").pointerTypes("LongArrayRef")) + .put(new Info("std::vector").pointerTypes("DataPtrVector")) .put(new Info("c10::DeviceIndex").valueTypes("byte")) .put(new Info("c10::StreamId").valueTypes("long")) .put(new Info("c10::cuda::CaptureStatus").valueTypes("int").cast().skip()) // Enum doesn't parse .put(new Info("std::pair,std::vector >").pointerTypes("DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair").define()) - .put(new Info("std::vector").pointerTypes("DeviceAssertionsDataVector").define()) - .put(new Info("std::vector").pointerTypes("CUDAKernelLaunchInfoVector").define()) - .put(new Info("std::atomic").cast().pointerTypes("PyInterpreter")) .put(new Info("c10::CuDNNError").purify()) .put(new Info("c10::impl::GPUTrace::gpuTraceState").skip()) .put(new 
Info("at::native::RNNDescriptor::dropout_desc_").skip()) @@ -115,6 +145,8 @@ public void map(InfoMap infoMap) { "at::native::cudnnTypeToString", "at::native::getCudnnDataType", "at::native::cudnn_version", "c10::cuda::c10_retrieve_device_side_assertion_info").skip()) + .put(new Info("c10::cuda::CUDACachingAllocator::CheckpointDelta").immutable()) // at::DataPtr is not constructible + .put(new Info( "at::native::Descriptor", "at::native::Descriptor", @@ -139,16 +171,20 @@ public void map(InfoMap infoMap) { .put(new Info( // Enums "cudnnActivationMode_t", "cudnnLossNormalizationMode_t", "cudnnRNNInputMode_t", "cudnnDirectionMode_t", "cudnnRNNMode_t", "cudaStreamCaptureMode", "cudnnDataType_t", "cudnnNanPropagation_t", - "cusparseStatus_t", "cusolverStatus_t", "cudnnRNNAlgo_t", "cudnnNanPropagation_t", "cublasStatus_t" + "cusparseStatus_t", "cusolverStatus_t", "cudnnRNNAlgo_t", "cudnnNanPropagation_t", "cublasStatus_t", "cudaError_t" ).valueTypes("int").cast()) ; new torch.ArrayInfo("CUDAStream").elementTypes("c10::cuda::CUDAStream").mapArrayRef(infoMap); + new torch.PointerInfo("c10::cuda::CUDACachingAllocator::AllocatorState").makeShared(infoMap); + // Classes that are not part of the API (no TORCH_API nor C10_API) and are not argument nor return type of API methods. 
infoMap.put(new Info( "c10::cuda::OptionalCUDAGuard", - "c10::cuda::OptionalCUDAStreamGuard" + "c10::cuda::OptionalCUDAStreamGuard", + "c10::cuda::impl::CUDAGuardImpl", + "c10::FreeMemoryCallback" // in API, but useless as long as we don't map FreeCudaMemoryCallbacksRegistry, ).skip()) ; diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 18079c8faf3..5a88880145f 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -19,7 +19,7 @@ #include "ATen/cudnn/Handle.h" #include "ATen/cudnn/Utils.h" #include "c10/cuda/CUDAGraphsC10Utils.h" -// #include "c10/cuda/CUDACachingAllocator.h", // If map needed, rename global symbols +#include "c10/cuda/CUDACachingAllocator.h", #include "c10/cuda/impl/CUDAGuardImpl.h" #include "ATen/cudnn/Descriptors.h" #include "ATen/cudnn/Types.h" From 49f2f189158cd2fd5a987ee149958b6d36a7e10e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 17 Oct 2023 13:29:32 +0200 Subject: [PATCH 06/26] Update MNIST sample in README --- pytorch/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pytorch/README.md b/pytorch/README.md index be4da837483..1777f21f3f9 100644 --- a/pytorch/README.md +++ b/pytorch/README.md @@ -93,9 +93,9 @@ public class SimpleMNIST { static class Net extends Module { Net() { // Construct and register two Linear submodules. - fc1 = register_module("fc1", new LinearImpl(784, 64)); - fc2 = register_module("fc2", new LinearImpl(64, 32)); - fc3 = register_module("fc3", new LinearImpl(32, 10)); + register_module("fc1", fc1 = new LinearImpl(784, 64)); + register_module("fc2", fc2 = new LinearImpl(64, 32)); + register_module("fc3", fc3 = new LinearImpl(32, 10)); } // Implement the Net's algorithm. 
@@ -109,7 +109,7 @@ public class SimpleMNIST { } // Use one of many "standard library" modules. - LinearImpl fc1 = null, fc2 = null, fc3 = null; + final LinearImpl fc1, fc2, fc3; } public static void main(String[] args) throws Exception { From 57d89d06b50ad8a7711b6b08e545394fc3cb83fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 17 Oct 2023 14:53:23 +0200 Subject: [PATCH 07/26] Skip not-exported function --- pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index d2246eb7f9d..3b080910b57 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -2123,7 +2123,8 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::jit::Code::operator <<(std::ostream&, const torch::jit::Code&)", // The friend operator is truly a member of torch::jit and not torch::jit::Code "torch::jit::ClassDef::create", "torch::profiler::impl::getNvtxStr", - "torch::autograd::add_node_to_current_graph_task_exec_info" + "torch::autograd::add_node_to_current_graph_task_exec_info", + "at::native::get_numel_from_nested_size_tensor" ).skip()); //// Aliases necessary because of Parser limited namespace resolution From 1660faed67d02e34b950046d461a46cdbbb6ddae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 17 Oct 2023 17:50:06 +0200 Subject: [PATCH 08/26] gen update --- pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 7d570b807fa..88278b681dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -58416,7 +58416,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include @Namespace("at::native") public static native @Cast("bool") boolean nested_tensor_impl_is_contiguous(@Const NestedTensorImpl nt); -@Namespace("at::native") public static native @Cast("int64_t") long get_numel_from_nested_size_tensor(@Const @ByRef Tensor tensor); + // Targeting ../NestedTensorImpl.java From af6b64e19bf6a3932a53badd28cc1eabc0f49c56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Wed, 18 Oct 2023 16:11:28 +0200 Subject: [PATCH 09/26] Add CUDAAllocator.recordHistory --- .../pytorch/cuda/ActivationDescriptor.java | 1 + .../bytedeco/pytorch/cuda/AllocatorState.java | 1 + .../org/bytedeco/pytorch/cuda/BlockInfo.java | 1 + .../pytorch/cuda/CTCLossDescriptor.java | 1 + .../bytedeco/pytorch/cuda/CUDAAllocator.java | 12 ++++++- .../org/bytedeco/pytorch/cuda/CUDAGuard.java | 1 + .../pytorch/cuda/CUDAKernelLaunchInfo.java | 1 + .../cuda/CUDAKernelLaunchInfoVector.java | 1 + .../cuda/CUDAKernelLaunchRegistry.java | 1 + .../pytorch/cuda/CUDAMultiStreamGuard.java | 1 + .../org/bytedeco/pytorch/cuda/CUDAStream.java | 1 + .../pytorch/cuda/CUDAStreamArrayRef.java | 1 + .../cuda/CUDAStreamCaptureModeGuard.java | 1 + .../pytorch/cuda/CUDAStreamGuard.java | 1 + .../pytorch/cuda/CheckpointDelta.java | 1 + .../org/bytedeco/pytorch/cuda/Constant.java | 1 + .../pytorch/cuda/ConvolutionDescriptor.java | 1 + .../org/bytedeco/pytorch/cuda/CuDNNError.java | 1 + .../pytorch/cuda/DeviceAssertionData.java | 1 + .../pytorch/cuda/DeviceAssertionsData.java | 1 + .../cuda/DeviceAssertionsDataVector.java | 1 + ...aVectorCUDAKernelLaunchInfoVectorPair.java | 1 + .../bytedeco/pytorch/cuda/DeviceStats.java | 1 + .../pytorch/cuda/DropoutDescriptor.java | 1 + .../pytorch/cuda/FilterDescriptor.java | 1 + .../org/bytedeco/pytorch/cuda/PointerSet.java | 1 + 
.../bytedeco/pytorch/cuda/RNNDescriptor.java | 1 + .../bytedeco/pytorch/cuda/SegmentInfo.java | 1 + .../bytedeco/pytorch/cuda/SnapshotInfo.java | 1 + .../cuda/SpatialTransformerDescriptor.java | 1 + .../java/org/bytedeco/pytorch/cuda/Stat.java | 1 + .../pytorch/cuda/TensorDescriptor.java | 1 + .../org/bytedeco/pytorch/cuda/TraceEntry.java | 1 + .../pytorch/cuda/TraceEntryVector.java | 1 + .../bytedeco/pytorch/global/torch_cuda.java | 1 + .../functions/GatheredContextSupplier.java | 32 +++++++++++++++++++ .../bytedeco/pytorch/presets/torch_cuda.java | 2 +- 37 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java index c9e732a0271..c55522b1dcb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java index b5ceff70d2a..9e5d6a6d04b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AllocatorState.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java index 117338acc13..17f04057a21 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/BlockInfo.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java index 9f0548222c3..899fce74b7e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java index 8803fdd9386..3fb69c5a563 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; @@ -55,7 +56,16 @@ public native void beginAllocateStreamToPool( public native @SharedPtr Pointer getIpcDevPtr(@StdString BytePointer handle); public native @SharedPtr Pointer getIpcDevPtr(@StdString String handle); public 
native @Cast("bool") boolean isHistoryEnabled(); - + public native void recordHistory( + @Cast("bool") boolean enabled, + GatheredContextSupplier context_recorder, + @Cast("size_t") long alloc_trace_max_entries, + RecordContext when); + public native void recordHistory( + @Cast("bool") boolean enabled, + GatheredContextSupplier context_recorder, + @Cast("size_t") long alloc_trace_max_entries, + @Cast("c10::cuda::CUDACachingAllocator::RecordContext") int when); public native void attachOutOfMemoryObserver(@ByVal @Cast("c10::cuda::CUDACachingAllocator::OutOfMemoryObserver*") Pointer observer); public native void enablePeerAccess(int dev, int dev_to_access); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java index 0366d8b4f34..1b9bfdde7cf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java index 04011970743..0c2fdd6c2a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java index 51971d0db4e..a6f08d60045 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java index cdf41555bba..c94ecd5c280 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java index 73361de9189..19b4032e95e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java index 11c4f5d399e..4eaa4668bf0 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java index 25ad53c806e..9525ac33be1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java index 62880eff1fb..f0a9a9ce4fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java index e0b1d7666f2..a170a91450b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java @@ 
-3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java index 98695704165..8eba5a0f9a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CheckpointDelta.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java index 04ef3e72ac4..9f75d3238e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java index bc73f09d29e..aac174332fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import 
org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java index 4aefce154c0..332c0a4bb9c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java index e20fdc130b7..b5e40e01bc0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java index 87a0fcbf46f..2afc330981c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java index 4b52d62a018..e6676abb7a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java index bb384e719ea..09abb0078af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java index 1f128e2650f..ec8ed6dedb4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java index d28d4c120ca..92be8c886e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java index 355cfec9694..b69316a4049 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java index 8667ea71341..d30737fa3d0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/PointerSet.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java index 5ac9cefd560..4573cd5b9eb 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java index 48b43691a11..a00d83f46fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SegmentInfo.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java index 2d9f5616251..73f1ecf4981 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java index 1e80e1ae9e9..9217fa87549 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java @@ -3,6 +3,7 @@ package 
org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java index 1bf97d1470c..80ef5cd148c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java index bd394ed4f0d..5198092bc55 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java index 1791fc3b6ae..560933a125d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntry.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java index fe5d7b7b9ea..dd8a661d8e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TraceEntryVector.java @@ -3,6 +3,7 @@ package org.bytedeco.pytorch.cuda; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index 00e479ea652..bffcb64bb5c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -5,6 +5,7 @@ import org.bytedeco.pytorch.cuda.*; import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Error; import org.bytedeco.pytorch.global.torch.DeviceType; import org.bytedeco.pytorch.global.torch.ScalarType; diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java new file mode 100644 index 00000000000..241a46f8c49 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java @@ -0,0 +1,32 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.javacpp.annotation.SharedPtr; +import org.bytedeco.pytorch.GatheredContext; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GatheredContextSupplier 
extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public GatheredContextSupplier(Pointer p) { + super(p); + } + + protected GatheredContextSupplier() { + allocate(); + } + + private native void allocate(); + + // See issue JavaCPP #720 + public native @Cast({"", "std::shared_ptr"}) @SharedPtr GatheredContext call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index bc0843ecd55..42c9e831050 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -75,6 +75,7 @@ public void map(InfoMap infoMap) { .put(new Info().enumerate().friendly()) .put(new Info().javaText("import org.bytedeco.pytorch.*;")) + .put(new Info().javaText("import org.bytedeco.pytorch.functions.*;")) .put(new Info().javaText("import org.bytedeco.pytorch.Error;")) .put(new Info().javaText("import org.bytedeco.pytorch.global.torch.DeviceType;")) .put(new Info().javaText("import org.bytedeco.pytorch.global.torch.ScalarType;")) @@ -105,7 +106,6 @@ public void map(InfoMap infoMap) { //// Function pointers .put(new Info("std::shared_ptr (*)()", "c10::cuda::CUDACachingAllocator::CreateContextFn").pointerTypes("GatheredContextSupplier").valueTypes("GatheredContextSupplier").skip()) - .put(new Info("c10::cuda::CUDACachingAllocator::CUDAAllocator::recordHistory", "c10::cuda::CUDACachingAllocator::recordHistory").skip()) // Until #720 solved ; //// Avoiding name clashes by skipping or renaming From 2a89d39fc6f299ad6fa70d756170cefaa4ec5e34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Wed, 18 Oct 2023 23:11:39 +0200 Subject: [PATCH 10/26] Add TensorBase.data_ptr_byte --- pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java | 1 + 
pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java | 1 + 2 files changed, 2 insertions(+) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java index ff968a7d753..b0201f5c4a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java @@ -329,6 +329,7 @@ private native void allocate( // const because of the vast number of clients that // rely on this. public native @Name("data_ptr") BytePointer data_ptr_char(); + public native @Cast("uint8_t*") @Name("data_ptr") BytePointer data_ptr_byte(); public native @Name("data_ptr") ShortPointer data_ptr_short(); public native @Name("data_ptr") IntPointer data_ptr_int(); public native @Cast("int64_t*") @Name("data_ptr") LongPointer data_ptr_long(); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 3b080910b57..5c4e8fe1d56 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1795,6 +1795,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset )) .put(new Info("c10::util::get_type_index").javaNames("get_type_index_string")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_char")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_byte")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_short")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_int")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_long")) From db626e17f595a794d67ff3becda0f9446e5cb9df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 23 Oct 2023 09:24:55 +0200 Subject: [PATCH 11/26] Skip not exported CUDACachingAllocator::format_size --- .../bytedeco/pytorch/global/torch_cuda.java | 2 +- 
.../org/bytedeco/pytorch/presets/torch.java | 27 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index bffcb64bb5c..c41b6bb7b44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -751,7 +751,7 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { @Namespace("c10::cuda::CUDACachingAllocator") public static native void setAllocatorSettings(@StdString String env); // Size pretty-printer -@Namespace("c10::cuda::CUDACachingAllocator") public static native @StdString BytePointer format_size(@Cast("uint64_t") long size); + // Targeting ../cuda/CUDAAllocator.java diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 5c4e8fe1d56..9e9e63af399 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -2106,26 +2106,27 @@ We need either to put an annotation info on each member, or javaName("@NoOffset //// TORCH_API and the like are not honored on Linux but are on Windows. We must skip all public //// functions not marked as part of API. 
infoMap.put(new Info( - "c10::detail::makeBaseType", - "torch::detail::constructSchemaOrName", - "at::operator <<(std::ostream&, at::Range&)", - "at::impl::VariableHooksInterface::_register_hook", - "caffe2::serialize::detail::getPadding", - "at::assert_no_partial_overlap(c10::TensorImpl*, c10::TensorImpl*)", "at::TensorIteratorBase::apply_perm_and_mul", + "at::assert_no_partial_overlap(c10::TensorImpl*, c10::TensorImpl*)", + "at::impl::VariableHooksInterface::_register_hook", + "at::native::get_numel_from_nested_size_tensor", + "at::operator <<(std::ostream&, at::Range&)", + "c10::cuda::CUDACachingAllocator::format_size", + "c10::detail::makeBaseType", + "c10::ivalue::Await::operator <<", "c10::ivalue::ConstantString::operator <<", // No idea why these are not exported. TODO: dig - "c10::ivalue::Future::operator <<", + "c10::ivalue::EnumHolder::is", // Calls ==, which is not exported "c10::ivalue::EnumHolder::operator <<", - "c10::ivalue::Await::operator <<", "c10::ivalue::EnumHolder::operator ==", // The friend operator is truly a member of c10::ivalue and not c10::ivalue::EnumHolder - "c10::ivalue::EnumHolder::is", // Calls ==, which is not exported "c10::ivalue::EnumHolder::unqualifiedClassName", + "c10::ivalue::Future::operator <<", "c10::operator <<(std::ostream&, c10::SourceLocation&)", - "torch::jit::Code::operator <<(std::ostream&, const torch::jit::Code&)", // The friend operator is truly a member of torch::jit and not torch::jit::Code - "torch::jit::ClassDef::create", - "torch::profiler::impl::getNvtxStr", + "caffe2::serialize::detail::getPadding", "torch::autograd::add_node_to_current_graph_task_exec_info", - "at::native::get_numel_from_nested_size_tensor" + "torch::detail::constructSchemaOrName", + "torch::jit::ClassDef::create", + "torch::jit::Code::operator <<(std::ostream&, const torch::jit::Code&)", // The friend operator is truly a member of torch::jit and not torch::jit::Code + "torch::profiler::impl::getNvtxStr" ).skip()); //// Aliases necessary 
because of Parser limited namespace resolution From 99dbdad9359494efc16e26daa2d64ddbcc2a1461 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 24 Oct 2023 00:37:05 +0200 Subject: [PATCH 12/26] Map generic data loaders --- .../bytedeco/pytorch/ChunkBatchDataset.java | 4 +- .../pytorch/ChunkBatchSharedBatchDataset.java | 2 +- .../ChunkBatchSharedTensorBatchDataset.java | 40 ++ .../org/bytedeco/pytorch/ChunkDataReader.java | 4 +- .../org/bytedeco/pytorch/ChunkDataset.java | 8 +- .../pytorch/ChunkMapBatchDataset.java | 2 +- .../org/bytedeco/pytorch/ChunkMapDataset.java | 8 +- .../pytorch/ChunkMapTensorBatchDataset.java | 39 ++ .../pytorch/ChunkMapTensorDataset.java | 49 ++ .../pytorch/ChunkRandomDataLoader.java | 2 +- .../pytorch/ChunkRandomDataLoaderBase.java | 3 +- .../pytorch/ChunkRandomTensorDataLoader.java | 30 ++ .../ChunkRandomTensorDataLoaderBase.java | 57 +++ .../pytorch/ChunkSharedBatchDataset.java | 4 +- .../ChunkSharedTensorBatchDataset.java | 52 +++ .../pytorch/ChunkStatefulDataset.java | 2 +- .../pytorch/ChunkStatefulTensorDataset.java | 34 ++ .../pytorch/ChunkTensorBatchDataset.java | 39 ++ .../pytorch/ChunkTensorDataReader.java | 48 ++ .../bytedeco/pytorch/ChunkTensorDataset.java | 68 +++ .../bytedeco/pytorch/ExampleCollation.java | 2 +- .../org/bytedeco/pytorch/ExampleIterator.java | 2 +- .../org/bytedeco/pytorch/ExampleStack.java | 2 +- .../org/bytedeco/pytorch/ExampleVector.java | 2 +- ...erator.java => ExampleVectorIterator.java} | 16 +- .../pytorch/ExampleVectorOptional.java | 2 +- .../pytorch/FullDataLoaderOptions.java | 42 ++ .../gen/java/org/bytedeco/pytorch/Future.java | 2 +- .../bytedeco/pytorch/JavaBatchDataset.java | 40 ++ .../org/bytedeco/pytorch/JavaDataset.java | 45 ++ .../org/bytedeco/pytorch/JavaDatasetBase.java | 34 ++ .../JavaDistributedRandomDataLoader.java | 37 ++ .../JavaDistributedRandomDataLoaderBase.java | 57 +++ ...JavaDistributedRandomTensorDataLoader.java | 37 ++ 
...DistributedRandomTensorDataLoaderBase.java | 57 +++ .../JavaDistributedSequentialDataLoader.java | 37 ++ ...vaDistributedSequentialDataLoaderBase.java | 57 +++ ...DistributedSequentialTensorDataLoader.java | 37 ++ ...ributedSequentialTensorDataLoaderBase.java | 57 +++ .../pytorch/JavaRandomDataLoader.java | 37 ++ .../pytorch/JavaRandomDataLoaderBase.java | 57 +++ .../pytorch/JavaRandomTensorDataLoader.java | 37 ++ .../JavaRandomTensorDataLoaderBase.java | 57 +++ .../pytorch/JavaSequentialDataLoader.java | 37 ++ .../pytorch/JavaSequentialDataLoaderBase.java | 57 +++ .../JavaSequentialTensorDataLoader.java | 37 ++ .../JavaSequentialTensorDataLoaderBase.java | 57 +++ .../pytorch/JavaStatefulBatchDataset.java | 39 ++ .../pytorch/JavaStatefulDataLoader.java | 30 ++ .../pytorch/JavaStatefulDataLoaderBase.java | 57 +++ .../bytedeco/pytorch/JavaStatefulDataset.java | 47 ++ .../pytorch/JavaStatefulDatasetBase.java | 34 ++ .../JavaStatefulTensorBatchDataset.java | 39 ++ .../pytorch/JavaStatefulTensorDataLoader.java | 30 ++ .../JavaStatefulTensorDataLoaderBase.java | 57 +++ .../pytorch/JavaStatefulTensorDataset.java | 43 ++ .../JavaStatefulTensorDatasetBase.java | 34 ++ .../pytorch/JavaStreamBatchDataset.java | 39 ++ .../pytorch/JavaStreamDataLoader.java | 37 ++ .../pytorch/JavaStreamDataLoaderBase.java | 57 +++ .../bytedeco/pytorch/JavaStreamDataset.java | 44 ++ .../pytorch/JavaStreamTensorBatchDataset.java | 39 ++ .../pytorch/JavaStreamTensorDataLoader.java | 37 ++ .../JavaStreamTensorDataLoaderBase.java | 57 +++ .../pytorch/JavaStreamTensorDataset.java | 40 ++ .../pytorch/JavaTensorBatchDataset.java | 40 ++ .../bytedeco/pytorch/JavaTensorDataset.java | 41 ++ .../pytorch/JavaTensorDatasetBase.java | 34 ++ .../bytedeco/pytorch/MNISTBatchDataset.java | 4 +- .../org/bytedeco/pytorch/MNISTDataset.java | 4 +- .../pytorch/MNISTMapBatchDataset.java | 2 +- .../org/bytedeco/pytorch/MNISTMapDataset.java | 9 +- .../pytorch/MNISTRandomDataLoader.java | 2 +- 
.../pytorch/MNISTRandomDataLoaderBase.java | 3 +- ...chDataset.java => TensorBatchDataset.java} | 5 +- .../org/bytedeco/pytorch/TensorDataset.java | 43 ++ ...pleDataset.java => TensorDatasetBase.java} | 5 +- .../pytorch/TensorExampleCollation.java | 29 ++ .../pytorch/TensorExampleIterator.java | 46 ++ .../pytorch/TensorExampleOptional.java | 35 ++ .../bytedeco/pytorch/TensorExampleStack.java | 42 ++ .../pytorch/TensorExampleVectorIterator.java | 46 ++ .../pytorch/TensorExampleVectorOptional.java | 35 ++ .../bytedeco/pytorch/WeakStorageVector.java | 2 +- .../pytorch/WeakStorageVectorOptional.java | 2 +- .../org/bytedeco/pytorch/global/torch.java | 222 ++++++++- .../org/bytedeco/pytorch/presets/torch.java | 435 +++++++++++++----- .../org/bytedeco/pytorch/include/datasets.h | 47 ++ .../bytedeco/pytorch/presets/torch_include.h | 4 +- 89 files changed, 3141 insertions(+), 192 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{ExampleVectorOptionalIterator.java => ExampleVectorIterator.java} (67%) create mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java 
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorExampleBatchDataset.java => TensorBatchDataset.java} (84%) create mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorExampleDataset.java => TensorDatasetBase.java} (82%) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java create mode 100644 pytorch/src/main/resources/org/bytedeco/pytorch/include/datasets.h diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java index eb2ccc28080..4bf4085b3cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java @@ -16,10 +16,8 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; - // namespace detail -/** A dataset that can yield data only in batches. */ -@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkBatchDataset extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java index 893a57c3d5e..7d0a9cfab3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkBatchSharedBatchDataset extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java new file mode 100644 index 00000000000..74c33b86e3c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) 
+public class ChunkBatchSharedTensorBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkBatchSharedTensorBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + public native @ByVal ChunkMapTensorDataset map(@ByVal TensorExampleStack transform); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java index 59ec42fe86b..d1a899046ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java @@ -24,7 +24,7 @@ * A chunk could be an entire file, such as an audio data file or an image, * or part of a file in the case of a large text-file split based on seek * positions. */ -@Name("torch::data::datasets::ChunkDataReader,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::ChunkDataReader,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkDataReader extends Pointer { static { Loader.load(); } /** Default native constructor. */ @@ -45,7 +45,7 @@ public class ChunkDataReader extends Pointer { /** Read an entire chunk. 
*/ - @Virtual(true) public native @ByVal @Cast("torch::data::datasets::ChunkDataReader,std::vector > >::ChunkType*") ExampleVector read_chunk(@Cast("size_t") long chunk_index); + @Virtual(true) public native @ByVal @Cast("torch::data::datasets::ChunkDataReader,std::vector > >::ChunkType*") ExampleVector read_chunk(@Cast("size_t") long chunk_index); /** Returns the number of chunks available in this reader. */ @Virtual(true) public native @Cast("size_t") long chunk_count(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java index 8a8ccb0a87a..f6f5d695f4f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java @@ -26,7 +26,7 @@ * while the {@code ExampleSampler} determins the order of Examples that are returned * in each {@code get_batch} call. The hierarchical sampling approach used here is * inspired by this paper http://martin.zinkevich.org/publications/nips2010.pdf */ -@Name("torch::data::datasets::ChunkDataset") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::ChunkDataset") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkDataset extends ChunkStatefulDataset { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -45,11 +45,11 @@ public ChunkDataset( ChunkDatasetOptions options, Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); } private native void allocate( - @ByVal @Cast("JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_00020_0003e_00020_0003e*") ChunkDataReader chunk_reader, + @ByVal @Cast("JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_00020_0003e_00020_0003e*") ChunkDataReader chunk_reader, @ByVal RandomSampler chunk_sampler, @ByVal RandomSampler example_sampler, @ByVal ChunkDatasetOptions options, - @ByVal(nullValue = "std::function>&)>()") @Cast("std::function>&)>*") Pointer preprocessing_policy); + @ByVal(nullValue = "std::function>&)>()") @Cast("std::function>&)>*") Pointer preprocessing_policy); /** Default get_batch method of BatchDataset. This method returns * Example batches created from the preloaded chunks. The implemenation @@ -69,7 +69,7 @@ private native void allocate( // provide a references to chunk sampler. Used mainly in distributed data // loading to set the epoch number for the sampler. 
- public native @Cast("torch::data::datasets::ChunkDataset::ChunkSamplerType*") @ByRef RandomSampler chunk_sampler(); + public native @Cast("torch::data::datasets::ChunkDataset::ChunkSamplerType*") @ByRef RandomSampler chunk_sampler(); public native void save(@ByRef OutputArchive archive); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java index a346bc60dd3..5a8825e8c80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkMapBatchDataset extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java index 864f0fb09c2..1a721147661 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java @@ -16,18 +16,16 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; - // namespace detail -/** A {@code MapDataset} is a dataset that applies a transform to a source dataset. 
*/ -@Name("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkMapDataset extends ChunkMapBatchDataset { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ChunkMapDataset(Pointer p) { super(p); } - public ChunkMapDataset(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform) { super((Pointer)null); allocate(dataset, transform); } - private native void allocate(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform); + public ChunkMapDataset(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform) { super((Pointer)null); allocate(dataset, transform); } + private native void allocate(@ByVal ChunkSharedBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") ExampleStack transform); /** Gets a batch from the source dataset and applies the transform to it, * returning the result. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java new file mode 100644 index 00000000000..6ba2c68612f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkMapTensorBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkMapTensorBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. 
*/ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java new file mode 100644 index 00000000000..16c39244d25 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkMapTensorDataset extends ChunkMapTensorBatchDataset { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkMapTensorDataset(Pointer p) { super(p); } + + + public ChunkMapTensorDataset(@ByVal ChunkSharedTensorBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") TensorExampleStack transform) { super((Pointer)null); allocate(dataset, transform); } + private native void allocate(@ByVal ChunkSharedTensorBatchDataset dataset, @ByVal @Cast("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::TransformType*") TensorExampleStack transform); + + /** Gets a batch from the source dataset and applies the transform to it, + * returning the result. 
*/ + public native @Name("get_batch") @ByVal TensorExampleOptional get_batch_example(@Cast("size_t") long indices); + + /** Returns the size of the source dataset. */ + // NOLINTNEXTLINE(bugprone-exception-escape) + public native @ByVal @NoException(true) SizeTOptional size(); + + /** Calls {@code reset()} on the underlying dataset. + * NOTE: Stateless datasets do not have a reset() method, so a call to this + * method will only compile for stateful datasets (which have a reset() + * method). */ + + + /** Returns the underlying dataset. */ + public native @Const @ByRef @NoException(true) ChunkSharedTensorBatchDataset dataset(); + + /** Returns the transform being applied. */ + public native @Const @ByRef @NoException(true) TensorExampleStack transform(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java index 3ae0a154083..edc6057cfe8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java @@ -30,7 +30,7 @@ * * A stateful dataloader is created by calling {@code make_data_loader} with a * stateful dataset. */ -@Name("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkRandomDataLoader extends ChunkRandomDataLoaderBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java index d6f1ad4a879..cd5affa75de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example<>,size_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example,size_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkRandomDataLoaderBase extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -53,4 +53,5 @@ public class ChunkRandomDataLoaderBase extends Pointer { public native void join(); /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java new file mode 100644 index 00000000000..d66bab456e5 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java @@ -0,0 +1,30 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkRandomTensorDataLoader extends ChunkRandomTensorDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkRandomTensorDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatefulDataLoader} from a {@code dataset} and some {@code options}. 
*/ + public ChunkRandomTensorDataLoader(@ByVal ChunkMapTensorDataset dataset, @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, options); } + private native void allocate(@ByVal ChunkMapTensorDataset dataset, @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java new file mode 100644 index 00000000000..fea7e155d57 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example,size_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkRandomTensorDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkRandomTensorDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. 
The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal TensorExampleIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal TensorExampleIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. */ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java index a9cef9f3f8b..5c0384b1e62 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java @@ -26,7 +26,7 @@ * * Use {@code torch::data::datasets::make_shared_dataset()} to create a new * {@code SharedBatchDataset} like you would a {@code std::shared_ptr}. 
*/ -@Name("torch::data::datasets::SharedBatchDataset >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::SharedBatchDataset >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkSharedBatchDataset extends ChunkBatchSharedBatchDataset { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -41,7 +41,7 @@ private native void allocate( @SharedPtr ChunkDataset shared_dataset); /** Calls {@code get_batch} on the underlying dataset. */ - public native @ByVal @Cast("torch::data::datasets::SharedBatchDataset >::BatchType*") ExampleVectorOptional get_batch(@Cast("torch::data::datasets::SharedBatchDataset >::BatchRequestType") long request); + public native @ByVal @Cast("torch::data::datasets::SharedBatchDataset >::BatchType*") ExampleVectorOptional get_batch(@Cast("torch::data::datasets::SharedBatchDataset >::BatchRequestType") long request); /** Returns the {@code size} from the underlying dataset. 
*/ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java new file mode 100644 index 00000000000..50c2fd4d368 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java @@ -0,0 +1,52 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::SharedBatchDataset >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkSharedTensorBatchDataset extends ChunkBatchSharedTensorBatchDataset { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkSharedTensorBatchDataset(Pointer p) { super(p); } + + + /** Constructs a new {@code SharedBatchDataset} from a {@code shared_ptr} to the + * {@code UnderlyingDataset}. */ + /* implicit */ public ChunkSharedTensorBatchDataset( + @SharedPtr ChunkTensorDataset shared_dataset) { super((Pointer)null); allocate(shared_dataset); } +private native void allocate( + @SharedPtr ChunkTensorDataset shared_dataset); + + /** Calls {@code get_batch} on the underlying dataset. 
*/ + public native @ByVal @Cast("torch::data::datasets::SharedBatchDataset >::BatchType*") TensorExampleVectorOptional get_batch(@Cast("torch::data::datasets::SharedBatchDataset >::BatchRequestType") long request); + + /** Returns the {@code size} from the underlying dataset. */ + public native @ByVal SizeTOptional size(); + + /** Accesses the underlying dataset. */ + public native @ByRef @Name("operator *") ChunkTensorDataset multiply(); + + /** Accesses the underlying dataset. */ + + /** Accesses the underlying dataset. */ + public native @Name("operator ->") ChunkTensorDataset access(); + + /** Accesses the underlying dataset. */ + + /** Calls {@code reset()} on the underlying dataset. */ + public native void reset(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java index 145c1c03674..df8566ad0e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java @@ -31,7 +31,7 @@ * {@code optional} (i.e. the type specified in the {@code StatefulDataset} * specialization is automatically boxed into an {@code optional} for the dataset's * {@code BatchType}). 
*/ -@Name("torch::data::datasets::StatefulDataset,JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_00020_0003e_00020_0003e::BatchType,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::StatefulDataset,JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e_00020_0003e_00020_0003e::BatchType,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ChunkStatefulDataset extends ChunkBatchDataset { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java new file mode 100644 index 00000000000..a744347c27f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::StatefulDataset,JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_00020_0003e_00020_0003e::BatchType,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkStatefulTensorDataset extends ChunkTensorBatchDataset { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkStatefulTensorDataset(Pointer p) { super(p); } + + /** Resets internal state of the dataset. */ + public native void reset(); + + /** Saves the statefulDataset's state to OutputArchive. 
*/ + public native void save(@ByRef OutputArchive archive); + + /** Deserializes the statefulDataset's state from the {@code archive}. */ + public native void load(@ByRef InputArchive archive); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java new file mode 100644 index 00000000000..69da762261e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkTensorBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkTensorBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. 
*/ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java new file mode 100644 index 00000000000..2598d7c7a6e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java @@ -0,0 +1,48 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::ChunkDataReader,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkTensorDataReader extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public ChunkTensorDataReader() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ChunkTensorDataReader(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ChunkTensorDataReader(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public ChunkTensorDataReader position(long position) { + return (ChunkTensorDataReader)super.position(position); + } + @Override public ChunkTensorDataReader getPointer(long i) { + return new ChunkTensorDataReader((Pointer)this).offsetAddress(i); + } + + + + /** Read an entire chunk. */ + @Virtual(true) public native @ByVal @Cast("torch::data::datasets::ChunkDataReader,std::vector > >::ChunkType*") TensorExampleVector read_chunk(@Cast("size_t") long chunk_index); + + /** Returns the number of chunks available in this reader. */ + @Virtual(true) public native @Cast("size_t") long chunk_count(); + + /** This will clear any internal state associate with this reader. */ + @Virtual(true) public native void reset(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java new file mode 100644 index 00000000000..5d4b34ee0d6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java @@ -0,0 +1,68 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::ChunkDataset") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ChunkTensorDataset extends 
ChunkStatefulTensorDataset { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ChunkTensorDataset(Pointer p) { super(p); } + + + public ChunkTensorDataset( + ChunkTensorDataReader chunk_reader, + RandomSampler chunk_sampler, + RandomSampler example_sampler, + ChunkDatasetOptions options) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, null); } + public ChunkTensorDataset( + ChunkTensorDataReader chunk_reader, + RandomSampler chunk_sampler, + RandomSampler example_sampler, + ChunkDatasetOptions options, + Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); } + private native void allocate( + @ByVal @Cast("JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e_00020_0003e_00020_0003e*") ChunkTensorDataReader chunk_reader, + @ByVal RandomSampler chunk_sampler, + @ByVal RandomSampler example_sampler, + @ByVal ChunkDatasetOptions options, + @ByVal(nullValue = "std::function>&)>()") @Cast("std::function>&)>*") Pointer preprocessing_policy); + + /** Default get_batch method of BatchDataset. This method returns + * Example batches created from the preloaded chunks. The implemenation + * is dataset agnostic and does not need overriding in different chunk + * datasets. 
*/ + public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long batch_size); + + /** Helper method around get_batch as {@code batch_size} is not strictly necessary */ + public native @ByVal TensorExampleVectorOptional get_batch(); + + /** This will clear any internal state and starts the internal prefetching + * mechanism for the chunk dataset. */ + public native void reset(); + + /** size is not used for chunk dataset. */ + public native @ByVal SizeTOptional size(); + + // provide a reference to chunk sampler. Used mainly in distributed data + // loading to set the epoch number for the sampler. + public native @Cast("torch::data::datasets::ChunkDataset::ChunkSamplerType*") @ByRef RandomSampler chunk_sampler(); + + public native void save(@ByRef OutputArchive archive); + + public native void load(@ByRef InputArchive archive); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java index fc3d63ab8b1..f26ff6ea85e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java @@ -19,7 +19,7 @@ /** A transformation of a batch to a new batch. */ -@Name("torch::data::transforms::BatchTransform >,torch::data::Example<> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::transforms::BatchTransform >,torch::data::Example >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ExampleCollation extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java index 27e10847c52..929bd97678a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java @@ -18,7 +18,7 @@ import static org.bytedeco.pytorch.global.torch.*; // namespace detail -@Name("torch::data::Iterator >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::Iterator >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ExampleIterator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java index f67ef9a2cd1..76b3468f69a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java @@ -38,5 +38,5 @@ public class ExampleStack extends ExampleCollation { return new ExampleStack((Pointer)this).offsetAddress(i); } - public native @ByVal Example apply_batch(@ByVal ExampleVector examples); + public native @ByVal Example apply_batch(@StdVector Example examples); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java index 454209e92d4..2a7eda78f74 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ExampleVector extends Pointer { static { Loader.load(); } /** Pointer cast 
constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java similarity index 67% rename from pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java index 9d7253dfb60..470c9b179bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java @@ -18,29 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("torch::data::Iterator > > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ExampleVectorOptionalIterator extends Pointer { +@Name("torch::data::Iterator > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ExampleVectorIterator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ExampleVectorOptionalIterator(Pointer p) { super(p); } + public ExampleVectorIterator(Pointer p) { super(p); } // Type aliases to make the class recognized as a proper iterator. /** Increments the iterator. * Only permitted for valid iterators (not past the end). */ - public native @ByRef @Name("operator ++") ExampleVectorOptionalIterator increment(); + public native @ByRef @Name("operator ++") ExampleVectorIterator increment(); /** Returns the current batch. * Only permitted for valid iterators (not past the end). */ - public native @ByRef @Name("operator *") ExampleVectorOptional multiply(); + public native @ByRef @Name("operator *") ExampleVector multiply(); /** Returns a pointer to the current batch. * Only permitted for valid iterators (not past the end). 
*/ - public native @Name("operator ->") ExampleVectorOptional access(); + public native @Name("operator ->") ExampleVector access(); /** Compares two iterators for equality. */ - public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ExampleVectorOptionalIterator other); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ExampleVectorIterator other); /** Compares two iterators for inequality. */ - public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef ExampleVectorOptionalIterator other); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef ExampleVectorIterator other); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java index 44ce876c817..f1c3f540474 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::optional > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("c10::optional > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ExampleVectorOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java new file mode 100644 index 00000000000..ad9a1e1863e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** Like {@code DataLoaderOptions}, but without any unconfigured state. + * {@code DataLoaderOptions} has some options that depend on other options + * ({@code max_jobs} => {@code 2 * workers}). In the spirit of properly using the C++ type + * system, {@code DataLoaderOptions} allows only setting values. To access values, + * you must create a {@code FullDataLoaderOptions} from a {@code DataLoaderOptions} + * instance, which will do any necessary coalescing. */ +@Namespace("torch::data") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FullDataLoaderOptions extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public FullDataLoaderOptions(Pointer p) { super(p); } + + public FullDataLoaderOptions(@ByVal DataLoaderOptions options) { super((Pointer)null); allocate(options); } + private native void allocate(@ByVal DataLoaderOptions options); + + public native @Cast("size_t") long batch_size(); public native FullDataLoaderOptions batch_size(long setter); + public native @Cast("size_t") long workers(); public native FullDataLoaderOptions workers(long setter); + public native @Cast("size_t") long max_jobs(); public native FullDataLoaderOptions max_jobs(long setter); + public native @ByRef @Cast("c10::optional*") Pointer timeout(); public native FullDataLoaderOptions timeout(Pointer setter); + public native @Cast("bool") boolean enforce_ordering(); public native FullDataLoaderOptions enforce_ordering(boolean setter); + public native @Cast("bool") boolean drop_last(); public native FullDataLoaderOptions drop_last(boolean setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java index 365c78f9e26..9f51fb8de3e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java @@ -98,7 +98,7 @@ public native void markCompleted( // This accessor should only be used if we know that the future is // completed() with no error. - public native @Const @ByRef WeakStorageVector storages(); + public native @StdVector WeakStorage storages(); /** * Add a callback to the future. 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java new file mode 100644 index 00000000000..d05b87b003f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal ExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. 
*/ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java new file mode 100644 index 00000000000..6a3021353ba --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDataset.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * Abstract class for stateless datasets to be subclassed by Java user code. + */ + @Name("javacpp::Dataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDataset extends JavaDatasetBase { + static { Loader.load(); } + /** Default native constructor. */ + public JavaDataset() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public JavaDataset(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaDataset(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public JavaDataset position(long position) { + return (JavaDataset)super.position(position); + } + @Override public JavaDataset getPointer(long i) { + return new JavaDataset((Pointer)this).offsetAddress(i); + } + + @Virtual(true) public native @ByVal Example get(@Cast("size_t") long index); + @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size(); + @Virtual public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef indices); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java new file mode 100644 index 00000000000..da790a4b23f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::Dataset,torch::data::Example >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDatasetBase extends JavaBatchDataset { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaDatasetBase(Pointer p) { super(p); } + + + /** Returns the example at the given index. 
*/ + public native @ByVal @Cast("torch::data::datasets::Dataset,torch::data::Example >::ExampleType*") Example get(@Cast("size_t") long index); + + /** Returns a batch of data. + * The default implementation calls {@code get()} for every requested index + * in the batch. */ + public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef indices); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java new file mode 100644 index 00000000000..5b49faf24b9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedRandomDataLoader extends JavaDistributedRandomDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaDistributedRandomDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaDistributedRandomDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal DistributedRandomSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal DistributedRandomSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java new file mode 100644 index 00000000000..bad54600bdf --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedRandomDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaDistributedRandomDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal ExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal ExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java new file mode 100644 index 00000000000..2e599d8805d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedRandomTensorDataLoader extends JavaDistributedRandomTensorDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaDistributedRandomTensorDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaDistributedRandomTensorDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal DistributedRandomSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal DistributedRandomSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java new file mode 100644 index 00000000000..eea76157e6b --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedRandomTensorDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedRandomTensorDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaDistributedRandomTensorDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal TensorExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal TensorExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java new file mode 100644 index 00000000000..fc113a06359 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedSequentialDataLoader extends JavaDistributedSequentialDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaDistributedSequentialDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaDistributedSequentialDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal DistributedSequentialSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal DistributedSequentialSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java new file mode 100644 index 00000000000..65dcb5491a2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedSequentialDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaDistributedSequentialDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal ExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal ExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java new file mode 100644 index 00000000000..734076272d6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedSequentialTensorDataLoader extends JavaDistributedSequentialTensorDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaDistributedSequentialTensorDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaDistributedSequentialTensorDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal DistributedSequentialSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal DistributedSequentialSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java new file mode 100644 index 00000000000..a364b6ab273 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDistributedSequentialTensorDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaDistributedSequentialTensorDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaDistributedSequentialTensorDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal TensorExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal TensorExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java new file mode 100644 index 00000000000..41843004e94 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaRandomDataLoader extends JavaRandomDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaRandomDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaRandomDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal RandomSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal RandomSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java new file mode 100644 index 00000000000..0917169ac6a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaRandomDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaRandomDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal ExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal ExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java new file mode 100644 index 00000000000..93127463eb6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaRandomTensorDataLoader extends JavaRandomTensorDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaRandomTensorDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaRandomTensorDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal RandomSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal RandomSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java new file mode 100644 index 00000000000..1724e9cb0cf --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaRandomTensorDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaRandomTensorDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaRandomTensorDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal TensorExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal TensorExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java new file mode 100644 index 00000000000..93ca46f7706 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaSequentialDataLoader extends JavaSequentialDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaSequentialDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaSequentialDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal SequentialSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaDataset dataset, + @ByVal SequentialSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java new file mode 100644 index 00000000000..84f9ca47e46 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaSequentialDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaSequentialDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal ExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal ExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java new file mode 100644 index 00000000000..f85c25d2369 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaSequentialTensorDataLoader extends JavaSequentialTensorDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaSequentialTensorDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. 
*/ + public JavaSequentialTensorDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal SequentialSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaTensorDataset dataset, + @ByVal SequentialSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java new file mode 100644 index 00000000000..d041c78e9bd --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaSequentialTensorDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaSequentialTensorDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaSequentialTensorDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal TensorExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal TensorExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java new file mode 100644 index 00000000000..6dd7491ad16 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset,c10::optional > >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaStatefulBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal ExampleVectorOptional get_batch(@Cast("size_t") long request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. 
*/ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java new file mode 100644 index 00000000000..260513108ad --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoader.java @@ -0,0 +1,30 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatefulDataLoader") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulDataLoader extends JavaStatefulDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaStatefulDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatefulDataLoader} from a {@code dataset} and some {@code options}. 
*/ + public JavaStatefulDataLoader(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStatefulDataset dataset, @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, options); } + private native void allocate(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStatefulDataset dataset, @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java new file mode 100644 index 00000000000..ffe7dd7a1f0 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaStatefulDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. 
*/ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal ExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal ExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java new file mode 100644 index 00000000000..9d95edf2a47 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDataset.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * Abstract class for stateful datasets to be subclassed by Java user code. + */ +@Name("javacpp::StatefulDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulDataset extends JavaStatefulDatasetBase { + static { Loader.load(); } + /** Default native constructor. */ + public JavaStatefulDataset() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public JavaStatefulDataset(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStatefulDataset(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public JavaStatefulDataset position(long position) { + return (JavaStatefulDataset)super.position(position); + } + @Override public JavaStatefulDataset getPointer(long i) { + return new JavaStatefulDataset((Pointer)this).offsetAddress(i); + } + + @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size(); + @Virtual(true) public native @ByVal ExampleVectorOptional get_batch(@Cast("size_t") long size); + @Virtual(true) public native void reset(); + @Virtual(true) public native @Const({false, false, true}) void save(@ByRef OutputArchive archive); + @Virtual(true) public native void load(@ByRef InputArchive archive); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java new file mode 100644 index 00000000000..185364c9be2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulDatasetBase.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::StatefulDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulDatasetBase extends JavaStatefulBatchDataset { + static { Loader.load(); } + 
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaStatefulDatasetBase(Pointer p) { super(p); } + + /** Resets internal state of the dataset. */ + public native void reset(); + + /** Saves the statefulDataset's state to OutputArchive. */ + public native void save(@ByRef OutputArchive archive); + + /** Deserializes the statefulDataset's state from the {@code archive}. */ + public native void load(@ByRef InputArchive archive); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java new file mode 100644 index 00000000000..c2dcedd1752 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset,c10::optional > >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulTensorBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStatefulTensorBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java new file mode 100644 index 00000000000..1017c21b949 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoader.java @@ -0,0 +1,30 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatefulDataLoader") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulTensorDataLoader extends JavaStatefulTensorDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStatefulTensorDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatefulDataLoader} from a {@code dataset} and some {@code options}. */ + public JavaStatefulTensorDataLoader(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStatefulTensorDataset dataset, @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, options); } + private native void allocate(@ByVal @Cast("JavaCPP_javacpp_0003a_0003aStatefulDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStatefulTensorDataset dataset, @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java new file mode 100644 index 00000000000..7dd1c91209d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulTensorDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaStatefulTensorDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal TensorExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal TensorExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java new file mode 100644 index 00000000000..3447db6bad7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDataset.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("javacpp::StatefulDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulTensorDataset extends JavaStatefulTensorDatasetBase { + static { Loader.load(); } + /** Default native constructor. */ + public JavaStatefulTensorDataset() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public JavaStatefulTensorDataset(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStatefulTensorDataset(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public JavaStatefulTensorDataset position(long position) { + return (JavaStatefulTensorDataset)super.position(position); + } + @Override public JavaStatefulTensorDataset getPointer(long i) { + return new JavaStatefulTensorDataset((Pointer)this).offsetAddress(i); + } + + @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size(); + @Virtual(true) public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long size); + @Virtual(true) public native void reset(); + @Virtual(true) public native @Const({false, false, true}) void save(@ByRef OutputArchive archive); + @Virtual(true) public native void load(@ByRef InputArchive archive); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java new file mode 100644 index 00000000000..ad7f026b4fc --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorDatasetBase.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::StatefulDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStatefulTensorDatasetBase extends 
JavaStatefulTensorBatchDataset { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaStatefulTensorDatasetBase(Pointer p) { super(p); } + + /** Resets internal state of the dataset. */ + public native void reset(); + + /** Saves the statefulDataset's state to OutputArchive. */ + public native void save(@ByRef OutputArchive archive); + + /** Deserializes the statefulDataset's state from the {@code archive}. */ + public native void load(@ByRef InputArchive archive); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java new file mode 100644 index 00000000000..4069f0a12d7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStreamBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal ExampleVector get_batch(@Cast("size_t") long request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java new file mode 100644 index 00000000000..4ee342eddbe --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamDataLoader extends JavaStreamDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStreamDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. */ + public JavaStreamDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStreamDataset dataset, + @ByVal StreamSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003aTensor_0003e*") JavaStreamDataset dataset, + @ByVal StreamSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java new file mode 100644 index 00000000000..d0f82a8bf67 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamDataLoaderBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStreamDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal ExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal ExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java new file mode 100644 index 00000000000..7b8e284e74d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamDataset.java @@ -0,0 +1,44 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * Abstract class for stateless stream datasets to be subclassed by Java user code. + */ +@Name("javacpp::StreamDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamDataset extends JavaStreamBatchDataset { + static { Loader.load(); } + /** Default native constructor. */ + public JavaStreamDataset() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public JavaStreamDataset(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStreamDataset(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public JavaStreamDataset position(long position) { + return (JavaStreamDataset)super.position(position); + } + @Override public JavaStreamDataset getPointer(long i) { + return new JavaStreamDataset((Pointer)this).offsetAddress(i); + } + + @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size(); + @Virtual(true) public native @ByVal ExampleVector get_batch(@Cast("size_t") long size); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java new file mode 100644 index 00000000000..4a9d808b8bb --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset,std::vector >,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamTensorBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStreamTensorBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal TensorExampleVector get_batch(@Cast("size_t") long request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java new file mode 100644 index 00000000000..0532f11043e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoader.java @@ -0,0 +1,37 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::StatelessDataLoader") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamTensorDataLoader extends JavaStreamTensorDataLoaderBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStreamTensorDataLoader(Pointer p) { super(p); } + + + /** Constructs the {@code StatelessDataLoader} from a {@code dataset}, a {@code sampler} and + * some {@code options}. */ + public JavaStreamTensorDataLoader( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStreamTensorDataset dataset, + @ByVal StreamSampler sampler, + @ByVal DataLoaderOptions options) { super((Pointer)null); allocate(dataset, sampler, options); } + private native void allocate( + @ByVal @Cast("JavaCPP_javacpp_0003a_0003aStreamDataset_0003ctorch_0003a_0003aTensor_0002ctorch_0003a_0003adata_0003a_0003aexample_0003a_0003aNoTarget_0003e*") JavaStreamTensorDataset dataset, + @ByVal StreamSampler sampler, + @ByVal DataLoaderOptions options); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java new file mode 100644 index 00000000000..dd42ea8f3ad --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataLoaderBase.java @@ -0,0 +1,57 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::DataLoaderBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamTensorDataLoaderBase extends 
Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaStreamTensorDataLoaderBase(Pointer p) { super(p); } + + + /** Constructs a new DataLoader from a {@code dataset} to sample from, {@code options} + * to configure the DataLoader with, and a {@code sampler} that specifies the + * sampling strategy. */ + + // NOLINTNEXTLINE(bugprone-exception-escape) + + /** Returns an iterator into the DataLoader. The lifetime of the iterator is + * bound to the DataLoader. In C++ standards language, the category of the + * iterator is {@code OutputIterator}. See + * https://en.cppreference.com/w/cpp/named_req/OutputIterator for what this + * means. In short: you may increment the iterator and dereference it, but + * cannot go back, or step forward more than one position at a time. When the + * DataLoader is exhausted, it will compare equal with the special + * "sentinel" iterator returned by {@code DataLoader::end()}. Most of the time, you + * should only use range-for loops to loop over the DataLoader, but + * standard algorithms like {@code std::copy(dataloader.begin(), dataloader.end(), + * output_iterator)} are supported too. */ + public native @ByVal TensorExampleVectorIterator begin(); + + /** Returns a special "sentinel" iterator that compares equal with a + * non-sentinel iterator once the DataLoader is exhausted. */ + public native @ByVal TensorExampleVectorIterator end(); + + /** Joins the DataLoader's worker threads and drains internal queues. + * This function may only be invoked from the main thread (in which the + * DataLoader lives). */ + public native void join(); + + /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java new file mode 100644 index 00000000000..05860271878 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorDataset.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("javacpp::StreamDataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaStreamTensorDataset extends JavaStreamTensorBatchDataset { + static { Loader.load(); } + /** Default native constructor. */ + public JavaStreamTensorDataset() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public JavaStreamTensorDataset(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaStreamTensorDataset(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public JavaStreamTensorDataset position(long position) { + return (JavaStreamTensorDataset)super.position(position); + } + @Override public JavaStreamTensorDataset getPointer(long i) { + return new JavaStreamTensorDataset((Pointer)this).offsetAddress(i); + } + + @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size(); + @Virtual(true) public native @ByVal TensorExampleVector get_batch(@Cast("size_t") long size); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java new file mode 100644 index 00000000000..a39849fc69d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::BatchDataset,std::vector > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaTensorBatchDataset extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public JavaTensorBatchDataset(Pointer p) { super(p); } + + @MemberGetter public static native @Cast("const bool") boolean is_stateful(); + public static final boolean is_stateful = is_stateful(); + + /** Returns a batch of data given an index. */ + public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... request); + + /** Returns the size of the dataset, or an empty optional if it is unsized. */ + public native @ByVal SizeTOptional size(); + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + + /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java new file mode 100644 index 00000000000..35e2f01d5ca --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDataset.java @@ -0,0 +1,41 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + @Name("javacpp::Dataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JavaTensorDataset extends JavaTensorDatasetBase { + static { Loader.load(); } + /** Default native constructor. 
*/ + public JavaTensorDataset() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public JavaTensorDataset(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaTensorDataset(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public JavaTensorDataset position(long position) { + return (JavaTensorDataset)super.position(position); + } + @Override public JavaTensorDataset getPointer(long i) { + return new JavaTensorDataset((Pointer)this).offsetAddress(i); + } + + @Virtual(true) public native @ByVal TensorExample get(@Cast("size_t") long index); + @Virtual(true) public native @ByVal @Const({false, false, true}) SizeTOptional size(); + @Virtual public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef indices); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java new file mode 100644 index 00000000000..a386457b268 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::datasets::Dataset,torch::data::Example >") @Properties(inherit = 
org.bytedeco.pytorch.presets.torch.class) +public class JavaTensorDatasetBase extends JavaTensorBatchDataset { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public JavaTensorDatasetBase(Pointer p) { super(p); } + + + /** Returns the example at the given index. */ + public native @ByVal @Cast("torch::data::datasets::Dataset,torch::data::Example >::ExampleType*") TensorExample get(@Cast("size_t") long index); + + /** Returns a batch of data. + * The default implementation calls {@code get()} for every requested index + * in the batch. */ + public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef indices); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java index fd972737358..46e103dc25e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java @@ -16,8 +16,10 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; + // namespace detail -@Name("torch::data::datasets::BatchDataset >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +/** A dataset that can yield data only in batches. */ +@Name("torch::data::datasets::BatchDataset >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class MNISTBatchDataset extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java index 044a0551e2c..d94f4f21748 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java @@ -24,7 +24,7 @@ * therefore batched access is implemented (by default) by calling the random * access indexing function for each index in the requested batch of indices. * This can be customized. */ -@Name("torch::data::datasets::Dataset >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::Dataset >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class MNISTDataset extends MNISTBatchDataset { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -32,7 +32,7 @@ public class MNISTDataset extends MNISTBatchDataset { /** Returns the example at the given index. */ - public native @ByVal @Cast("torch::data::datasets::Dataset >::ExampleType*") Example get(@Cast("size_t") long index); + public native @ByVal @Cast("torch::data::datasets::Dataset >::ExampleType*") Example get(@Cast("size_t") long index); /** Returns a batch of data. 
* The default implementation calls {@code get()} for every requested index diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java index 102b36a8934..5ecc5774461 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("torch::data::datasets::BatchDataset > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::datasets::BatchDataset > >,std::vector >,at::ArrayRef >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class MNISTMapBatchDataset extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java index a58420e1589..2b8f534084a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java @@ -16,20 +16,23 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; + // namespace detail -@Name("torch::data::datasets::MapDataset > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +/** A {@code MapDataset} is a dataset that applies a transform to a source dataset. */ +@Name("torch::data::datasets::MapDataset > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class MNISTMapDataset extends MNISTMapBatchDataset { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MNISTMapDataset(Pointer p) { super(p); } - public MNISTMapDataset(@ByVal MNIST dataset, @ByVal @Cast("torch::data::datasets::MapDataset > >::TransformType*") ExampleStack transform) { super((Pointer)null); allocate(dataset, transform); } - private native void allocate(@ByVal MNIST dataset, @ByVal @Cast("torch::data::datasets::MapDataset > >::TransformType*") ExampleStack transform); + public MNISTMapDataset(@ByVal MNIST dataset, @ByVal @Cast("torch::data::datasets::MapDataset > >::TransformType*") ExampleStack transform) { super((Pointer)null); allocate(dataset, transform); } + private native void allocate(@ByVal MNIST dataset, @ByVal @Cast("torch::data::datasets::MapDataset > >::TransformType*") ExampleStack transform); /** Gets a batch from the source dataset and applies the transform to it, * returning the result. */ public native @Name("get_batch") @ByVal Example get_batch_example(@ByVal SizeTArrayRef indices); + public native @Name("get_batch") @ByVal Example get_batch_example(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); /** Returns the size of the source dataset. */ // NOLINTNEXTLINE(bugprone-exception-escape) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java index 2ca2c38e1a3..3633796f509 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java @@ -25,7 +25,7 @@ * dataset, which acts as a simple batch request to batch mapping. The batch * request will often be an array of indices, and if the dataset is a simple * image dataset, the dataset would produce the images at those indices. 
*/ -@Name("torch::data::StatelessDataLoader > >,torch::data::samplers::RandomSampler>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::StatelessDataLoader > >,torch::data::samplers::RandomSampler>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class MNISTRandomDataLoader extends MNISTRandomDataLoaderBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java index d300ca79ebd..f590b5c12ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("torch::data::DataLoaderBase > >,torch::data::Example<>,std::vector >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::data::DataLoaderBase > >,torch::data::Example,std::vector >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class MNISTRandomDataLoaderBase extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -53,4 +53,5 @@ public class MNISTRandomDataLoaderBase extends Pointer { public native void join(); /** Returns the options with which the DataLoader was configured. 
*/ + public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java similarity index 84% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleBatchDataset.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java index 8ddcc7b1c2c..2b1c29a6bf2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java @@ -18,16 +18,17 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("torch::data::datasets::BatchDataset >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorExampleBatchDataset extends Pointer { +public class TensorBatchDataset extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorExampleBatchDataset(Pointer p) { super(p); } + public TensorBatchDataset(Pointer p) { super(p); } @MemberGetter public static native @Cast("const bool") boolean is_stateful(); public static final boolean is_stateful = is_stateful(); /** Returns a batch of data given an index. */ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... request); /** Returns the size of the dataset, or an empty optional if it is unsized. 
*/ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java new file mode 100644 index 00000000000..8fc6011a328 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** A dataset of tensors. + * Stores a single tensor internally, which is then indexed inside {@code get()}. */ +@Namespace("torch::data::datasets") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorDataset extends TensorDatasetBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorDataset(Pointer p) { super(p); } + + /** Creates a {@code TensorDataset} from a vector of tensors. */ + public TensorDataset(@Cast({"", "std::vector"}) @StdMove TensorVector tensors) { super((Pointer)null); allocate(tensors); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector tensors); + + public TensorDataset(@ByVal Tensor tensor) { super((Pointer)null); allocate(tensor); } + private native void allocate(@ByVal Tensor tensor); + + /** Returns a single {@code TensorExample}. 
*/ + public native @ByVal @Cast("torch::data::TensorExample*") Example get(@Cast("size_t") long index); + + /** Returns the number of tensors in the dataset. */ + public native @ByVal SizeTOptional size(); + + public native @ByRef Tensor tensor(); public native TensorDataset tensor(Tensor setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java similarity index 82% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleDataset.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java index 7b0fa945b52..a94b857df28 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java @@ -18,10 +18,10 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("torch::data::datasets::Dataset") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorExampleDataset extends TensorExampleBatchDataset { +public class TensorDatasetBase extends TensorBatchDataset { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorExampleDataset(Pointer p) { super(p); } + public TensorDatasetBase(Pointer p) { super(p); } /** Returns the example at the given index. */ @@ -31,4 +31,5 @@ public class TensorExampleDataset extends TensorExampleBatchDataset { * The default implementation calls {@code get()} for every requested index * in the batch. */ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
request); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java new file mode 100644 index 00000000000..e946eb5904e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleCollation.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::data::transforms::BatchTransform >,torch::data::Example >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorExampleCollation extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorExampleCollation(Pointer p) { super(p); } + + + /** Applies the transformation to the given {@code input_batch}. 
*/ + public native @ByVal TensorExample apply_batch(@ByVal TensorExampleVector input_batch); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java new file mode 100644 index 00000000000..d72683349e8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleIterator.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::data::Iterator >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorExampleIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorExampleIterator(Pointer p) { super(p); } + + // Type aliases to make the class recognized as a proper iterator. + + /** Increments the iterator. + * Only permitted for valid iterators (not past the end). */ + public native @ByRef @Name("operator ++") TensorExampleIterator increment(); + + /** Returns the current batch. + * Only permitted for valid iterators (not past the end). */ + public native @ByRef @Name("operator *") TensorExample multiply(); + + /** Returns a pointer to the current batch. + * Only permitted for valid iterators (not past the end). */ + public native @Name("operator ->") TensorExample access(); + + /** Compares two iterators for equality. 
*/ + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorExampleIterator other); + + /** Compares two iterators for inequality. */ + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorExampleIterator other); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java new file mode 100644 index 00000000000..0e7bd2e739b --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorExampleOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public TensorExampleOptional(Pointer p) { super(p); } + public TensorExampleOptional(TensorExample value) { this(); put(value); } + public TensorExampleOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef TensorExampleOptional put(@ByRef TensorExampleOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @ByRef TensorExample get(); + @ValueSetter public native TensorExampleOptional put(@ByRef TensorExample value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java new file mode 100644 index 00000000000..c05d4c21576 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleStack.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** A {@code Collation} for {@code Example} types that stacks all data + * tensors into one tensor. */ +@Name("torch::data::transforms::Stack") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorExampleStack extends TensorExampleCollation { + static { Loader.load(); } + /** Default native constructor. */ + public TensorExampleStack() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public TensorExampleStack(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorExampleStack(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public TensorExampleStack position(long position) { + return (TensorExampleStack)super.position(position); + } + @Override public TensorExampleStack getPointer(long i) { + return new TensorExampleStack((Pointer)this).offsetAddress(i); + } + + public native @ByVal @Cast("torch::data::TensorExample*") Example apply_batch(@Cast("torch::data::TensorExample*") @StdVector Example examples); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java new file mode 100644 index 00000000000..a0a79082bee --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorIterator.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::data::Iterator > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorExampleVectorIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public TensorExampleVectorIterator(Pointer p) { super(p); } + + // Type aliases to make the class recognized as a proper iterator. + + /** Increments the iterator. + * Only permitted for valid iterators (not past the end). */ + public native @ByRef @Name("operator ++") TensorExampleVectorIterator increment(); + + /** Returns the current batch. + * Only permitted for valid iterators (not past the end). */ + public native @ByRef @Name("operator *") TensorExampleVector multiply(); + + /** Returns a pointer to the current batch. + * Only permitted for valid iterators (not past the end). */ + public native @Name("operator ->") TensorExampleVector access(); + + /** Compares two iterators for equality. */ + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorExampleVectorIterator other); + + /** Compares two iterators for inequality. */ + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorExampleVectorIterator other); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java new file mode 100644 index 00000000000..e164999bc12 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVectorOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset 
@Name("c10::optional > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorExampleVectorOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorExampleVectorOptional(Pointer p) { super(p); } + public TensorExampleVectorOptional(TensorExampleVector value) { this(); put(value); } + public TensorExampleVectorOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef TensorExampleVectorOptional put(@ByRef TensorExampleVectorOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @ByRef TensorExampleVector get(); + @ValueSetter public native TensorExampleVectorOptional put(@ByRef TensorExampleVector value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java index ffc3734822a..089709f0482 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class WeakStorageVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java index deb05153b50..64ff3c61059 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java @@ -17,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("c10::optional > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class WeakStorageVectorOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 88278b681dc..7fa2f4535fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -206,16 +206,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../PointerPairOptional.java -// Targeting ../ExampleVectorOptional.java +// Targeting ../WeakStorageVectorOptional.java + + +// Targeting ../BatchSizeOptional.java // Targeting ../ExampleOptional.java -// Targeting ../BatchSizeOptional.java +// Targeting ../ExampleVectorOptional.java -// Targeting ../WeakStorageVectorOptional.java +// Targeting ../TensorExampleOptional.java + + +// Targeting ../TensorExampleVectorOptional.java // Targeting ../T_TensorTensor_TOptional.java @@ -308,6 +314,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../DataPtrVector.java +// Targeting ../WeakStorageVector.java + + // Targeting ../StringTensorDictItemVector.java @@ -317,9 +326,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting 
../StringSharedModuleDictItemVector.java -// Targeting ../WeakStorageVector.java - - // Targeting ../BoolVector.java @@ -437,6 +443,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../TensorExampleVector.java +// Targeting ../ExampleVector.java + + // Targeting ../EnumNameValue.java @@ -62500,13 +62509,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../DataLoaderOptions.java +// Targeting ../FullDataLoaderOptions.java + -/** Like {@code DataLoaderOptions}, but without any unconfigured state. - * {@code DataLoaderOptions} has some options that depend on other options - * ({@code max_jobs} => {@code 2 * workers}). In the spirit of properly using the C++ type - * system, {@code DataLoaderOptions} allows only setting values. To access values, - * you must create a {@code FullDataLoaderOptions} from a {@code DataLoaderOptions} - * instance, which will do any necessary coalescing. */ // namespace data // namespace torch @@ -62637,7 +62642,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../ExampleIterator.java -// Targeting ../ExampleVectorOptionalIterator.java +// Targeting ../ExampleVectorIterator.java + + +// Targeting ../TensorExampleIterator.java + + +// Targeting ../TensorExampleVectorIterator.java // namespace data @@ -62742,10 +62753,49 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// Targeting ../MNISTRandomDataLoaderBase.java + + // Targeting ../ChunkRandomDataLoaderBase.java -// Targeting ../MNISTRandomDataLoaderBase.java +// Targeting ../JavaRandomDataLoaderBase.java + + +// Targeting ../JavaDistributedRandomDataLoaderBase.java + + +// Targeting ../JavaDistributedSequentialDataLoaderBase.java + + +// Targeting ../JavaSequentialDataLoaderBase.java + + +// Targeting ../JavaStreamDataLoaderBase.java + + +// Targeting ../JavaStatefulDataLoaderBase.java + + +// Targeting 
../ChunkRandomTensorDataLoaderBase.java + + +// Targeting ../JavaRandomTensorDataLoaderBase.java + + +// Targeting ../JavaDistributedRandomTensorDataLoaderBase.java + + +// Targeting ../JavaDistributedSequentialTensorDataLoaderBase.java + + +// Targeting ../JavaSequentialTensorDataLoaderBase.java + + +// Targeting ../JavaStreamTensorDataLoaderBase.java + + +// Targeting ../JavaStatefulTensorDataLoaderBase.java // namespace data @@ -62765,6 +62815,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../ChunkRandomDataLoader.java +// Targeting ../JavaStatefulDataLoader.java + + +// Targeting ../ChunkRandomTensorDataLoader.java + + +// Targeting ../JavaStatefulTensorDataLoader.java + + // namespace data // namespace torch @@ -62787,6 +62846,36 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../MNISTRandomDataLoader.java +// Targeting ../JavaRandomDataLoader.java + + +// Targeting ../JavaDistributedRandomDataLoader.java + + +// Targeting ../JavaDistributedSequentialDataLoader.java + + +// Targeting ../JavaSequentialDataLoader.java + + +// Targeting ../JavaStreamDataLoader.java + + +// Targeting ../JavaRandomTensorDataLoader.java + + +// Targeting ../JavaDistributedRandomTensorDataLoader.java + + +// Targeting ../JavaDistributedSequentialTensorDataLoader.java + + +// Targeting ../JavaSequentialTensorDataLoader.java + + +// Targeting ../JavaStreamTensorDataLoader.java + + // namespace data // namespace torch @@ -62862,6 +62951,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace data // namespace torch +// Targeting ../MNISTBatchDataset.java + + +// Targeting ../MNISTMapBatchDataset.java + + +// Targeting ../TensorBatchDataset.java + + // Targeting ../ChunkBatchDataset.java @@ -62871,19 +62969,43 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../ChunkMapBatchDataset.java -// Targeting ../MNISTBatchDataset.java +// Targeting ../JavaBatchDataset.java 
-// Targeting ../MNISTMapBatchDataset.java +// Targeting ../JavaStreamBatchDataset.java + + +// Targeting ../JavaStatefulBatchDataset.java -// Targeting ../TensorExampleBatchDataset.java +// Targeting ../ChunkTensorBatchDataset.java + + +// Targeting ../ChunkBatchSharedTensorBatchDataset.java + + +// Targeting ../ChunkMapTensorBatchDataset.java + + +// Targeting ../JavaTensorBatchDataset.java + + +// Targeting ../JavaStreamTensorBatchDataset.java + + +// Targeting ../JavaStatefulTensorBatchDataset.java // Targeting ../MNISTDataset.java -// Targeting ../TensorExampleDataset.java +// Targeting ../TensorDatasetBase.java + + +// Targeting ../JavaDatasetBase.java + + +// Targeting ../JavaTensorDatasetBase.java @@ -62909,6 +63031,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../ChunkStatefulDataset.java +// Targeting ../JavaStatefulDatasetBase.java + + +// Targeting ../ChunkStatefulTensorDataset.java + + +// Targeting ../JavaStatefulTensorDatasetBase.java + + /** Serializes a statefulDataset to {@code OutputArchive}. */ @@ -64431,6 +64562,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../ChunkDataReader.java +// Targeting ../ChunkTensorDataReader.java + + /** BatchDataBuffer manages a queue of UnwrappedBatchData. After a new chunk is * loaded, BatchDataBuffer splits it into small batches and push them into the * queue. 
When get_batch is called from data loader, it pops cached batches and @@ -64443,6 +64577,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../ChunkDataset.java +// Targeting ../ChunkTensorDataset.java + + // namespace datasets // namespace data // namespace torch @@ -64461,10 +64598,13 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include // #include +// Targeting ../MNISTMapDataset.java + + // Targeting ../ChunkMapDataset.java -// Targeting ../MNISTMapDataset.java +// Targeting ../ChunkMapTensorDataset.java @@ -64506,6 +64646,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../ChunkSharedBatchDataset.java +// Targeting ../ChunkSharedTensorBatchDataset.java + + /** Constructs a new {@code SharedBatchDataset} by creating a * {@code shared_ptr}. All arguments are forwarded to @@ -64525,9 +64668,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include // #include +// Targeting ../TensorDataset.java + -/** A dataset of tensors. - * Stores a single tensor internally, which is then indexed inside {@code get()}. */ // namespace datasets // namespace data @@ -64558,6 +64701,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../ExampleCollation.java +// Targeting ../TensorExampleCollation.java + + /** A transformation of individual input examples to individual output examples. * @@ -64636,9 +64782,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../ExampleStack.java +// Targeting ../TensorExampleStack.java + -/** A {@code Collation} for {@code Example} types that stacks all data - * tensors into one tensor. 
*/ // namespace transforms // namespace data // namespace torch @@ -78060,4 +78206,34 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch +// Parsed from datasets.h + +/* + I don't think we can directly virtualize Dataset<...> because of CRTP in Dataset. + + Because of issue #723, we cannot virtualize superclasses of javacpp::*Dataset, only javacpp::*Dataset. + So we redeclare/redefine virtual functions of parents in these classes, so that the JavaCPP peer classes implements + the logic to call the Java versions. +*/ +// Targeting ../JavaDataset.java + + +// Targeting ../JavaTensorDataset.java + + +// Targeting ../JavaStreamDataset.java + + +// Targeting ../JavaStreamTensorDataset.java + + +// Targeting ../JavaStatefulDataset.java + + +// Targeting ../JavaStatefulTensorDataset.java + + + + + } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 9e9e63af399..0d60d967406 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -69,6 +69,7 @@ "torch/csrc/jit/frontend/tree_views.h", "torch/csrc/jit/serialization/storage_context.h", + "datasets.h", "pytorch_adapters.h" }, link = {"c10", "torch_cpu", "torch"}, @@ -447,6 +448,7 @@ public void map(InfoMap infoMap) { .put(new Info("c10::optional").pointerTypes("StringViewOptional").define()) .put(new Info("c10::optional >").pointerTypes("StringViewVectorOptional").define()) .put(new Info("c10::optional >", "c10::optional >")/*.cast?*/.pointerTypes("PointerPairOptional").define()) + .put(new Info("c10::optional > >", "c10::optional >").pointerTypes("WeakStorageVectorOptional").define()) ; @@ -648,6 +650,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector >").pointerTypes("SharedSugaredValueVector").define()) .put(new Info("const std::vector").pointerTypes("FunctionSchemaVector").define()) .put(new Info("const std::vector", 
"std::vector").pointerTypes("DataPtrVector").define()) // Used from cuda only + .put(new Info("const std::vector >", "std::vector >").pointerTypes("WeakStorageVector").define()) ; @@ -1020,7 +1023,7 @@ public void map(InfoMap infoMap) { /// Classes skipped for various non-investigated reasons infoMap - .put(new Info(/*"c10::intrusive_ptr", "c10::weak_intrusive_ptr", */"c10::guts::is_fundamental", + .put(new Info("c10::guts::is_fundamental", "c10::detail::CaptureKernelCall", "c10::detail::DictImpl", "c10::detail::MultiDispatchKeySet", "c10::ExclusivelyOwnedTraits", "c10::FunctionSchema::dump", "c10::domain_prefix", "c10::C10FlagsRegistry", "c10::enforce_detail::EnforceFailMessage", "c10::impl::build_feature_required_feature_not_available", "c10::detail::getMaybeFakeTypePtr_", "c10::complex_literals::operator \"\"_if", "c10::complex_literals::operator \"\"_id", @@ -1157,125 +1160,283 @@ public void map(InfoMap infoMap) { ; - //// Datasets - String VirtualChunkDataReader = "JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_00020_0003e_00020_0003e"; - - infoMap.put(new Info("std::vector >", // "UnwrappedBatchType", - "std::vector >::ExampleType>").pointerTypes("ExampleVector").define()) - .put(new Info("std::vector >").pointerTypes("TensorExampleVector").define()) - .put(new Info("c10::optional > >", "c10::optional<" + VirtualChunkDataReader + "::BatchType>", - "torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchType") - .pointerTypes("ExampleVectorOptional").define()) - - .put(new Info("torch::data::Example", "torch::data::Example<>").pointerTypes("Example")) - .put(new Info("c10::optional >", "c10::optional >").pointerTypes("ExampleOptional").define()) - .put(new 
Info("torch::data::Example").pointerTypes("TensorExample")) - .put(new Info("torch::data::Example::Example").javaText( - "public TensorExample(@ByVal Tensor data) { super((Pointer)null); allocate(data); }\n" - + "private native void allocate(@ByVal Tensor data);\n")) - .put(new Info("torch::data::Example::target").skip()) -// .put(new Info("torch::data::detail::SentinelIterator > >").pointerTypes("ExampleSentinelIterator")) -// .put(new Info("torch::data::detail::ValidIterator > >").pointerTypes("ExampleValidIterator")) -// .put(new Info("torch::data::detail::IteratorImpl > >").pointerTypes("ExampleIteratorImpl")) - .put(new Info("torch::data::Iterator >").purify().pointerTypes("ExampleIterator")) - //.put(new Info("torch::data::Iterator > >").purify().pointerTypes("ExampleVectorIterator")) - .put(new Info("torch::data::Iterator > > >").purify().pointerTypes("ExampleVectorOptionalIterator")) - .put(new Info("torch::data::samplers::Sampler >", "torch::data::samplers::Sampler<>").pointerTypes("Sampler")) - .put(new Info("torch::data::samplers::Sampler").pointerTypes("BatchSizeSampler")) - .put(new Info("torch::data::samplers::RandomSampler").pointerTypes("RandomSampler")) - .put(new Info("torch::data::samplers::DistributedSampler >", "torch::data::samplers::DistributedSampler<>").purify().pointerTypes("DistributedSampler")) - .put(new Info("c10::optional").pointerTypes("BatchSizeOptional").define()) - .put(new Info("torch::data::transforms::BatchTransform >, torch::data::Example<> >", - "torch::data::transforms::Collation >").pointerTypes("ExampleCollation")) - .put(new Info("torch::data::transforms::Stack >").pointerTypes("ExampleStack")) - .put(new Info("c10::optional >").pointerTypes("WeakStorageVectorOptional").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("WeakStorageVector").define()) - .put(new Info("std::vector").pointerTypes("CaptureVector")) - - - .put(new Info("torch::data::datasets::ChunkDataReader,std::vector > >", 
VirtualChunkDataReader).pointerTypes("ChunkDataReader").virtualize()) - .put(new Info("torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>").pointerTypes("ChunkDataset")) - .put(new Info("torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::ChunkDataset").javaText( - "public ChunkDataset(\n" - + " ChunkDataReader chunk_reader,\n" - + " RandomSampler chunk_sampler,\n" - + " RandomSampler example_sampler,\n" - + " ChunkDatasetOptions options) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, null); }\n" - + "public ChunkDataset(\n" - + " ChunkDataReader chunk_reader,\n" - + " RandomSampler chunk_sampler,\n" - + " RandomSampler example_sampler,\n" - + " ChunkDatasetOptions options,\n" - + " Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); }\n" - + "private native void allocate(\n" - + " @ByVal @Cast(\"" + VirtualChunkDataReader + "*\") ChunkDataReader chunk_reader,\n" - + " @ByVal RandomSampler chunk_sampler,\n" - + " @ByVal RandomSampler example_sampler,\n" - + " @ByVal ChunkDatasetOptions options,\n" - + " @ByVal(nullValue = \"std::function>&)>()\") @Cast(\"std::function>&)>*\") Pointer preprocessing_policy);\n")) - .put(new Info("torch::data::datasets::StatefulDataset," + VirtualChunkDataReader + "::BatchType,size_t>") - .pointerTypes("ChunkStatefulDataset")) - .put(new Info("torch::data::datasets::BatchDataset,c10::optional<" + VirtualChunkDataReader + "::BatchType>,size_t>", - "torch::data::datasets::BatchDataset,std::vector > >") - .pointerTypes("ChunkBatchDataset")) - .put(new Info("torch::data::datasets::BatchDataset >,c10::optional<" + VirtualChunkDataReader + "::BatchType>,size_t>", - "torch::data::datasets::BatchDataset 
>,torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchType,torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchRequestType>") - .pointerTypes("ChunkBatchSharedBatchDataset")) - .put(new Info("torch::data::datasets::BatchDataset >,c10::optional<" + VirtualChunkDataReader + "::BatchType>,size_t>::map") - .javaText("public native @ByVal ChunkMapDataset map(@ByVal ExampleStack transform);")) - .put(new Info("torch::data::datasets::SharedBatchDataset >") - .pointerTypes("ChunkSharedBatchDataset")) - .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") - .pointerTypes("ChunkMapDataset")) - .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::reset") - .skip()) - .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::DatasetType") - .pointerTypes("ChunkSharedBatchDataset")) - .put(new Info("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >", - "torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,torch::data::datasets::detail::optional_if_t >::is_stateful,torch::data::transforms::Stack >::OutputBatchType>,torch::data::datasets::SharedBatchDataset >::BatchRequestType>") - .pointerTypes("ChunkMapBatchDataset")) - .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchRequestType").pointerTypes("SizeTArrayRef")) - .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::OutputBatchType").pointerTypes("Example")) - .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::get_batch") - .javaText("public native @Name(\"get_batch\") @ByVal ExampleOptional get_batch_example(@Cast(\"size_t\") long indices);")) - .put(new 
Info("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example<>,size_t>", - "torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchType::value_type,torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchRequestType>") - .purify().pointerTypes("ChunkRandomDataLoaderBase")) - .put(new Info("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") - .pointerTypes("ChunkRandomDataLoader")) - - .put(new Info("torch::data::DataLoaderBase > >,torch::data::Example<>,std::vector >", - "torch::data::DataLoaderBase > >,torch::data::datasets::MapDataset > >::BatchType,torch::data::samplers::RandomSampler::BatchRequestType>") - .purify().pointerTypes("MNISTRandomDataLoaderBase")) - .put(new Info("torch::data::StatelessDataLoader > >,torch::data::samplers::RandomSampler>").pointerTypes("MNISTRandomDataLoader")) - .put(new Info("torch::data::datasets::Dataset >", - "torch::data::datasets::Dataset").pointerTypes("MNISTDataset")) - .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >", - "torch::data::datasets::BatchDataset > >").pointerTypes("MNISTBatchDataset")) - .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >::map") - .javaText("public native @ByVal MNISTMapDataset map(@ByVal ExampleStack transform);")) + //// Data loader + infoMap + .put(new Info("torch::data::example::NoTarget")) // To ensure ns resolution gets it correctly + .put(new Info( + "torch::data::Example::Example" + ).javaText( + "public TensorExample(@ByVal Tensor data) { super((Pointer)null); allocate(data); }\n" + + "private native void allocate(@ByVal Tensor data);\n")) /* or generated constructor will want argument "NoTarget */ + .put(new Info("torch::data::Example::target").skip()) + + .put(new Info( + "torch::data::samplers::Sampler >", + "torch::data::samplers::Sampler<>" + ).pointerTypes("Sampler")) + .put(new Info( + 
"torch::data::samplers::Sampler" + ).pointerTypes("BatchSizeSampler")) + .put(new Info( + "torch::data::samplers::RandomSampler" + ).pointerTypes("RandomSampler")) + .put(new Info( + "torch::data::samplers::DistributedSampler >", + "torch::data::samplers::DistributedSampler<>" + ).purify().pointerTypes("DistributedSampler")) + .put(new Info( + "c10::optional" + ).pointerTypes("BatchSizeOptional").define()) + + .put(new Info("torch::data::DataLoaderBase > >,torch::data::Example,std::vector >", + "torch::data::DataLoaderBase > >,torch::data::datasets::MapDataset > >::BatchType,torch::data::samplers::RandomSampler::BatchRequestType>") + .purify().pointerTypes("MNISTRandomDataLoaderBase")) + .put(new Info("torch::data::StatelessDataLoader > >,torch::data::samplers::RandomSampler>").pointerTypes("MNISTRandomDataLoader")) + .put(new Info("torch::data::datasets::Dataset >", + "torch::data::datasets::Dataset").pointerTypes("MNISTDataset")) + .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >", + "torch::data::datasets::BatchDataset > >").pointerTypes("MNISTBatchDataset")) + .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >::map") + .javaText("public native @ByVal MNISTMapDataset map(@ByVal ExampleStack transform);")) // .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >::map > >") // .javaNames("map")) - .put(new Info("torch::data::datasets::MapDataset > >").pointerTypes("MNISTMapDataset")) - .put(new Info("torch::data::datasets::MapDataset > >::reset").skip()) - .put(new Info("torch::data::datasets::MapDataset > >::DatasetType").pointerTypes("MNIST")) - .put(new Info("torch::data::datasets::BatchDataset > >,std::vector >,at::ArrayRef >", - "torch::data::datasets::BatchDataset > >,torch::data::datasets::detail::optional_if_t >::OutputBatchType>,torch::data::datasets::MNIST::BatchRequestType>") - .pointerTypes("MNISTMapBatchDataset")) + .put(new Info("torch::data::datasets::MapDataset > >").pointerTypes("MNISTMapDataset")) 
+ .put(new Info("torch::data::datasets::MapDataset > >::reset").skip()) + .put(new Info("torch::data::datasets::MapDataset > >::DatasetType").pointerTypes("MNIST")) + .put(new Info("torch::data::datasets::BatchDataset > >,std::vector >,at::ArrayRef >", + "torch::data::datasets::BatchDataset > >,torch::data::datasets::detail::optional_if_t >::OutputBatchType>,torch::data::datasets::MNIST::BatchRequestType>") + .pointerTypes("MNISTMapBatchDataset")) // .put(new Info("torch::data::datasets::MapDataset > >::BatchRequestType").pointerTypes("SizeTArrayRef")) // .put(new Info("torch::data::datasets::MapDataset > >::OutputBatchType").pointerTypes("Example")) - .put(new Info("torch::data::datasets::MapDataset > >::get_batch") - .javaText("public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal SizeTArrayRef indices);")) - - .put(new Info("torch::data::datasets::Dataset", - "torch::data::datasets::Dataset").pointerTypes("TensorExampleDataset")) - .put(new Info("torch::data::datasets::BatchDataset >", - "torch::data::datasets::BatchDataset >").pointerTypes("TensorExampleBatchDataset")) - .put(new Info("torch::data::datasets::Dataset::get_batch", - "torch::data::datasets::BatchDataset >::get_batch") - .javaText("public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request);")) + .put(new Info("torch::data::datasets::MapDataset > >::get_batch") + .javaText("public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal SizeTArrayRef indices);\n" + + "public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal @Cast({\"uint64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long... 
indices);")) + + // Simple implementation from tensor.h serving a dataset from a single tensor + .put(new Info("torch::data::datasets::TensorDataset")) // Ensure proper ns resolution + .put(new Info( + "torch::data::datasets::Dataset" + ).pointerTypes("TensorDatasetBase")) + .put(new Info( + "torch::data::datasets::BatchDataset >" + ).pointerTypes("TensorBatchDataset")) + .put(new Info("torch::data::datasets::Dataset::get_batch", + "torch::data::datasets::BatchDataset >::get_batch") + .javaText("public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request);\n" + + "public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({\"uint64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long... request);")) ; + for (String[] ex : new String[][]{ + /* Prefix, Data, Target */ + {"", "torch::Tensor", "torch::Tensor"}, + {"Tensor", "torch::Tensor", "torch::data::example::NoTarget"} + }) { + String example = ex[2] == null ? template("torch::data::Example", ex[1]) : template("torch::data::Example", ex[1], ex[2]); + ; + String p = ex[0]; + String chunkDataReader = template("torch::data::datasets::ChunkDataReader", example, template("std::vector", example)); + String mangledChunkDataReader = mangle(chunkDataReader); + String mangledJavaDataset = mangle(template("javacpp::Dataset", ex[1], ex[2])); + String mangledJavaStreamDataset = mangle(template("javacpp::StreamDataset", ex[1], ex[2])); + String mangledJavaStatefulDataset = mangle(template("javacpp::StatefulDataset", ex[1], ex[2])); + + infoMap + .put(new Info( + example, + template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::OutputBatchType" + ).pointerTypes(p + "Example")) + .put(new Info( + template("std::vector", example), + 
template("std::vector", template("torch::data::datasets::Dataset", template("javacpp::Dataset", ex[1], ex[2]), example) + "::ExampleType"), + template("std::vector", template("torch::data::datasets::Dataset", template("javacpp::StreamDataset", ex[1], ex[2]), example) + "::ExampleType"), + template("std::vector", template("torch::data::datasets::Dataset", template("javacpp::StatefulDataset", ex[1], ex[2]), example) + "::ExampleType"), + template("std::vector", template("torch::data::datasets::Dataset", mangledJavaDataset, example) + "::ExampleType"), + template("std::vector", template("torch::data::datasets::Dataset", mangledJavaStreamDataset, example) + "::ExampleType"), + template("std::vector", template("torch::data::datasets::Dataset", mangledJavaStatefulDataset, example) + "::ExampleType") + ).pointerTypes(p + "ExampleVector").define()) + .put(new Info(template("c10::optional", example)).pointerTypes(p + "ExampleOptional").define()) + .put(new Info( + template("c10::optional", template("std::vector", example)), + template("c10::optional", mangledChunkDataReader + "::BatchType"), + template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler") + "::BatchType", + mangledJavaStreamDataset + "::BatchType" + ).pointerTypes(p + "ExampleVectorOptional").define()) + .put(new Info( + template("torch::data::Iterator", example), + template("torch::data::Iterator", mangledJavaDataset + "::BatchType::value_type") + ).pointerTypes(p + "ExampleIterator").purify()) + .put(new Info( + template("torch::data::Iterator", template("std::vector", example)), + template("torch::data::Iterator", mangledJavaDataset + "::BatchType"), + template("torch::data::Iterator", mangledJavaStreamDataset + "::BatchType"), + template("torch::data::Iterator", mangledJavaStatefulDataset + "::BatchType::value_type") + ).purify().pointerTypes(p + "ExampleVectorIterator")) + + .put(new Info( + 
template("torch::data::transforms::BatchTransform", template("std::vector", example), example), + template("torch::data::transforms::Collation", example) + ).pointerTypes(p + "ExampleCollation")) + .put(new Info(template("torch::data::transforms::Stack", example)).pointerTypes(p + "ExampleStack")) + .put(new Info(chunkDataReader).pointerTypes("Chunk" + p + "DataReader").virtualize()) + .put(new Info( + template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler") + ).pointerTypes("Chunk" + p + "Dataset")) + .put(new Info( + template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler") + "::ChunkDataset" + ).javaText( + "public Chunk" + p + "Dataset(\n" + + " Chunk" + p + "DataReader chunk_reader,\n" + + " RandomSampler chunk_sampler,\n" + + " RandomSampler example_sampler,\n" + + " ChunkDatasetOptions options) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, null); }\n" + + "public Chunk" + p + "Dataset(\n" + + " Chunk" + p + "DataReader chunk_reader,\n" + + " RandomSampler chunk_sampler,\n" + + " RandomSampler example_sampler,\n" + + " ChunkDatasetOptions options,\n" + + " Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); }\n" + + "private native void allocate(\n" + + " @ByVal @Cast(\"" + mangledChunkDataReader + "*\") Chunk" + p + "DataReader chunk_reader,\n" + + " @ByVal RandomSampler chunk_sampler,\n" + + " @ByVal RandomSampler example_sampler,\n" + + " @ByVal ChunkDatasetOptions options,\n" + + " @ByVal(nullValue = \"std::function&)>()\") @Cast(\"std::function&)>*\") Pointer preprocessing_policy);\n")) + .put(new Info( + template("torch::data::datasets::StatefulDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, 
"torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler"), mangledChunkDataReader + "::BatchType", "size_t") + ).pointerTypes("ChunkStateful" + p + "Dataset")) + .put(new Info( + template("torch::data::datasets::BatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler"), template("c10::optional", mangledChunkDataReader + "::BatchType"), "size_t"), + template("torch::data::datasets::BatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler"), template("std::vector", example)) + ).pointerTypes("Chunk" + p + "BatchDataset")) + .put(new Info( + template("torch::data::datasets::BatchDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("c10::optional", mangledChunkDataReader + "::BatchType"), "size_t"), + template("torch::data::datasets::BatchDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler") + "::BatchType", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler") + "::BatchRequestType") + ).pointerTypes("ChunkBatchShared" + p + "BatchDataset")) + .put(new Info( + template("torch::data::datasets::BatchDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, 
"torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("c10::optional", mangledChunkDataReader + "::BatchType"), "size_t") + "::map" + ).javaText("public native @ByVal ChunkMap" + p + "Dataset map(@ByVal " + p + "ExampleStack transform);")) + .put(new Info( + template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")) + ).pointerTypes("ChunkShared" + p + "BatchDataset")) + .put(new Info( + template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + ).pointerTypes("ChunkMap" + p + "Dataset")) + .put(new Info( + template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::reset" + ).skip()) + .put(new Info( + template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::DatasetType" + ).pointerTypes("ChunkShared" + p + "BatchDataset")) + .put(new Info( + template("torch::data::datasets::BatchDataset", template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), 
template("torch::data::transforms::Stack", example)), template("std::vector", example), "at::ArrayRef"), + template("torch::data::datasets::BatchDataset", template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)), template("torch::data::datasets::detail::optional_if_t", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")) + "::is_stateful", template("torch::data::transforms::Stack", example) + "::OutputBatchType"), template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")) + "::BatchRequestType") + ).pointerTypes("ChunkMap" + p + "BatchDataset")) + .put(new Info( + template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::BatchRequestType", + template("torch::data::datasets::BatchDataset", mangledJavaDataset, template("std::vector", example)) + "::BatchRequest", + template("torch::data::datasets::BatchDataset", template("javacpp::Dataset", ex[1], ex[2]), template("std::vector", example)) + "::BatchRequest" + ).pointerTypes("SizeTArrayRef", "@Cast({\"uint64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long...")) + .put(new Info( + template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", 
template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::get_batch" + ).javaText("public native @Name(\"get_batch\") @ByVal " + p + "ExampleOptional get_batch_example(@Cast(\"size_t\") long indices);")) + .put(new Info( + template("torch::data::DataLoaderBase", template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)), example, "size_t"), + template("torch::data::DataLoaderBase", template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)), template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::BatchType::value_type", template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::BatchRequestType") + ).purify().pointerTypes("ChunkRandom" + p + "DataLoaderBase")) + .put(new Info( + template("torch::data::StatefulDataLoader", template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", 
template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example))) + ).pointerTypes("ChunkRandom" + p + "DataLoader")) + + .put(new Info( + template("torch::data::datasets::BatchDataset", template("javacpp::Dataset", ex[1], ex[2]), template("std::vector", example)) + ).pointerTypes("Java" + p + "BatchDataset")) + .put(new Info( + template("torch::data::datasets::Dataset", template("javacpp::Dataset", ex[1], ex[2]), example) + ).pointerTypes("Java" + p + "DatasetBase").purify()) + .put(new Info( + template("torch::data::StatelessDataLoader", mangledJavaDataset, "torch::data::samplers::RandomSampler") + ).pointerTypes("JavaRandom" + p + "DataLoader")) + .put(new Info( + template("torch::data::DataLoaderBase", mangledJavaDataset, mangledJavaDataset + "::BatchType", "torch::data::samplers::RandomSampler::BatchRequestType") + ).pointerTypes("JavaRandom" + p + "DataLoaderBase").purify()) + .put(new Info( + template("torch::data::StatelessDataLoader", mangledJavaDataset, "torch::data::samplers::DistributedRandomSampler") + ).pointerTypes("JavaDistributedRandom" + p + "DataLoader")) + .put(new Info( + template("torch::data::DataLoaderBase", mangledJavaDataset, mangledJavaDataset + "::BatchType", "torch::data::samplers::DistributedRandomSampler::BatchRequestType") + ).pointerTypes("JavaDistributedRandom" + p + "DataLoaderBase").purify()) + .put(new Info( + template("torch::data::StatelessDataLoader", mangledJavaDataset, "torch::data::samplers::DistributedSequentialSampler") + ).pointerTypes("JavaDistributedSequential" + p + "DataLoader")) + .put(new Info( + template("torch::data::DataLoaderBase", mangledJavaDataset, mangledJavaDataset + "::BatchType", "torch::data::samplers::DistributedSequentialSampler::BatchRequestType") + ).pointerTypes("JavaDistributedSequential" + p + "DataLoaderBase").purify()) + .put(new Info( + 
template("torch::data::StatelessDataLoader", mangledJavaDataset, "torch::data::samplers::SequentialSampler") + ).pointerTypes("JavaSequential" + p + "DataLoader")) + .put(new Info( + template("torch::data::DataLoaderBase", mangledJavaDataset, mangledJavaDataset + "::BatchType", "torch::data::samplers::SequentialSampler::BatchRequestType") + ).pointerTypes("JavaSequential" + p + "DataLoaderBase").purify()) + .put(new Info( + template("torch::data::datasets::BatchDataset", template("javacpp::StreamDataset", ex[1], ex[2]), template("std::vector", example), "size_t") + ).pointerTypes("JavaStream" + p + "BatchDataset")) + .put(new Info( + template("torch::data::StatelessDataLoader", mangledJavaStreamDataset, "torch::data::samplers::StreamSampler") + ).pointerTypes("JavaStream" + p + "DataLoader")) + .put(new Info( + template("torch::data::DataLoaderBase", mangledJavaStreamDataset, mangledJavaStreamDataset + "::BatchType", "torch::data::samplers::StreamSampler::BatchRequestType") + ).pointerTypes("JavaStream" + p + "DataLoaderBase").purify()) + + .put(new Info( + template("javacpp::Dataset", ex[1], ex[2]) + ).pointerTypes("Java" + p + "Dataset").virtualize()) + .put(new Info( + mangledJavaDataset + ).pointerTypes("@Cast(\"" + mangledJavaDataset + "*\") Java" + p + "Dataset")) + .put(new Info( + template("javacpp::StreamDataset", ex[1], ex[2]) + ).pointerTypes("JavaStream" + p + "Dataset").virtualize()) + .put(new Info( + mangledJavaStreamDataset + ).pointerTypes("@Cast(\"" + mangledJavaStreamDataset + "*\") JavaStream" + p + "Dataset")) + .put(new Info( + template("javacpp::StatefulDataset", ex[1], ex[2]) + ).pointerTypes("JavaStateful" + p + "Dataset").virtualize()) + .put(new Info( + mangledJavaStatefulDataset + ).pointerTypes("@Cast(\"" + mangledJavaStatefulDataset + "*\") JavaStateful" + p + "Dataset")) + .put(new Info( + template("torch::data::datasets::StatefulDataset", template("javacpp::StatefulDataset", ex[1], ex[2]), template("std::vector", example), "size_t") 
+ ).pointerTypes("JavaStateful" + p + "DatasetBase").purify()) + .put(new Info( + template("torch::data::StatefulDataLoader", mangledJavaStatefulDataset) + ).pointerTypes("JavaStateful" + p + "DataLoader")) + .put(new Info( + template("torch::data::DataLoaderBase", mangledJavaStatefulDataset, mangledJavaStatefulDataset + "::BatchType::value_type", mangledJavaStatefulDataset + "::BatchRequestType") + ).pointerTypes("JavaStateful" + p + "DataLoaderBase").purify()) + .put(new Info( + template("torch::data::datasets::BatchDataset", template("javacpp::StatefulDataset", ex[1], ex[2]), template("c10::optional", template("std::vector", example)), "size_t") + ).pointerTypes("JavaStateful" + p + "BatchDataset").purify()) + ; + } + addCppName(infoMap, + "std::vector >", + "std::vector >::ExampleType>"); + + // Because explicitly defined in stack.h + addCppName(infoMap, + "torch::data::Example", + "torch::data::Example<>"); + addCppName(infoMap, + "torch::data::transforms::Stack >", + "torch::data::transforms::Stack >"); + addCppName(infoMap, + "torch::data::transforms::Stack >", + "torch::data::transforms::Stack"); + addCppName(infoMap, + "torch::data::transforms::Collation,std::vector > >", + "torch::data::transforms::Collation >"); + //// Tensor factories String[] factories = {"_cudnn_init_dropout_state", "arange", "bartlett_window", "blackman_window", "empty", "_empty_affine_quantized", @@ -2057,11 +2218,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::data::DataLoaderBase::QuitWorker", "torch::data::DataLoaderBase::Result", "torch::data::DataLoaderBase::Sequenced", - "torch::data::FullDataLoaderOptions", - "torch::data::Iterator > > >", - "torch::data::Iterator > >", "torch::data::WorkerException", - "torch::data::datasets::TensorDataset", "torch::data::datasets::detail::BatchDataBuffer::UnwrappedBatchData", "torch::detail::ClassNotSelected", "torch::detail::TorchLibraryInit", @@ -2283,6 +2440,48 @@ private static String 
template(String t, String... args) { return sb.toString(); } + // Copy from Generator + private static String mangle(String name) { + StringBuilder mangledName = new StringBuilder(2 * name.length()); + mangledName.append("JavaCPP_"); + for (int i = 0; i < name.length(); i++) { + char c = name.charAt(i); + if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')) { + mangledName.append(c); + } else if (c == '_') { + mangledName.append("_1"); + } else if (c == ';') { + mangledName.append("_2"); + } else if (c == '[') { + mangledName.append("_3"); + } else if (c == '.' || c == '/') { + mangledName.append("_"); + } else { + String code = Integer.toHexString(c); + mangledName.append("_0"); + switch (code.length()) { + case 1: + mangledName.append("0"); + case 2: + mangledName.append("0"); + case 3: + mangledName.append("0"); + default: + mangledName.append(code); + } + } + } + return mangledName.toString(); + } + + // We cannot add a cppName to an existing info, we must clone the info and change the cpp name + // keeping the first (main) cppName. + static private void addCppName(InfoMap infoMap, String... n) { + Info i = new Info(infoMap.getFirst(n[0])); + i.cppNames(n); + infoMap.put(i); + } + static class ArrayInfo { String baseJavaName; String[] elementTypes = new String[0]; diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/include/datasets.h b/pytorch/src/main/resources/org/bytedeco/pytorch/include/datasets.h new file mode 100644 index 00000000000..f26b8630588 --- /dev/null +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/include/datasets.h @@ -0,0 +1,47 @@ +/* + I don't think we can directly virtualize Dataset<...> because of CRTP in Dataset. + + Because of issue #723, we cannot virtualize superclasses of javacpp::*Dataset, only javacpp::*Dataset. + We must redeclare/redefine virtual functions of parents in these classes, so that the JavaCPP peer classes implement + the wrappers that call the Java implementations. 
+*/ + +namespace javacpp { + +/** + * Abstract class for stateless datasets to be subclassed by Java user code. + */ + template + struct Dataset : public torch::data::datasets::Dataset, torch::data::Example> { + virtual ~Dataset() = default; + virtual torch::data::Example get(size_t index) override = 0; + virtual c10::optional size() const override = 0; + virtual std::vector> get_batch(c10::ArrayRef indices) override { + return torch::data::datasets::Dataset, torch::data::Example>::get_batch(indices); + }; +}; + +/** + * Abstract class for stateless stream datasets to be subclassed by Java user code. + */ +template +struct StreamDataset : public torch::data::datasets::BatchDataset, std::vector>, size_t> { + virtual ~StreamDataset() = default; + virtual c10::optional size() const override = 0; + virtual std::vector> get_batch(size_t size) override = 0; +}; + +/** + * Abstract class for stateful datasets to be subclassed by Java user code. + */ +template +struct StatefulDataset : public torch::data::datasets::StatefulDataset, std::vector>, size_t> { + virtual ~StatefulDataset() = default; + virtual c10::optional size() const override = 0; + virtual c10::optional>> get_batch(size_t size) override = 0; + virtual void reset() override = 0; + virtual void save(torch::serialize::OutputArchive& archive) const override = 0; + virtual void load(torch::serialize::InputArchive& archive) override = 0; +}; + +} \ No newline at end of file diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index a1ba3d04897..cc26edbecb6 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -1419,4 +1419,6 @@ #include "torch/csrc/jit/frontend/tree_views.h" #include "torch/csrc/jit/frontend/script_type_parser.h" #include "torch/csrc/jit/serialization/unpickler.h" -#include 
"torch/csrc/jit/serialization/pickle.h" \ No newline at end of file +#include "torch/csrc/jit/serialization/pickle.h" + +#include "datasets.h" \ No newline at end of file From 1dc9d4f8267fdd3c5ebc8221cbf4a7921876fb4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 24 Oct 2023 14:01:22 +0200 Subject: [PATCH 13/26] Accept Java arrays for primitive ArrayRef --- .../org/bytedeco/pytorch/ByteArrayRef.java | 1 + .../pytorch/ChunkMapBatchDataset.java | 1 + .../pytorch/ChunkMapTensorBatchDataset.java | 1 + .../DimVectorInferExpandGeometryResult.java | 4 +- .../org/bytedeco/pytorch/FloatArrayRef.java | 1 + .../org/bytedeco/pytorch/IntArrayRef.java | 1 + .../org/bytedeco/pytorch/JavaDatasetBase.java | 1 + .../pytorch/JavaTensorDatasetBase.java | 1 + .../org/bytedeco/pytorch/LongArrayRef.java | 2 +- .../pytorch/LongArrayRefOptional.java | 4 +- .../java/org/bytedeco/pytorch/LongList.java | 4 +- .../bytedeco/pytorch/LongVaryingShape.java | 4 +- .../bytedeco/pytorch/MNISTBatchDataset.java | 1 + .../org/bytedeco/pytorch/MNISTDataset.java | 1 + .../pytorch/MNISTMapBatchDataset.java | 1 + .../java/org/bytedeco/pytorch/MetaBase.java | 20 +- .../org/bytedeco/pytorch/ShortArrayRef.java | 1 + .../org/bytedeco/pytorch/SizeTArrayRef.java | 1 + .../org/bytedeco/pytorch/SizesAndStrides.java | 4 +- .../org/bytedeco/pytorch/SymbolicShape.java | 4 +- .../gen/java/org/bytedeco/pytorch/Tensor.java | 161 +- .../java/org/bytedeco/pytorch/TensorImpl.java | 12 +- .../org/bytedeco/pytorch/TensorIterator.java | 4 +- .../bytedeco/pytorch/TensorIteratorBase.java | 10 +- .../pytorch/TensorIteratorConfig.java | 6 +- .../java/org/bytedeco/pytorch/TensorType.java | 12 +- .../pytorch/VariableHooksInterface.java | 2 +- .../org/bytedeco/pytorch/global/torch.java | 2255 +++++++++-------- .../org/bytedeco/pytorch/presets/torch.java | 16 +- 29 files changed, 1289 insertions(+), 1247 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java index 10a6ca2a047..2c7414c1709 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java @@ -103,6 +103,7 @@ public class ByteArrayRef extends Pointer { /** equals - Check for element-wise equality. */ public native @Cast("const bool") boolean equals(@ByVal ByteArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte... RHS); /** slice(n, m) - Take M elements of the array starting at element N */ public native @Const @ByVal ByteArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java index 5a8825e8c80..69c084905ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java @@ -28,6 +28,7 @@ public class ChunkMapBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); /** Returns the size of the dataset, or an empty optional if it is unsized. 
*/ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java index 6ba2c68612f..2fbce701de8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java @@ -28,6 +28,7 @@ public class ChunkMapTensorBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); /** Returns the size of the dataset, or an empty optional if it is unsized. */ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java index 1fee80e6d40..46f7032ccb6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java @@ -32,6 +32,6 @@ public class DimVectorInferExpandGeometryResult extends Pointer { private native void allocate(@Cast("size_t") long ndim); public DimVectorInferExpandGeometryResult(@ByVal LongArrayRef sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); } private native void allocate(@ByVal LongArrayRef sizes_, @Cast("size_t") long ndim); - public DimVectorInferExpandGeometryResult(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); } - private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes_, @Cast("size_t") long 
ndim); + public DimVectorInferExpandGeometryResult(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); } + private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes_, @Cast("size_t") long ndim); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java index ae0f3aa9ea8..093a011160e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java @@ -103,6 +103,7 @@ public class FloatArrayRef extends Pointer { /** equals - Check for element-wise equality. */ public native @Cast("const bool") boolean equals(@ByVal FloatArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float... RHS); /** slice(n, m) - Take M elements of the array starting at element N */ public native @Const @ByVal FloatArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java index 9ec05261006..0988c901f88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java @@ -103,6 +103,7 @@ public class IntArrayRef extends Pointer { /** equals - Check for element-wise equality. */ public native @Cast("const bool") boolean equals(@ByVal IntArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int... 
RHS); /** slice(n, m) - Take M elements of the array starting at element N */ public native @Const @ByVal IntArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java index da790a4b23f..745d26d64ac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaDatasetBase.java @@ -31,4 +31,5 @@ public class JavaDatasetBase extends JavaBatchDataset { * The default implementation calls {@code get()} for every requested index * in the batch. */ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef indices); + public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... indices); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java index a386457b268..e0cfc8559a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorDatasetBase.java @@ -31,4 +31,5 @@ public class JavaTensorDatasetBase extends JavaTensorBatchDataset { * The default implementation calls {@code get()} for every requested index * in the batch. */ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef indices); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... 
indices); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java index 3d957c2bb04..6ddd4b17d82 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java @@ -105,7 +105,7 @@ public class LongArrayRef extends Pointer { /** equals - Check for element-wise equality. */ public native @Cast("const bool") boolean equals(@ByVal LongArrayRef RHS); - public native @Cast("const bool") boolean equals(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... RHS); + public native @Cast("const bool") boolean equals(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... RHS); /** slice(n, m) - Take M elements of the array starting at element N */ public native @Const @ByVal LongArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java index d5aeec4c537..83ab525139f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java @@ -23,7 +23,7 @@ public class LongArrayRefOptional extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LongArrayRefOptional(Pointer p) { super(p); } public LongArrayRefOptional(LongArrayRef value) { this(); put(value); } - public LongArrayRefOptional(@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... value) { this(); put(value); } + public LongArrayRefOptional(@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
value) { this(); put(value); } public LongArrayRefOptional() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef LongArrayRefOptional put(@ByRef LongArrayRefOptional x); @@ -32,6 +32,6 @@ public class LongArrayRefOptional extends Pointer { public native void reset(); public native @Name("value") @ByRef LongArrayRef get(); @ValueSetter public native LongArrayRefOptional put(@ByRef LongArrayRef value); - @ValueSetter public native LongArrayRefOptional put(@ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... value); + @ValueSetter public native LongArrayRefOptional put(@ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java index 33e4be27673..84a87ff9f2a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java @@ -46,8 +46,8 @@ public class LongList extends Pointer { */ public LongList(@ByVal LongArrayRef initial_values) { super((Pointer)null); allocate(initial_values); } private native void allocate(@ByVal LongArrayRef initial_values); - public LongList(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... initial_values) { super((Pointer)null); allocate(initial_values); } - private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... initial_values); + public LongList(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... initial_values) { super((Pointer)null); allocate(initial_values); } + private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... initial_values); /** * Create a generic list with runtime type information. 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java index 88f25ea2bd4..dc220822fcd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java @@ -29,8 +29,8 @@ public class LongVaryingShape extends Pointer { public LongVaryingShape(@ByVal LongArrayRef vec) { super((Pointer)null); allocate(vec); } private native void allocate(@ByVal LongArrayRef vec); - public LongVaryingShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... vec) { super((Pointer)null); allocate(vec); } - private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... vec); + public LongVaryingShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... vec); public LongVaryingShape(@ByVal(nullValue = "c10::optional(c10::nullopt)") SizeTOptional size) { super((Pointer)null); allocate(size); } private native void allocate(@ByVal(nullValue = "c10::optional(c10::nullopt)") SizeTOptional size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java index 46e103dc25e..e67ad1d5b65 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java @@ -30,6 +30,7 @@ public class MNISTBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... 
request); /** Returns the size of the dataset, or an empty optional if it is unsized. */ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java index d94f4f21748..81c26c9fccd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java @@ -38,4 +38,5 @@ public class MNISTDataset extends MNISTBatchDataset { * The default implementation calls {@code get()} for every requested index * in the batch. */ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef indices); + public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... indices); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java index 5ecc5774461..5939e42c8c9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java @@ -28,6 +28,7 @@ public class MNISTMapBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); + public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); /** Returns the size of the dataset, or an empty optional if it is unsized. 
*/ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java index f0aafab5c3f..05270ff5b61 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java @@ -66,14 +66,14 @@ public native void set_output_strided( @ByVal TensorOptions options); public native void set_output_strided( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @ByVal TensorOptions options, @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); public native void set_output_strided( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @ByVal TensorOptions options); // Use this function whenever the kernel knows how to handle arbitrary strided @@ -92,14 +92,14 @@ public native void set_output_raw_strided( @ByVal TensorOptions options); public native void set_output_raw_strided( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides_hint, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides_hint, @ByVal TensorOptions options, @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); public native void set_output_raw_strided( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides_hint, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides_hint, @ByVal TensorOptions options); // Use this function if the kernel requires contiguous strides. @@ -115,12 +115,12 @@ public native void set_output_contiguous( @ByVal TensorOptions options); public native void set_output_contiguous( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @ByVal TensorOptions options, @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); public native void set_output_contiguous( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @ByVal TensorOptions options); // Returns a reference to an undefined tensor if there is no presupplied diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java index 7b4761e342f..fdc804d46cc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java @@ -103,6 +103,7 @@ public class ShortArrayRef extends Pointer { /** equals - Check for element-wise equality. 
*/ public native @Cast("const bool") boolean equals(@ByVal ShortArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal @Cast({"jshort*", "c10::ArrayRef", "std::vector&"}) @StdVector("jshort") short... RHS); /** slice(n, m) - Take M elements of the array starting at element N */ public native @Const @ByVal ShortArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java index c4ca3e0ed65..e524f683eb6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java @@ -97,6 +97,7 @@ public class SizeTArrayRef extends Pointer { /** equals - Check for element-wise equality. */ public native @Cast("const bool") boolean equals(@ByVal SizeTArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... RHS); /** slice(n, m) - Take M elements of the array starting at element N */ public native @Const @ByVal SizeTArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java index ebb8a24e0dc..546e13bdba2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java @@ -68,10 +68,10 @@ public class SizesAndStrides extends Pointer { public native @ByVal @NoException(true) LongArrayRef sizes_arrayref(); public native void set_sizes(@ByVal LongArrayRef newSizes); - public native void set_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... newSizes); + public native void set_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
newSizes); public native void set_strides(@ByVal LongArrayRef strides); - public native void set_strides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + public native void set_strides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); public native @Cast("int64_t*") @NoException(true) LongPointer strides_data(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java index 1d9450851e2..39fe279d8f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java @@ -54,8 +54,8 @@ public class SymbolicShape extends Pointer { public SymbolicShape(@ByVal LongArrayRef dims) { super((Pointer)null); allocate(dims); } private native void allocate(@ByVal LongArrayRef dims); - public SymbolicShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims) { super((Pointer)null); allocate(dims); } - private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + public SymbolicShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims) { super((Pointer)null); allocate(dims); } + private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dims); public native @ByVal @Name("operator []") ShapeSymbol get(@Cast("size_t") long i); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 102a9d2cd32..b6c979b4382 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -348,8 +348,8 @@ private native void allocate( //example //Tensor * add(Tensor & b); - public native void __dispatch__backward(@ByVal @Cast("at::TensorList*") TensorArrayRef inputs, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional gradient, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/); - public native void __dispatch__backward(@ByVal @Cast("at::TensorList*") TensorArrayRef inputs); + public native void __dispatch__backward(@ByVal TensorArrayRef inputs, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional gradient, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/); + public native void __dispatch__backward(@ByVal TensorArrayRef inputs); public native void __dispatch_set_data(@Const @ByRef Tensor new_data); public native @ByVal Tensor __dispatch_data(); public native @Cast("bool") boolean __dispatch_is_leaf(); @@ -433,14 +433,14 @@ private native void allocate( public native @ByRef Tensor arctanh_(); public native @ByVal Tensor as_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); public native @ByVal Tensor as_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); - public native @ByVal Tensor as_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional 
storage_offset); - public native @ByVal Tensor as_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + public native @ByVal Tensor as_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); + public native @ByVal Tensor as_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); public native @ByVal Tensor as_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); public native @ByVal Tensor as_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); public native @Const @ByRef Tensor as_strided_(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); public native @Const @ByRef Tensor as_strided_(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); - public native @Const @ByRef Tensor as_strided_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); - public native @Const @ByRef Tensor as_strided_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); + public native @Const @ByRef Tensor as_strided_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); + public native @Const @ByRef Tensor as_strided_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); public native @Const @ByRef Tensor as_strided__symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); public native @Const @ByRef Tensor as_strided__symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); public native @ByVal Tensor asin(); @@ -481,7 +481,7 @@ private native void allocate( public native @ByRef Tensor logical_or_(@Const @ByRef Tensor other); public native @ByVal Tensor bmm(@Const @ByRef Tensor mat2); public native @ByVal Tensor broadcast_to(@ByVal LongArrayRef size); - public native @ByVal Tensor broadcast_to(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByVal Tensor broadcast_to(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); public native @ByVal Tensor broadcast_to_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor ceil(); public native @ByRef Tensor ceil_(); @@ -495,8 +495,8 @@ private native void allocate( public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymInt sections); public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] indices, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] indices, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices); public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); @@ -534,7 +534,7 @@ private native void allocate( public native @ByVal Tensor cosh(); public native @ByRef Tensor cosh_(); public native @ByVal Tensor count_nonzero(@ByVal LongArrayRef dim); - public native @ByVal Tensor count_nonzero(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + public native @ByVal Tensor count_nonzero(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); public native @ByVal Tensor count_nonzero(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); public native @ByVal Tensor count_nonzero(); public native @ByVal Tensor cov(@Cast("int64_t") long correction/*=1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional fweights, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional aweights); @@ -596,53 +596,53 @@ private native void allocate( public native @ByVal Tensor vdot(@Const @ByRef Tensor other); public native @ByVal Tensor new_empty(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_empty(@ByVal LongArrayRef size); - public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); + public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); public native @ByVal Tensor new_empty(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_empty_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_empty_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor new_empty_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); - public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions 
options); - public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); public native @ByVal Tensor new_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef 
stride); public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); - public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); + public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value); public native @ByVal Tensor new_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal 
LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value); public native @ByVal Tensor new_full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_zeros(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_zeros(@ByVal LongArrayRef size); - public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); public native @ByVal Tensor new_zeros(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_zeros_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_zeros_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor new_zeros_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_ones(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_ones(@ByVal LongArrayRef size); - public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); public native @ByVal Tensor new_ones(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_ones_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_ones_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor new_ones_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @Const @ByRef Tensor resize_(@ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); public native @Const @ByRef Tensor resize_(@ByVal LongArrayRef size); - public native @Const @ByRef Tensor resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); - public native @Const @ByRef Tensor resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); + public native @Const @ByRef Tensor resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); + public native @Const @ByRef Tensor resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); public native @Const @ByRef Tensor resize__symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); public native @Const @ByRef Tensor resize__symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor erf(); @@ -657,8 +657,8 @@ private native void allocate( public native @ByRef Tensor expm1_(); public native @ByVal Tensor expand(@ByVal LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); public native @ByVal Tensor expand(@ByVal LongArrayRef size); - public native @ByVal Tensor expand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); - public native @ByVal Tensor expand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByVal Tensor expand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean implicit/*=false*/); + public native @ByVal Tensor expand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); public native @ByVal Tensor expand_symint(@ByVal SymIntArrayRef size, @Cast("bool") boolean implicit/*=false*/); public native @ByVal Tensor expand_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor expand_as(@Const @ByRef Tensor other); @@ -668,10 +668,10 @@ private native void allocate( public native @ByVal Tensor flatten(@ByVal Dimname start_dim, @ByVal Dimname end_dim, @ByVal Dimname out_dim); public native @ByVal Tensor flatten(@ByVal DimnameArrayRef dims, @ByVal Dimname out_dim); public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal LongArrayRef sizes); - public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); public native @ByVal Tensor unflatten_symint(@Cast("int64_t") long dim, @ByVal SymIntArrayRef sizes); public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal LongArrayRef sizes, @ByVal DimnameArrayRef names); - public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal DimnameArrayRef names); + public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @ByVal DimnameArrayRef names); public native @ByVal Tensor unflatten_symint(@ByVal Dimname dim, @ByVal SymIntArrayRef sizes, @ByVal DimnameArrayRef names); public native @ByRef Tensor fill_(@Const @ByRef Scalar value); public native @ByRef Tensor fill_(@Const @ByRef Tensor value); @@ -743,8 +743,8 @@ private native void allocate( public native @ByVal Tensor logcumsumexp(@ByVal Dimname dim); public native @ByVal Tensor logsumexp(@ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor 
logsumexp(@ByVal LongArrayRef dim); - public native @ByVal Tensor logsumexp(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal Tensor logsumexp(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + public native @ByVal Tensor logsumexp(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor logsumexp(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); public native @ByVal Tensor logsumexp(@ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor logsumexp(@ByVal DimnameArrayRef dim); public native @ByVal Tensor matmul(@Const @ByRef Tensor other); @@ -758,7 +758,7 @@ private native void allocate( public native @ByVal T_TensorTensor_T max(@ByVal Dimname dim); public native @ByVal Tensor amax(@ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor amax(); - public native @ByVal Tensor amax(@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor amax(@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor mean(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor mean(); public native @ByVal Tensor mean(@ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @@ -786,7 +786,7 @@ private native void allocate( public native @ByVal T_TensorTensor_T min(@ByVal Dimname dim); 
public native @ByVal Tensor amin(@ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor amin(); - public native @ByVal Tensor amin(@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor amin(@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor mm(@Const @ByRef Tensor mat2); public native @ByVal T_TensorTensor_T mode(@Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal T_TensorTensor_T mode(); @@ -810,12 +810,12 @@ private native void allocate( public native @ByVal Tensor narrow(@Cast("int64_t") long dim, @Const @ByRef Tensor start, @Cast("int64_t") long length); public native @ByVal Tensor narrow_symint(@Cast("int64_t") long dim, @Const @ByRef Tensor start, @ByVal SymInt length); public native @ByVal Tensor permute(@ByVal LongArrayRef dims); - public native @ByVal Tensor permute(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + public native @ByVal Tensor permute(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); public native @ByVal Tensor movedim(@ByVal LongArrayRef source, @ByVal LongArrayRef destination); - public native @ByVal Tensor movedim(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... destination); + public native @ByVal Tensor movedim(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
destination); public native @ByVal Tensor movedim(@Cast("int64_t") long source, @Cast("int64_t") long destination); public native @ByVal Tensor moveaxis(@ByVal LongArrayRef source, @ByVal LongArrayRef destination); - public native @ByVal Tensor moveaxis(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... destination); + public native @ByVal Tensor moveaxis(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... destination); public native @ByVal Tensor moveaxis(@Cast("int64_t") long source, @Cast("int64_t") long destination); public native @ByVal Tensor numpy_T(); public native @ByVal Tensor matrix_H(); @@ -840,7 +840,7 @@ private native void allocate( public native @ByVal Tensor negative(); public native @ByRef Tensor negative_(); public native @ByVal Tensor repeat(@ByVal LongArrayRef repeats); - public native @ByVal Tensor repeat(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... repeats); + public native @ByVal Tensor repeat(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
repeats); public native @ByVal Tensor repeat_symint(@ByVal SymIntArrayRef repeats); public native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); public native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats); @@ -849,10 +849,10 @@ private native void allocate( public native @ByVal Tensor repeat_interleave_symint(@ByVal SymInt repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); public native @ByVal Tensor repeat_interleave_symint(@ByVal SymInt repeats); public native @ByVal Tensor reshape(@ByVal LongArrayRef shape); - public native @ByVal Tensor reshape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); + public native @ByVal Tensor reshape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... shape); public native @ByVal Tensor reshape_symint(@ByVal SymIntArrayRef shape); public native @ByVal Tensor _reshape_alias(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); - public native @ByVal Tensor _reshape_alias(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + public native @ByVal Tensor _reshape_alias(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
stride); public native @ByVal Tensor _reshape_alias_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); public native @ByVal Tensor reshape_as(@Const @ByRef Tensor other); public native @ByVal Tensor round(); @@ -899,8 +899,8 @@ private native void allocate( public native @ByVal Tensor diagonal_scatter(@Const @ByRef Tensor src); public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); - public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); - public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); + public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
stride); public native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); public native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); public native @ByVal Tensor smm(@Const @ByRef Tensor mat2); @@ -918,40 +918,40 @@ private native void allocate( public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymInt split_size); public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal LongArrayRef split_size); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_size, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_size); + public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_size, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_size); public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntArrayRef split_size, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntArrayRef split_size); public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); + public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_sizes); public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes); public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); + public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes); public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Cast("int64_t") long sections); public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
indices); + public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Cast("int64_t") long sections); public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); + public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Cast("int64_t") long sections); public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); + public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); public native @ByVal Tensor squeeze(); public native @ByVal Tensor squeeze(@Cast("int64_t") long dim); public native @ByVal Tensor squeeze(@ByVal Dimname dim); public native @ByVal Tensor squeeze(@ByVal LongArrayRef dim); - public native @ByVal Tensor squeeze(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + public native @ByVal Tensor squeeze(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); public native @ByRef Tensor squeeze_(); public native @ByRef Tensor squeeze_(@Cast("int64_t") long dim); public native @ByRef Tensor squeeze_(@ByVal LongArrayRef dim); - public native @ByRef Tensor squeeze_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + public native @ByRef Tensor squeeze_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); public native @ByRef Tensor squeeze_(@ByVal Dimname dim); public native @ByVal Tensor sspaddmm(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); public native @ByVal Tensor sspaddmm(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); @@ -973,7 +973,7 @@ private native void allocate( public native @ByVal Tensor nansum(); public native @ByVal Tensor nansum(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor sum_to_size(@ByVal LongArrayRef size); - public native @ByVal Tensor sum_to_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByVal Tensor sum_to_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); public native @ByVal Tensor sum_to_size_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor sqrt(); public native @ByRef Tensor sqrt_(); @@ -1004,25 +1004,25 @@ private native void allocate( public native @ByVal Tensor tanh(); public native @ByRef Tensor tanh_(); public native @ByVal Tensor tile(@ByVal LongArrayRef dims); - public native @ByVal Tensor tile(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dims); + public native @ByVal Tensor tile(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); public native @ByVal Tensor tile_symint(@ByVal SymIntArrayRef dims); public native @ByVal Tensor transpose(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1); public native @ByVal Tensor transpose(@ByVal Dimname dim0, @ByVal Dimname dim1); public native @ByRef Tensor transpose_(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1); public native @ByVal Tensor flip(@ByVal LongArrayRef dims); - public native @ByVal Tensor flip(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + public native @ByVal Tensor flip(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); public native @ByVal Tensor fliplr(); public native @ByVal Tensor flipud(); public native @ByVal Tensor roll(@ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); public native @ByVal Tensor roll(@ByVal LongArrayRef shifts); - public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); - public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); + public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); + public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
shifts); public native @ByVal Tensor roll_symint(@ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); public native @ByVal Tensor roll_symint(@ByVal SymIntArrayRef shifts); - public native @ByVal Tensor roll_symint(@ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + public native @ByVal Tensor roll_symint(@ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") LongArrayRef dims); public native @ByVal Tensor rot90(); - public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); + public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dims); public native @ByVal Tensor _nested_tensor_size(); public native @ByVal Tensor _nested_tensor_strides(); public native @ByVal Tensor _nested_tensor_storage_offsets(); @@ -1052,11 +1052,11 @@ private native void allocate( public native @ByVal Tensor norm(@Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); public native @ByVal Tensor norm(); public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); - public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); + public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim); - public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim); @@ -1093,9 +1093,9 @@ private native void allocate( public native @ByVal Tensor _addmm_activation(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Cast("bool") boolean use_gelu/*=false*/); public native @ByVal Tensor _addmm_activation(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); public native @Const @ByRef Tensor sparse_resize_(@ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); - public native @Const @ByRef Tensor sparse_resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); + public native @Const @ByRef Tensor sparse_resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); - public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); + public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); public native @ByVal Tensor sparse_mask(@Const @ByRef 
Tensor mask); public native @ByVal Tensor _sparse_mask_projection(@Const @ByRef Tensor mask, @Cast("bool") boolean accumulate_matches/*=false*/); public native @ByVal Tensor _sparse_mask_projection(@Const @ByRef Tensor mask); @@ -1140,20 +1140,20 @@ private native void allocate( public native @ByVal Tensor _to_sparse_csc(); public native @ByVal Tensor to_sparse_bsr(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_bsr(@ByVal LongArrayRef blocksize); - public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); - public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); + public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... blocksize); public native @ByVal Tensor _to_sparse_bsr(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor _to_sparse_bsr(@ByVal LongArrayRef blocksize); - public native @ByVal Tensor _to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); - public native @ByVal Tensor _to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
blocksize); + public native @ByVal Tensor _to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... blocksize); public native @ByVal Tensor to_sparse_bsc(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor to_sparse_bsc(@ByVal LongArrayRef blocksize); - public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); - public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); + public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... blocksize); public native @ByVal Tensor _to_sparse_bsc(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); public native @ByVal Tensor _to_sparse_bsc(@ByVal LongArrayRef blocksize); - public native @ByVal Tensor _to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); - public native @ByVal Tensor _to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
blocksize); + public native @ByVal Tensor _to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); + public native @ByVal Tensor _to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... blocksize); public native @ByVal Tensor to_mkldnn(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor to_mkldnn(); public native @ByVal Tensor dequantize(); @@ -1179,14 +1179,14 @@ private native void allocate( public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source); public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); - public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); - public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
stride); + public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); - public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); - public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); + public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); public native @ByRef Tensor set__symint(@Const @ByRef Tensor source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); public native @ByRef Tensor set__symint(@Const @ByRef Tensor source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); public native @ByRef Tensor set_(@Const @ByRef Tensor source); @@ -1199,7 +1199,7 @@ private native void allocate( public native @ByRef Tensor masked_scatter_(@Const @ByRef Tensor mask, @Const @ByRef Tensor source); public native @ByVal Tensor masked_scatter(@Const @ByRef Tensor mask, @Const @ByRef Tensor source); public native @ByVal Tensor view(@ByVal LongArrayRef size); - public native @ByVal Tensor view(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + public native @ByVal Tensor view(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); public native @ByVal Tensor view_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor view(ScalarType dtype); public native @ByRef Tensor put_(@Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate/*=false*/); @@ -1445,6 +1445,7 @@ private native void allocate( public native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor bins); public native @ByVal T_TensorTensor_T histogram(@Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); public native @ByVal T_TensorTensor_T histogram(); + public native @ByVal T_TensorTensor_T histogram(@Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); 
public native @ByVal Tensor fmod(@Const @ByRef Scalar other); public native @ByRef Tensor fmod_(@Const @ByRef Scalar other); public native @ByVal Tensor fmod(@Const @ByRef Tensor other); @@ -1621,7 +1622,7 @@ private native void allocate( public native @ByVal Tensor data(); - public native void _backward(@ByVal @Cast("at::TensorList*") TensorArrayRef inputs, @Const @ByRef TensorOptional gradient, @ByVal BoolOptional keep_graph, @Cast("bool") boolean create_graph); + public native void _backward(@ByVal TensorArrayRef inputs, @Const @ByRef TensorOptional gradient, @ByVal BoolOptional keep_graph, @Cast("bool") boolean create_graph); public native @Const @ByRef Tensor requires_grad_(@Cast("bool") boolean _requires_grad/*=true*/); public native @Const @ByRef Tensor requires_grad_(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java index 48e58e18111..d98d8bcdd70 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java @@ -703,7 +703,7 @@ public native void set_sizes_and_strides( // This is renamed to avoid breaking overload BC public native void generic_set_sizes_contiguous(@ByVal SymIntArrayRef sizes); public native void generic_set_sizes_contiguous(@ByVal LongArrayRef sizes); - public native void generic_set_sizes_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + public native void generic_set_sizes_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); /** * Change the size at some dimension. 
This DOES NOT update strides; @@ -740,7 +740,7 @@ public native void set_sizes_and_strides( * this is the responsibility of the caller */ public native void set_sizes_contiguous(@ByVal LongArrayRef new_size); - public native void set_sizes_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... new_size); + public native void set_sizes_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... new_size); /** * Set the sizes and strides of a tensor. @@ -757,12 +757,12 @@ public native void set_sizes_and_strides( @ByVal LongArrayRef new_size, @ByVal LongArrayRef new_stride); public native void set_sizes_and_strides( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] new_size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] new_stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] new_size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] new_stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); public native void set_sizes_and_strides( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] new_size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... new_stride); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] new_size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... new_stride); /** * Set whether a tensor allows changes to its metadata (e.g. 
sizes / strides / diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java index 7819686a368..063586f8b4e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java @@ -80,8 +80,8 @@ public native void set_output_raw_strided( @ByVal DimnameArrayRef names); public native void set_output_raw_strided( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @ByVal TensorOptions options, @ByVal DimnameArrayRef names); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java index cd04ea89ae2..2a5d0fc3ca0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java @@ -100,7 +100,7 @@ public class TensorIteratorBase extends MetaBase { public native void narrow(int dim, @Cast("int64_t") long start, @Cast("int64_t") long size); /** Narrows every dim after and including {@code start_dim} to size one. */ public native void select_all_keeping_dim(int start_dim, @ByVal LongArrayRef starts); - public native void select_all_keeping_dim(int start_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... starts); + public native void select_all_keeping_dim(int start_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... starts); /** Replaces the data pointer for the operand at index {@code arg}. 
* The new pointer should have the same sizes, strides and dtype as the * original */ @@ -135,7 +135,7 @@ public class TensorIteratorBase extends MetaBase { /** Inverts the re-ordering done by reorder_dimensions. This can only be * called *before* coalesce_dimensions() is called. */ public native @ByVal DimVector invert_perm(@ByVal LongArrayRef input); - public native @ByVal DimVector invert_perm(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input); + public native @ByVal DimVector invert_perm(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... input); /** Reapply same re-ordering as it is done by reorder_dimensions. This can * only be called *before* coalesce_dimensions() is called. */ @@ -149,7 +149,7 @@ public class TensorIteratorBase extends MetaBase { // Helper functions for advanced stride manipulations (e.g. torch.flip) public native void _unsafe_set_arg_strides(int arg, @ByVal LongArrayRef strides); - public native void _unsafe_set_arg_strides(int arg, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + public native void _unsafe_set_arg_strides(int arg, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); public native void _unsafe_set_arg_data(int arg, Pointer data); /** true if the stride computation can use 32-bit arithmetic. 
Used by GPU @@ -179,8 +179,8 @@ public native void set_output_raw_strided( @ByVal DimnameArrayRef names); public native void set_output_raw_strided( @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @ByVal TensorOptions options, @ByVal DimnameArrayRef names); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java index 60703f9b1cc..0dd50b40613 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java @@ -152,13 +152,13 @@ public class TensorIteratorConfig extends Pointer { public native @ByRef TensorIteratorConfig declare_static_dtype(ScalarType dtype); public native @ByRef TensorIteratorConfig declare_static_device(@ByVal Device device); public native @ByRef TensorIteratorConfig declare_static_shape(@ByVal LongArrayRef shape); - public native @ByRef TensorIteratorConfig declare_static_shape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); + public native @ByRef TensorIteratorConfig declare_static_shape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... shape); public native @ByRef TensorIteratorConfig declare_static_shape( @ByVal LongArrayRef shape, @ByVal LongArrayRef squash_dims); public native @ByRef TensorIteratorConfig declare_static_shape( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
squash_dims); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shape, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... squash_dims); // It would be better if this was && qualified, but this would be at the cost // of a lot of boilerplate above diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java index 31d67e8f65e..894e44784ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java @@ -72,7 +72,7 @@ public class TensorType extends SharedType { public static native @SharedPtr("c10::TensorType") @ByVal TensorType createContiguous( ScalarType scalar_type, @ByVal Device device, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); public static native @ByVal Type.TypePtr fromNumberType(@Const @ByRef Type typ); public static native @ByVal Type.TypePtr fromBoolType(); @@ -111,13 +111,13 @@ public class TensorType extends SharedType { @ByVal LongArrayRef sizes, @ByVal LongArrayRef strides); public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizesStrides( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
strides); public native @SharedPtr("c10::TensorType") @ByVal TensorType withSymbolicShapes(@ByVal SymbolicShape ssizes); public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizes(@ByVal LongArrayRef sizes); - public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); public native @SharedPtr("c10::TensorType") @ByVal TensorType withDevice(@Const @ByVal DeviceOptional device); @@ -162,8 +162,8 @@ public class TensorType extends SharedType { public static native @ByVal @Cast("std::vector*") LongVector contiguousStridesOf( @ByVal LongArrayRef in_sizes); public static native @ByVal @Cast("std::vector*") LongVector contiguousStridesOf( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] in_sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] in_sizes, @ByVal(nullValue = "at::MemoryFormat(c10::MemoryFormat::Contiguous)") MemoryFormat memory_format); public static native @ByVal @Cast("std::vector*") LongVector contiguousStridesOf( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... in_sizes); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
in_sizes); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java index 1d9d16c2218..bcb9a4d69cc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableHooksInterface.java @@ -39,7 +39,7 @@ public class VariableHooksInterface extends Pointer { public native @Cast("int64_t") long _version(@Const @ByRef TensorBase arg0); public native void retain_grad(@Const @ByRef TensorBase arg0); public native @Cast("bool") boolean retains_grad(@Const @ByRef TensorBase arg0); - public native void _backward(@Const @ByRef Tensor arg0, @ByVal @Cast("at::TensorList*") TensorArrayRef arg1, @Const @ByRef TensorOptional arg2, @ByVal BoolOptional arg3, @Cast("bool") boolean arg4); + public native void _backward(@Const @ByRef Tensor arg0, @ByVal TensorArrayRef arg1, @Const @ByRef TensorOptional arg2, @ByVal BoolOptional arg3, @Cast("bool") boolean arg4); public native void requires_grad_(@Const @ByRef TensorBase arg0, @Cast("bool") boolean arg1); public native void basic_autograd_not_implemented_fallback(@Const @ByRef OperatorHandle op, @ByVal DispatchKeySet dispatch_keys, IValueVector stack); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 7fa2f4535fb..5b5555c824a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -3754,10 +3754,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // performance @Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_2d(@ByVal LongArrayRef sizes); -@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_2d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector 
long... sizes); +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_2d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); @Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_3d(@ByVal LongArrayRef sizes); -@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_3d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_3d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); // NOTE: // Below are Helper functions for is_channels_last_strides_xd. @@ -3825,15 +3825,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByVal LongArrayRef sizes, @Const @ByVal LongArrayRef strides); @Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_2d( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); @Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_3d( @Const @ByVal LongArrayRef sizes, @Const @ByVal LongArrayRef strides); @Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_3d( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
strides); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); // namespace c10 @@ -6506,13 +6506,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Prefer using a more semantic constructor, like // fromIntArrayRefKnownNonNegative @Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... array_ref); @Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... array_ref); @Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
array_ref); // namespace c10 @@ -7336,20 +7336,21 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * A utility function to convert vector to vector. */ @Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector ToVectorint64_t(@Const @ByRef IntArrayRef src); +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector ToVectorint64_t(@ByRef @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int... src); /** * Return product of all dimensions starting from k */ @Namespace("c10") public static native @Cast("int64_t") long size_from_dim_(int k, @ByVal LongArrayRef dims); -@Namespace("c10") public static native @Cast("int64_t") long size_from_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("c10") public static native @Cast("int64_t") long size_from_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // Product of all dims up to k (not including dims[k]) @Namespace("c10") public static native @Cast("int64_t") long size_to_dim_(int k, @ByVal LongArrayRef dims); -@Namespace("c10") public static native @Cast("int64_t") long size_to_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("c10") public static native @Cast("int64_t") long size_to_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // Product of all dims between k and l (not including dims[k] and dims[l]) @Namespace("c10") public static native @Cast("int64_t") long size_between_dim_(int k, int l, @ByVal LongArrayRef dims); -@Namespace("c10") public static native @Cast("int64_t") long size_between_dim_(int k, int l, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dims); +@Namespace("c10") public static native @Cast("int64_t") long size_between_dim_(int k, int l, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // Wrap around axis_index if it is negative, s.t., -1 is the last dim @Namespace("c10") public static native int canonical_axis_index_(int axis_index, int ndims); @@ -12149,8 +12150,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByVal LongArrayRef sizes, @Const @ByVal LongArrayRef strides); @Namespace("c10") public static native @Cast("bool") boolean is_contiguous_strides( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); // Targeting ../AnyType.java @@ -14475,7 +14476,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(@Cast("int64_t") long dim, TensorImpl tensor); -@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(@Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(@Cast("int64_t") long dim, @ByVal TensorArrayRef tensors); @Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim( @Cast("int64_t") long dim, @@ -14671,7 +14672,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::namedinference") public static native void propagate_names_except( @Const @ByRef Tensor result, @Const @ByRef Tensor src, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
excluded_idxs); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... excluded_idxs); // Used for reduction ops that have a `keepdim` arg. @Namespace("at::namedinference") public static native void propagate_names_for_reduction( @@ -14682,7 +14683,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::namedinference") public static native void propagate_names_for_reduction( @Const @ByRef Tensor result, @Const @ByRef Tensor src, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] excluded_idxs, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] excluded_idxs, @Cast("bool") boolean keepdim); @Namespace("at::namedinference") public static native void propagate_names_for_expand( @@ -18234,7 +18235,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include @Namespace("at::detail") public static native void check_size_nonnegative(@ByVal LongArrayRef size); -@Namespace("at::detail") public static native void check_size_nonnegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at::detail") public static native void check_size_nonnegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("at::detail") public static native void check_size_nonnegative(@ByVal SymIntArrayRef size); @@ -18246,11 +18247,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef sizes, @Cast("size_t") long itemsize); @Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @Cast("size_t") long itemsize, @Cast("size_t") long storage_offset/*=0*/); @Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @Cast("size_t") long itemsize); @Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytesContiguous( @ByVal SymIntArrayRef sizes, @@ -18269,13 +18270,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef strides, @Cast("size_t") long itemsize); @Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @Cast("size_t") long itemsize, @Cast("size_t") long storage_offset/*=0*/); @Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] 
strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @Cast("size_t") long itemsize); @Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytes( @ByVal SymIntArrayRef sizes, @@ -18294,7 +18295,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { ScalarType scalar_type, @ByVal MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_generic( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, Allocator allocator, @ByVal DispatchKeySet ks, ScalarType scalar_type, @@ -18307,8 +18308,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal DispatchKeySet ks, ScalarType scalar_type); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_generic( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, Allocator allocator, @ByVal DispatchKeySet ks, ScalarType scalar_type); @@ -18329,12 +18330,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef size, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype, @Cast("bool") boolean pin_memory/*=false*/, @ByVal(nullValue = 
"c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( @@ -18345,7 +18346,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal BoolOptional pin_memory_opt, @ByVal MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype_opt, @ByVal LayoutOptional layout_opt, @ByVal DeviceOptional device_opt, @@ -18353,7 +18354,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_cpu(@ByVal LongArrayRef size, @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef TensorOptions options); +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef TensorOptions options); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( @ByVal LongArrayRef size, @@ -18365,13 +18366,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef stride, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, ScalarType dtype, @Cast("bool") boolean pin_memory/*=false*/); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( @@ -18382,8 +18383,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal DeviceOptional device_opt, @ByVal BoolOptional pin_memory_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal ScalarTypeOptional dtype_opt, @ByVal LayoutOptional layout_opt, @ByVal DeviceOptional device_opt, @@ -18394,8 +18395,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef stride, @Const @ByRef TensorOptions options); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, 
- @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @Const @ByRef TensorOptions options); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( @@ -18406,11 +18407,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef size, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( @@ -18421,7 +18422,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal BoolOptional pin_memory_opt, @ByVal MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype_opt, @ByVal LayoutOptional layout_opt, @ByVal DeviceOptional device_opt, @@ -18437,10 +18438,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase 
empty_meta(@ByVal LongArrayRef size, @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef TensorOptions options); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef TensorOptions options); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, ScalarType dtype); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( @ByVal LongArrayRef size, @@ -18450,8 +18451,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal DeviceOptional device_opt, @ByVal BoolOptional pin_memory_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal ScalarTypeOptional dtype_opt, @ByVal LayoutOptional layout_opt, 
@ByVal DeviceOptional device_opt, @@ -18462,8 +18463,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef stride, @Const @ByRef TensorOptions options); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @Const @ByRef TensorOptions options); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( @@ -18501,7 +18502,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // explicitly constructing a tensor, e.g., when you want to choose a kernel // strategy based on whether a subgeometry is contiguous. @Namespace("at") public static native @Cast("bool") boolean geometry_is_contiguous(@ByVal LongArrayRef sizes, @ByVal LongArrayRef strides); -@Namespace("at") public static native @Cast("bool") boolean geometry_is_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); +@Namespace("at") public static native @Cast("bool") boolean geometry_is_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); // Targeting ../TensorGeometry.java @@ -18668,7 +18669,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native void checkSize( @Cast("at::CheckedFrom") String c, @Const @ByRef TensorGeometryArg t, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); @Namespace("at") public static native void checkSize_symint( @Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorGeometryArg t, @@ -18794,16 +18795,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("int64_t") long dim_size, @Cast("int64_t") long size); @Namespace("at::detail") public static native @ByVal @Cast("std::vector*") LongVector defaultStrides(@ByVal LongArrayRef sizes); -@Namespace("at::detail") public static native @ByVal @Cast("std::vector*") LongVector defaultStrides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +@Namespace("at::detail") public static native @ByVal @Cast("std::vector*") LongVector defaultStrides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); @Namespace("at::detail") public static native @ByVal LongVectorOptional computeStride( @ByVal LongArrayRef oldshape, @ByVal LongArrayRef oldstride, @ByVal LongArrayRef newshape); @Namespace("at::detail") public static native @ByVal LongVectorOptional computeStride( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldshape, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldstride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... newshape); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] oldshape, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] oldstride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
newshape); @Namespace("at::detail") public static native @ByVal SymDimVectorOptional computeStride( @ByVal SymIntArrayRef oldshape, @@ -18815,8 +18816,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef oldstride, @Const @ByRef DimVector newshape); @Namespace("at::detail") public static native @ByVal DimVectorOptional computeStride( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldshape, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldstride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] oldshape, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] oldstride, @Const @ByRef DimVector newshape); // namespace detail @@ -18982,7 +18983,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal LongArrayRef sizes); -@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
sizes); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, @@ -18998,15 +18999,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { PointerConsumer deleter); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, PointerConsumer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, PointerConsumer deleter); @Namespace("at") public static native @ByVal Tensor from_blob( @@ -19025,16 +19026,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { PointerConsumer deleter); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @Cast("int64_t") long storage_offset, PointerConsumer 
deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @Cast("int64_t") long storage_offset, PointerConsumer deleter); @@ -19050,13 +19051,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { PointerConsumer deleter); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, PointerConsumer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, PointerConsumer deleter); @Namespace("at") public static native @ByVal Tensor from_blob( @@ -19070,13 +19071,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef strides); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, @@ -19087,11 +19088,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef sizes); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
sizes); // namespace at @@ -19122,26 +19123,34 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // return at::tensor(ArrayRef(value)); // } @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values, @Const @ByRef TensorOptions options); +@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte[] values, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte... values); @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jshort*", "c10::ArrayRef", "std::vector&"}) @StdVector("jshort") short[] values, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(short value, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jshort*", "c10::ArrayRef", "std::vector&"}) @StdVector("jshort") short... 
values); @Namespace("at") public static native @ByVal Tensor tensor(short value); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int[] values, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(int value, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int... values); @Namespace("at") public static native @ByVal Tensor tensor(int value); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] values, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
values); @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float[] values, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(float value, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float... values); @Namespace("at") public static native @ByVal Tensor tensor(float value); @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values, @Const @ByRef TensorOptions options); @Namespace("at") public static native @ByVal Tensor tensor(double value, @Const @ByRef TensorOptions options); @@ -19347,7 +19356,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor @Namespace("at") public static native @ByVal Tensor adaptive_avg_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); @@ -19378,12 +19387,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByRef Tensor out); // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) @@ -19396,7 +19405,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor @Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor @@ -19432,12 +19441,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByRef Tensor out); // aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) 
@@ -19450,7 +19459,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor @Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor @@ -19518,7 +19527,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); @@ -19549,14 +19558,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); // aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); @@ -19622,14 +19631,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); // aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
output_size); @@ -19998,7 +20007,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor @Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean align_corners); // aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor @@ -20007,12 +20016,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean align_corners); // aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean align_corners, @ByRef Tensor out); // aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) @@ -20052,7 +20061,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor @Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean align_corners); // aten::affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor @@ -20180,7 +20189,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::align_tensors(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector 
align_tensors(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector align_tensors(@ByVal TensorArrayRef tensors); @@ -20359,15 +20368,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); // aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); // aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor amax_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor amax_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor amax_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); @@ -20399,15 +20408,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef 
Tensor self); -@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); // aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); // aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor amin_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor amin_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor amin_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); @@ -21066,8 +21075,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); @Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) @@ -21078,8 +21087,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); @Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) @@ -21117,8 +21126,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor @Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); @Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor @@ -21129,13 +21138,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); @Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, 
@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor as_strided_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
@@ -21177,8 +21186,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor @Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); @Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor @@ -21189,13 +21198,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); @Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
stride); // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor as_strided_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
@@ -21425,7 +21434,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor atleast_1d(@Const @ByRef Tensor self); // aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_1d(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_1d(@ByVal TensorArrayRef tensors); @@ -21458,7 +21467,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor atleast_2d(@Const @ByRef Tensor self); // aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_2d(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_2d(@ByVal TensorArrayRef tensors); @@ -21491,7 +21500,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor atleast_3d(@Const @ByRef Tensor self); // aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_3d(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_3d(@ByVal TensorArrayRef tensors); @@ -21523,8 +21532,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor @Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, 
@ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/); @Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/); -@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/); +@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -21556,17 +21565,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); @Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); // aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor @Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); @Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -21597,14 +21606,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); // aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); // aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? 
divisor_override) -> Tensor @Namespace("at") public static native @ByVal Tensor avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +@Namespace("at") public static native @ByVal Tensor avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); @@ -21636,17 +21645,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); @Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); // aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor @Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); @Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -21677,14 +21686,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); // aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); // aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? 
divisor_override) -> Tensor @Namespace("at") public static native @ByVal Tensor avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +@Namespace("at") public static native @ByVal Tensor avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); @@ -22684,12 +22693,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::block_diag(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor block_diag(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor block_diag(@ByVal TensorArrayRef tensors); // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor block_diag_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor block_diag_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor block_diag_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor block_diag_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); @@ -22754,7 +22763,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::broadcast_tensors(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector broadcast_tensors(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector broadcast_tensors(@ByVal TensorArrayRef tensors); @@ -22785,7 +22794,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor broadcast_to(@Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor broadcast_to(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor broadcast_to(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); // aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) @@ -22897,7 +22906,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::cartesian_prod(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor cartesian_prod(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor cartesian_prod(@ByVal TensorArrayRef tensors); @@ -22937,12 +22946,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor cat_outf(@Const @ByRef TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); // aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor cat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +@Namespace("at") public static native @ByVal Tensor cat(@ByVal TensorArrayRef tensors, @ByVal Dimname dim); // aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @ByVal Dimname dim); // aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor cat_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cat_outf(@ByVal TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); @@ -23182,12 +23191,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::chain_matmul(Tensor[] matrices) -> Tensor -@Namespace("at") public static native @ByVal Tensor chain_matmul(@ByVal @Cast("at::TensorList*") TensorArrayRef matrices); +@Namespace("at") public static native @ByVal Tensor chain_matmul(@ByVal TensorArrayRef matrices); // aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor chain_matmul_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef matrices); +@Namespace("at") public static native @ByRef Tensor chain_matmul_out(@ByRef Tensor out, @ByVal TensorArrayRef matrices); // aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor chain_matmul_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef matrices, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor chain_matmul_outf(@ByVal TensorArrayRef matrices, @ByRef Tensor out); @@ -23726,32 +23735,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor col2im_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor col2im_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByRef Tensor col2im_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor col2im_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor col2im_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor col2im_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor col2im_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor col2im_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByRef Tensor col2im_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor col2im_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor col2im_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor col2im_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); // aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor @Namespace("at") public static native @ByVal Tensor col2im(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor col2im(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByVal Tensor col2im(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor @Namespace("at") public static native @ByVal Tensor col2im_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor col2im_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @ByVal Tensor col2im_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); @@ -23845,12 +23854,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::column_stack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor column_stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor column_stack(@ByVal TensorArrayRef tensors); // aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor column_stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor column_stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor column_stack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor column_stack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); @@ -23946,22 +23955,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::concat(Tensor[] tensors, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor concat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor concat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor concat(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor concat(@ByVal TensorArrayRef tensors); // aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); // aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor concat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +@Namespace("at") public static native @ByVal Tensor concat(@ByVal TensorArrayRef tensors, @ByVal Dimname dim); // aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @ByVal Dimname dim); // aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); @@ -23991,22 +24000,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal TensorArrayRef tensors); // aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); // aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal TensorArrayRef tensors, @ByVal Dimname dim); // aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @ByVal Dimname dim); // aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); @@ -24106,8 +24115,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor @Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); @Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal LongArrayRef pad); -@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); +@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... pad); // aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor @@ -24118,13 +24127,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); @Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef pad); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... pad); // aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor constant_pad_nd_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @Const @ByRef Scalar value, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef Scalar value, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] pad, @Const @ByRef Scalar value, @ByRef Tensor out); // aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) @@ -24194,20 +24203,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public 
static native @ByVal Tensor conv1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor 
input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding); @@ -24239,20 +24248,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); 
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, 
@Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding); @@ -24284,20 +24293,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor 
weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @StringView BytePointer padding); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @StringView String padding); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding, 
@ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @StringView String padding); @@ -24328,32 +24337,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); +@Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); // aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); // aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); // aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); // aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); // aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); @@ -24453,13 +24462,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); // aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = 
"c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose1d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @@ -24492,13 +24501,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); // aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = 
"c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose2d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @@ -24531,13 +24540,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); // aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = 
"c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor conv_transpose3d_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); @@ -24569,32 +24578,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); // aten::convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups); // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); @@ -24626,32 +24635,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef 
output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, 
@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, 
@Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, 
SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, 
@ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @@ -24683,14 +24692,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, 
@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] 
output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @@ -24721,14 +24730,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups); // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); @@ -25048,7 +25057,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor @Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor 
count_nonzero(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::count_nonzero(Tensor self, int? dim=None) -> Tensor @Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); @@ -25056,10 +25065,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByRef Tensor out); // aten::count_nonzero.out(Tensor self, int? 
dim=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); @@ -25265,8 +25274,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor @Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal LongArrayRef input_lengths, @ByVal LongArrayRef target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); @Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal LongArrayRef input_lengths, @ByVal LongArrayRef target_lengths); -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
target_lengths); +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... target_lengths); // aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor @Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); @@ -25441,14 +25450,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByVal Tensor 
cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); // aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); // aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); @@ -25479,14 +25488,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); @@ -25517,14 +25526,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); @@ -25555,14 +25564,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor @Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); 
-@Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); @@ -26064,7 +26073,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor dequantize(@Const @ByRef Tensor self); // aten::dequantize.tensors(Tensor[] tensors) -> 
Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dequantize(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dequantize(@ByVal TensorArrayRef tensors); // aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor dequantize_out(@ByRef Tensor out, @Const @ByRef Tensor self); @@ -26072,9 +26081,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor dequantize_outf(@Const @ByRef Tensor self, @ByRef Tensor out); // aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void dequantize_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native void dequantize_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef tensors); // aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void dequantize_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void dequantize_outf(@ByVal TensorArrayRef tensors, @ByVal TensorArrayRef out); @@ -26343,7 +26352,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor @Namespace("at") public static native @ByVal Tensor diagonal_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); -@Namespace("at") public static native @ByVal Tensor diagonal_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); +@Namespace("at") public static native @ByVal Tensor diagonal_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); // aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor @@ -26352,12 +26361,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor diagonal_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); -@Namespace("at") public static native @ByRef Tensor diagonal_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); +@Namespace("at") public static native @ByRef Tensor diagonal_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor diagonal_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor diagonal_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor diagonal_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) @@ -26759,7 +26768,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); @@ -26789,12 +26798,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::dstack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor dstack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor dstack(@ByVal TensorArrayRef tensors); // aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dstack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor dstack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dstack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor dstack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); @@ -26824,10 +26833,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional path); -@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
path); -@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional path); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView BytePointer equation, @ByVal TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... path); +@Namespace("at") public static native @ByVal Tensor einsum(@StringView String equation, @ByVal TensorArrayRef tensors); @@ -27178,22 +27187,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @@ -27208,13 +27217,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal LongArrayRef size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@@ -27229,11 +27238,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); @@ -27304,45 +27313,45 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout); -@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
physical_layout); +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... physical_layout); // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor empty_permuted(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout); -@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... physical_layout); +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] physical_layout, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... physical_layout); // aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor empty_permuted_symint(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] physical_layout, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor empty_permuted_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout); -@Namespace("at") public static native @ByRef Tensor empty_permuted_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... physical_layout); +@Namespace("at") public static native @ByRef Tensor empty_permuted_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... physical_layout); // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor empty_permuted_outf(@ByVal LongArrayRef size, @ByVal LongArrayRef physical_layout, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_permuted_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_permuted_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] physical_layout, @ByRef Tensor out); // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor empty_permuted_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout); -@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... physical_layout); +@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... physical_layout); // aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_outf(@ByVal SymIntArrayRef size, @ByVal LongArrayRef physical_layout, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_outf(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] physical_layout, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_permuted_symint_outf(@ByVal SymIntArrayRef size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] physical_layout, @ByRef Tensor out); @@ -27375,20 +27384,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor); -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef 
Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor qtensor); // aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor qtensor, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); // aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Tensor qtensor); -@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); +@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor qtensor); // aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor empty_quantized_outf(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_quantized_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_quantized_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor qtensor, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); @@ -27420,13 +27429,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @@ -27440,12 +27449,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor empty_strided_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor empty_strided_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByRef Tensor empty_strided_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor empty_strided_outf(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_strided_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_strided_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) 
@@ -27802,8 +27811,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor @Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); @Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor @@ -27814,13 +27823,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); @Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor expand_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("bool") boolean implicit, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor expand_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor expand_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean implicit, @ByRef Tensor out); // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) @@ -28559,35 +28568,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, 
@ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_fft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_fft2_symint(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_fft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_fft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_fft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = 
"at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_fft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -28807,35 +28816,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_hfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_hfft2_symint(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_hfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor 
fft_hfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @@ -28984,35 +28993,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ifft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ifft2_symint(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ifft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = 
"at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -29193,35 +29202,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_ihfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_ihfft2_symint(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ihfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @Const @ByRef Tensor out); @@ -29370,35 +29379,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_irfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_irfft2_symint(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_irfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = 
"at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -29547,35 +29556,35 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor @Namespace("at") public static native @ByVal Tensor fft_rfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByVal Tensor fft_rfft2_symint(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_rfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2_symint(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); @Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = 
"at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); // aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal StringViewOptional norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRefOptional s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal StringViewOptional norm, @ByRef Tensor out); @@ -29861,7 +29870,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor flatten_dense_tensors(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor flatten_dense_tensors(@ByVal TensorArrayRef tensors); @@ -29892,14 +29901,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::flip(Tensor self, int[] dims) -> Tensor @Namespace("at") public static native @ByVal Tensor flip(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor flip(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dims); +@Namespace("at") public static native @ByVal Tensor flip(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor flip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dims); -@Namespace("at") public static native @ByRef Tensor flip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByRef Tensor flip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor flip_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor flip_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor flip_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims, @ByRef Tensor out); @@ -30268,14 +30277,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor random_samples); // aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); // aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor random_samples); @@ -30306,14 +30315,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor indices); // aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); // aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor @Namespace("at") public static native @ByVal Tensor fractional_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor fractional_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor fractional_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor indices); @@ -30344,14 +30353,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor random_samples); // aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); // aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor random_samples); @@ -30382,14 +30391,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor indices); // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); // aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor @Namespace("at") public static native @ByVal Tensor fractional_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor fractional_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor fractional_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Const @ByRef Tensor indices); @@ -30456,17 +30465,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor frobenius_norm_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor frobenius_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor frobenius_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); @@ -30543,22 +30552,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, 
@ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value); // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -30572,12 +30581,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); -@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); +@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value); // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) @@ -30590,10 +30599,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByRef Tensor out); @@ -31191,14 +31200,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? 
dim=None, int edge_order=1) -> Tensor[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); @@ -31207,18 +31216,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); // aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing); // aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal 
@Cast("at::TensorList*") TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal LongArrayRef dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); @@ -31535,10 +31544,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T gru(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +@Namespace("at") public static native @ByVal T_TensorTensor_T gru(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); // aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T gru(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +@Namespace("at") public static native @ByVal T_TensorTensor_T gru(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); @@ -32171,12 +32180,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::histogram.bin_ct_out(Tensor self, int bins=100, *, 
float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) @Namespace("at") public static native @ByVal T_TensorTensor_T histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); // aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) @Namespace("at") public static native @ByVal T_TensorTensor_T histogram_outf(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal DoubleArrayRefOptional range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor hist, @ByRef Tensor bin_edges); +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_outf(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor hist, @ByRef Tensor bin_edges); // aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) @Namespace("at") public static native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @@ -32208,16 +32220,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal LongArrayRef bins); -@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... bins); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... bins); // aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); // aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) -@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef bins); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double[] range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); @@ -32251,7 +32265,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); @@ -32316,12 +32330,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::hstack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor hstack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor hstack(@ByVal TensorArrayRef tensors); // aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hstack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor hstack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hstack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor hstack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); @@ -32567,14 +32581,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor im2col_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor im2col_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByRef Tensor im2col_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor im2col_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor im2col_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor im2col_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); // aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor @Namespace("at") public static native @ByVal Tensor im2col(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor im2col(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @ByVal Tensor im2col(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); @@ -32924,7 +32938,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor @Namespace("at") public static native @ByVal Tensor index_select_backward(@Const @ByRef Tensor grad, @ByVal LongArrayRef self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); -@Namespace("at") public static native @ByVal Tensor index_select_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); +@Namespace("at") public static native @ByVal Tensor index_select_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); // aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor @@ -34152,8 +34166,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? 
bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor @Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/); @Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape); -@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/); -@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... normalized_shape); +@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] normalized_shape, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/); +@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... normalized_shape); // aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? 
bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor @@ -35498,28 +35512,28 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); // aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView BytePointer ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @StringView String ord/*="fro"*/, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView BytePointer ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView String ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @StringView String ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); @@ -35651,12 +35665,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::linalg_multi_dot(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_multi_dot(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor linalg_multi_dot(@ByVal TensorArrayRef tensors); // aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); @@ -37106,17 +37120,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); // aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @@ -37199,10 +37213,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lstm(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lstm(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); // aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, 
int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lstm(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lstm(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); @@ -37232,8 +37246,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T lstm_cell(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); -@Namespace("at") public static native @ByVal T_TensorTensor_T lstm_cell(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); +@Namespace("at") public static native @ByVal T_TensorTensor_T lstm_cell(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); +@Namespace("at") public static native @ByVal T_TensorTensor_T lstm_cell(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); @@ -37263,12 +37277,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::lstm_mps_backward(Tensor? grad_y, Tensor? grad_hy, Tensor? 
grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) -@Namespace("at") public static native @ByVal T_TensorTensorVectorTensorVector_T lstm_mps_backward(@Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +@Namespace("at") public static native @ByVal T_TensorTensorVectorTensorVector_T lstm_mps_backward(@Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); // aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () -@Namespace("at") public static native void lstm_mps_backward_out(@ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2, @Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +@Namespace("at") public static native void lstm_mps_backward_out(@ByRef Tensor out0, @ByVal TensorArrayRef out1, @ByVal TensorArrayRef out2, @Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); // aten::lstm_mps_backward.out(Tensor? grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () -@Namespace("at") public static native void lstm_mps_backward_outf(@Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2); +@Namespace("at") public static native void lstm_mps_backward_outf(@Const @ByRef TensorOptional grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByVal TensorArrayRef out1, @ByVal TensorArrayRef out2); @@ -37901,8 +37915,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); 
@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
kernel_size); @@ -37934,8 +37948,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -37967,8 +37981,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector 
long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
kernel_size); @@ -38000,17 +38014,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -38042,17 +38056,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); // aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") 
boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
kernel_size); @@ -38083,14 +38097,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); // aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor 
self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); // aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor @Namespace("at") public static native @ByVal Tensor 
max_pool2d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor max_pool2d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor max_pool2d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); @@ -38122,8 +38136,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, 
@Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
kernel_size); @@ -38155,17 +38169,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); // aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") 
boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
kernel_size); @@ -38196,14 +38210,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); // aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor 
self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); // aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor @Namespace("at") public static native @ByVal Tensor 
max_pool3d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor max_pool3d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor max_pool3d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); @@ -38234,12 +38248,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByRef Tensor out); // aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
@@ -38252,7 +38266,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor @Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor @@ -38288,32 +38302,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor max_unpool3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor max_unpool3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor max_unpool3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
padding); @@ -38490,11 +38504,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::meshgrid(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors); // aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @StringView BytePointer indexing); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @StringView String indexing); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView BytePointer indexing); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors, @StringView String indexing); @@ -38693,32 +38707,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @@ -38750,7 +38764,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); @@ -38781,7 +38795,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); @@ -38812,32 +38826,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) 
out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @@ -38869,32 +38883,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor @Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); @@ -38925,15 +38939,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, 
@Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); // aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") 
boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); // aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional 
dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); @@ -38963,15 +38977,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorVector_T miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorVector_T miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorVector_T miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long 
weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorVector_T miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> () -@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef 
Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? 
grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () -@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3); -@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, 
@ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3); +@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3); +@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3); @@ -39070,14 +39084,14 @@ 
public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByRef Tensor out); @@ -39143,32 +39157,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups); // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); @@ -39272,14 +39286,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_linear_backward_input(@ByVal LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor mkldnn_linear_backward_input(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor mkldnn_linear_backward_input(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Const @ByRef 
Tensor grad_output, @Const @ByRef Tensor weight); // aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_out(@ByRef Tensor out, @ByVal LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); // aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_outf(@ByVal LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByRef Tensor out); @@ -39346,17 +39360,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector 
long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -39388,17 +39402,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef 
dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -39430,17 +39444,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue 
= "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
kernel_size); // aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -39472,17 +39486,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef 
dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -39514,15 +39528,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? 
input_size=None) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal LongArrayRefOptional input_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) 
@StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByRef Tensor out); @@ -39554,15 +39568,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // 
aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); @Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups/*=1*/); // aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); @@ -39593,14 +39607,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") 
boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); // aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) @Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Cast("int64_t") long mode, 
@Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); // aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) @Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef 
Tensor out3); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); @@ -39631,14 +39645,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? 
grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); 
+@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); // aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) 
out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) @Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6, @Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6, @Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6, @Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); // aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) 
out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) @Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6); -@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace, 
@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6); @@ -39751,7 +39765,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @ByVal LongArrayRef source, @ByVal LongArrayRef destination); -@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
destination); +@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... destination); // aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @Cast("int64_t") long source, @Cast("int64_t") long destination); @@ -39785,7 +39799,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @ByVal LongArrayRef source, @ByVal LongArrayRef destination); -@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... destination); +@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
destination); // aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @Cast("int64_t") long source, @Cast("int64_t") long destination); @@ -39819,14 +39833,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // 
aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] 
stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); @@ -39857,14 +39871,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] 
output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, 
Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, 
int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) @Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); @@ -40966,7 +40980,7 
@@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); // aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) @@ -40975,12 +40989,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_outf(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_outf(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_outf(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @@ -41020,7 +41034,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? 
bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) @@ -41029,12 +41043,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) @@ -41078,7 +41092,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? 
dtype) -> Tensor @Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); // aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); @@ -41088,10 +41102,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); // aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); @@ -41276,12 +41290,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -41324,12 +41338,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor new_empty_strided_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor new_empty_strided_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +@Namespace("at") public static native @ByRef Tensor new_empty_strided_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... stride); // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor new_empty_strided_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_empty_strided_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_empty_strided_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) @@ -41372,12 +41386,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor new_full_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); -@Namespace("at") public static native @ByRef Tensor new_full_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); +@Namespace("at") public static native @ByRef Tensor new_full_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value); // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor new_full_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_full_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_full_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) @@ -41420,12 +41434,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor new_ones_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor new_ones_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("at") public static native @ByRef Tensor new_ones_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor new_ones_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_ones_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_ones_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -41468,12 +41482,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor new_zeros_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor new_zeros_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor new_zeros_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor new_zeros_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_zeros_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_zeros_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -42008,29 +42022,29 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor @Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); // aten::norm.ScalarOpt_dim(Tensor self, Scalar? 
p, int[1] dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); // aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); // aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); // aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor @Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); @@ -42153,13 +42167,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -42173,13 +42187,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, double std, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); // aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor normal_outf(double mean, double std, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); // aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
@@ -42280,17 +42294,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); @@ -42381,22 +42395,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) 
@StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -42410,12 +42424,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -42428,10 +42442,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); @@ -42670,8 +42684,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor @Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @StringView BytePointer mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); @Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad); -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @StringView String mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] pad, @StringView String mode/*="constant"*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
pad); // aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor @@ -42708,8 +42722,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal @Cast("at::TensorList*") TensorArrayRef sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/); -@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal @Cast("at::TensorList*") TensorArrayRef sequences); +@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/); +@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences); @@ -42802,7 +42816,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor permute(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor permute(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByVal Tensor permute(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); @@ -42833,14 +42847,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::permute_copy(Tensor self, int[] dims) -> Tensor @Namespace("at") public static native @ByVal Tensor permute_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor permute_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dims); +@Namespace("at") public static native @ByVal Tensor permute_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor permute_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dims); -@Namespace("at") public static native @ByRef Tensor permute_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByRef Tensor permute_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor permute_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor permute_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor permute_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims, @ByRef Tensor out); @@ -43690,7 +43704,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor quantize_per_tensor(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype); // aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector quantize_per_tensor(@ByVal 
@Cast("at::TensorList*") TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector quantize_per_tensor(@ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); // aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor quantize_per_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype); @@ -43703,9 +43717,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByRef Tensor quantize_per_tensor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype, @ByRef Tensor out); // aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void quantize_per_tensor_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); +@Namespace("at") public static native void quantize_per_tensor_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); // aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void quantize_per_tensor_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native 
void quantize_per_tensor_outf(@ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype, @ByVal TensorArrayRef out); @@ -43835,7 +43849,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T quantized_lstm_cell(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); +@Namespace("at") public static native @ByVal T_TensorTensor_T quantized_lstm_cell(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); @@ -43867,17 +43881,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = 
"at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
kernel_size); // aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -43909,17 +43923,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal 
Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -43951,17 +43965,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor @Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByVal 
Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); @Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); @@ -44091,13 +44105,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44112,13 +44126,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44133,13 +44147,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44154,13 +44168,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44174,12 +44188,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -44192,12 +44206,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); // aten::rand.generator_out(SymInt[] size, *, Generator? 
generator, Tensor(a!) out) -> Tensor(a!) @@ -44210,12 +44224,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); // aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @@ -44228,12 +44242,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); // aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @@ -44313,13 +44327,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44334,13 +44348,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44355,13 +44369,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44376,13 +44390,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44396,12 +44410,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -44414,12 +44428,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); // aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @@ -44432,12 +44446,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -44450,12 +44464,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); // aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @@ -44595,13 +44609,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44616,13 +44630,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44637,13 +44651,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44658,13 +44672,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -44678,12 +44692,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -44696,12 +44710,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); // aten::randn.generator_out(SymInt[] size, *, Generator? 
generator, Tensor(a!) out) -> Tensor(a!) @@ -44714,12 +44728,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); // aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @@ -44732,12 +44746,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); // aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @@ -45169,12 +45183,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor reflection_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor reflection_pad1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) @@ -45187,7 +45201,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor reflection_pad1d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor @@ -45223,12 +45237,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor grad_input); // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -45241,7 +45255,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor @@ -45277,12 +45291,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor reflection_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor reflection_pad2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) @@ -45295,7 +45309,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor reflection_pad2d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor @@ -45331,12 +45345,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor grad_input); // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -45349,7 +45363,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor @@ -45385,12 +45399,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor reflection_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor reflection_pad3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) @@ -45403,7 +45417,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor reflection_pad3d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor @@ -45439,12 +45453,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor grad_input); // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -45457,7 +45471,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor @@ -45679,12 +45693,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor repeat_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef repeats); -@Namespace("at") public static native @ByRef Tensor repeat_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... repeats); +@Namespace("at") public static native @ByRef Tensor repeat_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... repeats); // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor repeat_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef repeats, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor repeat_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] repeats, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor repeat_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] repeats, @ByRef Tensor out); // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) @@ -45775,12 +45789,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor replication_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor replication_pad1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) @@ -45793,7 +45807,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor replication_pad1d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor replication_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor @@ -45829,12 +45843,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor grad_input); // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@@ -45847,7 +45861,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor replication_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor @@ -45883,12 +45897,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor replication_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor replication_pad2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) @@ -45901,7 +45915,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor replication_pad2d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor replication_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor @@ -45937,12 +45951,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor grad_input); // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@@ -45955,7 +45969,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor replication_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor @@ -45991,12 +46005,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor replication_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor replication_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor replication_pad3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor replication_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) @@ -46009,7 +46023,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor replication_pad3d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor replication_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor @@ -46045,12 +46059,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor grad_input); // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
@@ -46063,7 +46077,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor replication_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor @@ -46127,7 +46141,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor reshape(@Const @ByRef Tensor self, @ByVal LongArrayRef shape); -@Namespace("at") public static native @ByVal Tensor reshape(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); +@Namespace("at") public static native @ByVal Tensor reshape(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... shape); // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) @@ -46193,13 +46207,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor resize_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out); // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) @@ -46214,8 +46228,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor @Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor @@ -46485,10 +46499,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_relu(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_relu(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); // aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_relu(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, 
double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_relu(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); @@ -46549,10 +46563,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_tanh(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_tanh(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); // aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_tanh(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +@Namespace("at") public static native @ByVal 
T_TensorTensor_T rnn_tanh(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); @@ -46615,37 +46629,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor @Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); @Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts); -@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); +@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); +@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
shifts); // aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor @Namespace("at") public static native @ByVal Tensor roll_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); @Namespace("at") public static native @ByVal Tensor roll_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts); -@Namespace("at") public static native @ByVal Tensor roll_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByVal Tensor roll_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); @Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef shifts); -@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
shifts); +@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); +@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... shifts); // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims, @ByRef Tensor out); // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor roll_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); @Namespace("at") public static native @ByRef Tensor roll_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts); -@Namespace("at") public static native @ByRef Tensor roll_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByRef Tensor roll_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor roll_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor roll_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor roll_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims, @ByRef Tensor out); @@ -46678,15 +46692,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor @Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") LongArrayRef dims); @Namespace("at") public static 
native @ByVal Tensor rot90(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") LongArrayRef dims); @Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor rot90_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rot90_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rot90_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims, @ByRef Tensor out); @@ -46828,12 +46842,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::row_stack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor row_stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor row_stack(@ByVal TensorArrayRef tensors); // aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor row_stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor row_stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor row_stack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor row_stack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); @@ -47450,7 +47464,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor @Namespace("at") public static native @ByVal Tensor select_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); -@Namespace("at") public static native @ByVal Tensor select_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); +@Namespace("at") public static native @ByVal Tensor select_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); // aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor @@ -47459,12 +47473,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor select_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); -@Namespace("at") public static native @ByRef Tensor select_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); +@Namespace("at") public static native @ByRef Tensor select_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor select_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor select_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor select_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
@@ -47651,13 +47665,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); @Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
stride); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByRef Tensor out); // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) 
@@ -47672,8 +47686,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor @Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); @Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
stride); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor @@ -48189,7 +48203,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor @Namespace("at") public static native @ByVal Tensor slice_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); -@Namespace("at") public static native @ByVal Tensor slice_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); +@Namespace("at") public static native @ByVal Tensor slice_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); // aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor @@ -48198,12 +48212,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slice_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); -@Namespace("at") public static native @ByRef Tensor slice_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); +@Namespace("at") public static native @ByRef Tensor slice_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slice_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slice_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slice_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step, @ByRef Tensor out); // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) @@ -48389,39 +48403,39 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); @Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] 
stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); // aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); @Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -48453,32 +48467,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor output); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor output); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor output); // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding); // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); // aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... padding); // aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding); @@ -48511,39 +48525,39 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); @@ -48576,39 +48590,39 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); @@ -48641,39 +48655,39 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); // aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -48706,39 +48720,39 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dilation, @ByRef Tensor out); // aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); @Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -49196,10 +49210,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); // aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @@ -49235,10 +49249,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); // aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @@ -49274,10 +49288,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @@ -49313,10 +49327,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); // aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @@ -49327,18 +49341,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal BoolOptional is_coalesced); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal BoolOptional is_coalesced); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal BoolOptional is_coalesced); // aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_outf(@ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); @@ -49369,10 +49383,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); // aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @@ -49408,10 +49422,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); // aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @@ -49507,14 +49521,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); // aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor sparse_resize_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); // aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_resize(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); 
-@Namespace("at") public static native @ByVal Tensor sparse_resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @ByVal Tensor sparse_resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); @@ -49545,14 +49559,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); // aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); // aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor @Namespace("at") public static native @ByVal Tensor sparse_resize_and_clear(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -@Namespace("at") public static native @ByVal Tensor sparse_resize_and_clear(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @ByVal Tensor sparse_resize_and_clear(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); @@ -50902,17 +50916,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor @Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); 
@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dim); +@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor special_logsumexp_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor special_logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor special_logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); @@ -51834,8 +51848,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_size, @Cast("int64_t") 
long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_size); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_size); // aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] @@ -51881,21 +51895,21 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size); +@Namespace("at") public static native void split_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size); // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); 
+@Namespace("at") public static native void split_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size); +@Namespace("at") public static native void split_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size); // aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void split_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); @@ -51928,8 +51942,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef 
split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); // aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] @@ -51967,8 +51981,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector 
split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); // aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] @@ -51977,24 +51991,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
split_sizes); +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); -@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () 
-@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); +@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); // aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_with_sizes_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void split_with_sizes_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); @@ -52111,7 +52125,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dim); @@ -52148,7 +52162,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor @Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); @@ -52162,10 +52176,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dim); // aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByRef Tensor out); @@ -52232,14 +52246,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::stack(Tensor[] tensors, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor stack(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor stack(@ByVal TensorArrayRef tensors); // aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor stack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor stack_outf(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); @@ -53187,8 +53201,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] indices, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] indices, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
indices); // aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] @@ -53229,14 +53243,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor @Namespace("at") public static native @ByVal Tensor tensordot(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRef dims_self, @ByVal LongArrayRef dims_other); -@Namespace("at") public static native @ByVal Tensor tensordot(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims_other); +@Namespace("at") public static native @ByVal Tensor tensordot(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims_other); // aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor tensordot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRef dims_self, @ByVal LongArrayRef dims_other); -@Namespace("at") public static native @ByRef Tensor tensordot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dims_other); +@Namespace("at") public static native @ByRef Tensor tensordot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims_other); // aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor tensordot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRef dims_self, @ByVal LongArrayRef dims_other, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor tensordot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_other, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor tensordot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dims_other, @ByRef Tensor out); @@ -53268,17 +53282,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
padding); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); // aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @ByRef Tensor out); // aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0) -> Tensor @Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); @Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
padding); +@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... kernel_size); @@ -53382,7 +53396,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::tile(Tensor self, SymInt[] dims) -> Tensor @Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dims); // aten::tile(Tensor self, SymInt[] dims) -> Tensor @@ -53846,7 +53860,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor @Namespace("at") public static native @ByVal Tensor trace_backward(@Const @ByRef Tensor grad, @ByVal LongArrayRef sizes); -@Namespace("at") public static native @ByVal Tensor trace_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +@Namespace("at") public static native @ByVal Tensor trace_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
sizes); // aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor @@ -54381,10 +54395,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind_copy(@Const @ByRef Tensor self); // aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unbind_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unbind_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self); +@Namespace("at") public static native void unbind_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void unbind_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self); // aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unbind_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void unbind_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); @@ -54415,7 +54429,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal LongArrayRef sizes); -@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); +@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); // aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) @@ -54424,7 +54438,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) @Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal LongArrayRef sizes, @ByVal DimnameArrayRef names); -@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal DimnameArrayRef names); +@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @ByVal DimnameArrayRef names); // aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) @@ -54459,7 +54473,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal TensorArrayRef tensors); @@ -54518,7 +54532,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor @Namespace("at") public static native @ByVal Tensor 
unfold_backward(@Const @ByRef Tensor grad_in, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); -@Namespace("at") public static native @ByVal Tensor unfold_backward(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); +@Namespace("at") public static native @ByVal Tensor unfold_backward(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); // aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor @@ -54527,12 +54541,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor unfold_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); -@Namespace("at") public static native @ByRef Tensor unfold_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); +@Namespace("at") public static native @ByRef Tensor unfold_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); // aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) 
out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor unfold_backward_outf(@Const @ByRef Tensor grad_in, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor unfold_backward_outf(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor unfold_backward_outf(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); // aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) @@ -54795,21 +54809,21 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size); +@Namespace("at") public static native void unsafe_split_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void unsafe_split_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size); // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) 
-> () -@Namespace("at") public static native void unsafe_split_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void unsafe_split_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size); +@Namespace("at") public static native void unsafe_split_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void unsafe_split_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size); // aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void unsafe_split_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); @@ -54842,8 +54856,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor 
self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
split_sizes); // aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] @@ -54852,24 +54866,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
split_sizes); +@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... split_sizes); // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); -@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] 
out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); +@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); // aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void unsafe_split_with_sizes_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); @@ -54966,23 +54980,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @@ -54997,8 +55012,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor @@ -55036,13 +55051,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, 
@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners); // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -55057,8 +55072,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners); // aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55095,23 +55110,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @@ -55126,8 +55142,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor @@ -55165,13 +55181,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor 
grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners); // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -55186,8 +55202,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor 
upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners); // aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55224,23 +55240,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = 
"c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_linear1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_linear1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out); // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) @@ -55255,8 +55272,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? 
scales=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor @@ -55294,13 +55311,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners); // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -55315,8 +55332,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? 
scales=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] 
input_size, @Cast("bool") boolean align_corners); // aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor @@ -55353,23 +55370,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) @@ -55384,8 +55402,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); +@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor @@ -55423,13 +55441,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... input_size); // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -55444,8 +55462,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? 
scales=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); @Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); +@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... input_size); // aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? 
scales=None) -> Tensor @@ -55482,23 +55500,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @@ -55513,8 +55532,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55552,13 +55571,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); +@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... input_size); // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -55573,8 +55592,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); +@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... input_size); // aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55611,23 +55630,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? 
scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... output_size); // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_nearest3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
@@ -55642,8 +55662,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
output_size); // aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55681,13 +55701,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... input_size); // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -55702,8 +55722,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
input_size); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... input_size); // aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55740,23 +55760,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal @Cast({"double*", "c10::ArrayRef", "std::vector&"}) @StdVector double... scale_factors); // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) @@ -55771,8 +55792,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @Cast("bool") boolean align_corners); // 
aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55810,13 +55831,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners); // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) @@ -55831,8 +55852,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor @Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); @Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") 
DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] input_size, @Cast("bool") boolean align_corners); // aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor @@ -55869,7 +55890,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor @Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal LongArrayRef sizes, @Cast("bool") boolean keepdim); -@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @Cast("bool") boolean keepdim); +@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, @Cast("bool") boolean keepdim); // aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor @@ -56355,7 +56376,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::view_copy(Tensor 
self, SymInt[] size) -> Tensor @Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::view_copy(Tensor self, SymInt[] size) -> Tensor @@ -56367,12 +56388,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!) @@ -56420,7 +56441,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] @Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... indices); @@ -56450,12 +56471,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::vstack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor vstack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor vstack(@ByVal TensorArrayRef tensors); // aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor vstack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByRef Tensor vstack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); // aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor vstack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor vstack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); @@ -56662,22 +56683,22 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -56691,12 +56712,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByRef Tensor out); // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -56709,10 +56730,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); // aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); @@ -58152,12 +58173,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include @Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal LongArrayRef a, @ByVal LongArrayRef b); -@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... b); +@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... b); @Namespace("at") public static native @ByVal SymIntVector infer_size_symint( @ByVal SymIntArrayRef a, @ByVal SymIntArrayRef b); @Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal LongArrayRef a, @ByVal LongArrayRef b); -@Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
b); +@Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... b); @Namespace("at") public static native @ByVal SymDimVector infer_size_symdimvector(@ByVal SymIntArrayRef a, @ByVal SymIntArrayRef b); // Targeting ../DimVectorInferExpandGeometryResult.java @@ -58168,31 +58189,31 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef tensor_strides, @ByVal LongArrayRef sizes); @Namespace("at") public static native @ByVal @Cast("std::tuple,std::vector >*") LongVector inferExpandGeometry( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_strides, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] tensor_sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] tensor_strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); @Namespace("at") public static native @ByVal DimVectorInferExpandGeometryResult inferExpandGeometry_dimvector( @ByVal LongArrayRef tensor_sizes, @ByVal LongArrayRef tensor_strides, @ByVal LongArrayRef sizes); @Namespace("at") public static native @ByVal DimVectorInferExpandGeometryResult inferExpandGeometry_dimvector( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_strides, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] tensor_sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] tensor_strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); @Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_dense_strides( @ByVal LongArrayRef tensor_sizes, @ByVal LongArrayRef tensor_strides); @Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_dense_strides( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... tensor_strides); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] tensor_sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... tensor_strides); // True if input shapes are expandable // NOTE: infer_size did a similar check, please keep them sync if change is // needed @Namespace("at") public static native @Cast("bool") boolean are_expandable(@ByVal LongArrayRef shape1, @ByVal LongArrayRef shape2); -@Namespace("at") public static native @Cast("bool") boolean are_expandable(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape2); +@Namespace("at") public static native @Cast("bool") boolean are_expandable(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shape1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... shape2); // avoid copy-construction of Tensor by using a reference_wrapper. 
@@ -58307,7 +58328,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef sizes); @Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); @@ -58317,12 +58338,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("const char*") BytePointer api_name); @Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( @Const @ByRef Tensor to_expand, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, String api_name); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector expand_outplace(@ByVal @Cast("at::TensorList*") TensorArrayRef to_expand); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector expand_outplace(@ByVal TensorArrayRef to_expand); @Namespace("at") public static native @ByVal Tensor sum_to( @ByVal Tensor tensor, @@ -58343,18 +58364,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByVal LongArrayRef shape); @Namespace("at") public static native @ByVal Tensor sum_to( @ByVal Tensor tensor, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shape, @Cast("bool") boolean always_return_non_view/*=false*/); @Namespace("at") public static native @ByVal Tensor sum_to( @ByVal Tensor tensor, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
shape); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... shape); @Namespace("at") public static native @Cast("bool") boolean is_expandable_to( @ByVal SymIntArrayRef shape, @ByVal SymIntArrayRef desired); @Namespace("at") public static native @Cast("bool") boolean is_expandable_to(@ByVal LongArrayRef shape, @ByVal LongArrayRef desired); -@Namespace("at") public static native @Cast("bool") boolean is_expandable_to(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... desired); +@Namespace("at") public static native @Cast("bool") boolean is_expandable_to(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] shape, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... desired); // namespace at @@ -59488,7 +59509,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Computes the contiguous strides of a tensor, given its sizes. @Namespace("c10") public static native @ByVal @Cast("c10::DimVector*") SymDimVector contiguous_strides(@Const @ByVal LongArrayRef sizes); -@Namespace("c10") public static native @ByVal @Cast("c10::DimVector*") SymDimVector contiguous_strides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +@Namespace("c10") public static native @ByVal @Cast("c10::DimVector*") SymDimVector contiguous_strides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
sizes); // namespace c10 @@ -61911,8 +61932,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); @Namespace("torch") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, PointerConsumer deleter, @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); @@ -61935,7 +61956,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); @Namespace("torch") public static native @ByVal Tensor from_blob( Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, PointerConsumer deleter, @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); @@ -61963,46 +61984,46 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); @Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static 
native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal Tensor _empty_affine_quantized_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal Tensor _empty_affine_quantized_symint(@ByVal SymIntArrayRef size); @Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); -@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); +@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); @Namespace("torch") public static native @ByVal Tensor _empty_per_channel_affine_quantized_symint(@ByVal SymIntArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal Tensor _empty_per_channel_affine_quantized_symint(@ByVal SymIntArrayRef size, @Const @ByRef Tensor scales, @Const 
@ByRef Tensor zero_points, @Cast("int64_t") long axis); @Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor); -@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); +@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor qtensor); @Namespace("torch") public static native @ByVal @Name("empty_like") Tensor torch_empty_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); 
@Namespace("torch") public static native @ByVal @Name("empty_like") Tensor torch_empty_like(@Const @ByRef Tensor self); @Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); -@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
stride); @Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n); @Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @Cast("int64_t") long m); @Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) 
@StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Scalar fill_value); @Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); @Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@StringView BytePointer filename, @ByVal(nullValue = 
"c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @@ -62033,70 +62054,70 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); @Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal 
LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("ones_like") Tensor torch_ones_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("ones_like") Tensor torch_ones_like(@Const @ByRef Tensor self); @Namespace("torch") public static native @ByVal @Name("scalar_tensor") Tensor torch_scalar_tensor(@Const @ByRef Scalar s, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("scalar_tensor") Tensor torch_scalar_tensor(@Const @ByRef Scalar s); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal 
GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); @Namespace("torch") public static native @ByVal @Name("rand_like") Tensor torch_rand_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("rand_like") Tensor torch_rand_like(@Const @ByRef Tensor self); @Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randint") Tensor 
torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); @Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); -@Namespace("torch") public static 
native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); @Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional 
memory_format); @Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high); @Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal 
@Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor 
torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("randn_like") Tensor torch_randn_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("randn_like") Tensor torch_randn_like(@Const @ByRef Tensor self); @Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); @@ -62107,30 +62128,30 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("range") Tensor torch_range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal DimnameListOptional names); @Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal Tensor _efficientzerotensor_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal Tensor _efficientzerotensor_symint(@ByVal SymIntArrayRef size); @Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal @Name("zeros_like") Tensor torch_zeros_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @Namespace("torch") public static native @ByVal @Name("zeros_like") Tensor torch_zeros_like(@Const @ByRef Tensor self); @Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native 
@ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @@ -62138,44 +62159,44 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const 
@ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, 
@ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
size); @Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntArrayRef size); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, 
@Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal Tensor 
_sparse_coo_tensor_with_dims_and_tensors_symint(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional is_coalesced); @Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); @Namespace("torch") public static native @ByVal @Name("_to_copy") Tensor torch__to_copy(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @Cast("bool") boolean non_blocking/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); @@ -62186,8 +62207,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch") public static native @ByVal @Name("triu_indices") Tensor torch_triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); @Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double 
mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); @Namespace("torch") public static native @ByVal @Name("fft_fftfreq") Tensor torch_fft_fftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch") public static native @ByVal @Name("fft_fftfreq") Tensor torch_fft_fftfreq(@Cast("int64_t") long n); @Namespace("torch") public static native @ByVal @Name("fft_rfftfreq") Tensor torch_fft_rfftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); @@ -65128,7 +65149,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self, @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.fft2 @@ -65151,7 +65172,7 @@ The list of (type, depth) pairs controls the type of 
specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N dimensional fast Fourier transform over given dimensions. @@ -65261,7 +65282,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.rfft2. 
@@ -65284,7 +65305,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N dimensional FFT of real input with onesided Hermitian output. @@ -65403,7 +65424,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the 2-dimensional IFFT of a real input signal. 
@@ -65431,7 +65452,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N-dimensional FFT of a Hermitian symmetric input signal. @@ -65458,7 +65479,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the N-dimensional IFFT of a real input signal. 
@@ -65486,7 +65507,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") StringViewOptional norm); /** Computes the discrete Fourier Transform sample frequencies for a signal of @@ -65783,7 +65804,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( @Const @ByRef Tensor self, @Const @ByRef Scalar ord, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); @@ -65797,7 +65818,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( @Const @ByRef Tensor self, @Const @ByRef Scalar ord, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor result); @@ -65811,7 +65832,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( @Const @ByRef Tensor self, @StdString String ord, 
- @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); @@ -65825,7 +65846,7 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( @Const @ByRef Tensor self, @StdString String ord, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor result); @@ -66254,10 +66275,10 @@ The list of (type, depth) pairs controls the type of specializations and the num // implementation // See here for C++ implementation @Namespace("torch::nested") public static native @ByVal Tensor nested_tensor( - @ByVal @Cast("at::TensorList*") TensorArrayRef nested_tensor_data, + @ByVal TensorArrayRef nested_tensor_data, @Const @ByRef(nullValue = "at::TensorOptions{}") TensorOptions options); @Namespace("torch::nested") public static native @ByVal Tensor nested_tensor( - @ByVal @Cast("at::TensorList*") TensorArrayRef nested_tensor_data); + @ByVal TensorArrayRef nested_tensor_data); /// @@ -66278,11 +66299,11 @@ The list of (type, depth) pairs controls the type of specializations and the num /// /// @Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor( - @ByVal @Cast("at::TensorList*") TensorArrayRef list, + @ByVal TensorArrayRef list, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); @Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor( - @ByVal @Cast("at::TensorList*") TensorArrayRef list); + @ByVal TensorArrayRef list); /** Nested to padded 
tensor * @@ -67966,11 +67987,11 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... dilation); // namespace detail // #endif /* DOXYGEN_SHOULD_SKIP_THIS */ @@ -68006,11 +68027,11 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); // namespace detail // #endif /* DOXYGEN_SHOULD_SKIP_THIS */ @@ -68046,11 +68067,11 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] output_padding, @Cast("int64_t") long groups, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
dilation); // namespace detail // #endif /* DOXYGEN_SHOULD_SKIP_THIS */ @@ -70689,7 +70710,7 @@ scalar_t sf(scalar_t x, scalar_t y) double value); @Namespace("torch::nn::functional::detail") public static native @ByVal Tensor pad( @Const @ByRef Tensor input, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] pad, @ByVal PaddingMode mode, double value); // namespace detail @@ -70734,7 +70755,7 @@ scalar_t sf(scalar_t x, scalar_t y) @ByVal LongArrayRef t, @Cast("int64_t") long n); @Namespace("torch::nn::modules::utils") public static native @ByVal @Cast("std::vector*") LongVector _reverse_repeat_vector( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] t, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] t, @Cast("int64_t") long n); @Namespace("torch::nn::modules::utils") public static native @ByVal @Cast("std::vector*") LongVector _list_with_default( @@ -70742,7 +70763,7 @@ scalar_t sf(scalar_t x, scalar_t y) @ByVal LongArrayRef defaults); @Namespace("torch::nn::modules::utils") public static native @ByVal @Cast("std::vector*") LongVector _list_with_default( @ByVal LongOptionalArrayRef out_size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... defaults); + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
defaults); // namespace utils // namespace modules @@ -71651,9 +71672,9 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef LongVectorOptional output_size); @Namespace("torch::nn::functional") public static native @ByVal @Cast("std::vector*") LongVector _unpool_output_size( @Const @ByRef Tensor input, - @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, - @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] kernel_size, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] stride, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] padding, @Const @ByRef LongVectorOptional output_size); // #ifndef DOXYGEN_SHOULD_SKIP_THIS @@ -72270,11 +72291,11 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef LongArrayRef size); @Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( @Const @ByRef Tensor theta, - @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, @Cast("bool") boolean align_corners/*=false*/); @Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( @Const @ByRef Tensor theta, - @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... size); // ============================================================================ @@ -78212,8 +78233,8 @@ scalar_t sf(scalar_t x, scalar_t y) I don't think we can directly virtualize Dataset<...> because of CRTP in Dataset. 
Because of issue #723, we cannot virtualize superclasses of javacpp::*Dataset, only javacpp::*Dataset. - So we redeclare/redefine virtual functions of parents in these classes, so that the JavaCPP peer classes implements - the logic to call the Java versions. + We must redeclare/redefine virtual functions of parents in these classes, so that the JavaCPP peer classes implement + the wrappers that call the Java implementations. */ // Targeting ../JavaDataset.java diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 0d60d967406..381810492a9 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -392,7 +392,8 @@ public void map(InfoMap infoMap) { .pointerTypes("LongArrayRefOptional", "@Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long...").define()) .put(new Info("c10::optional >::swap").skip()) .put(new Info("c10::optional >", "c10::optional >", - "c10::OptionalArrayRef").pointerTypes("DoubleArrayRefOptional").define()) + "c10::OptionalArrayRef") + .pointerTypes("DoubleArrayRefOptional", "@Cast({\"double*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector double...").define()) .put(new Info("c10::optional >", "c10::optional >", "c10::OptionalArrayRef", "c10::OptionalSymIntArrayRef", "at::OptionalSymIntArrayRef", "c10::optional").pointerTypes("SymIntArrayRefOptional").define()) .put(new Info("c10::optional", "c10::optional").pointerTypes("LayoutOptional").define()) @@ -679,7 +680,6 @@ public void map(InfoMap infoMap) { new ArrayInfo("Long") // Warning : c10::IntArrayRef is a Java LongArrayRef and not a Java IntArrayRef .otherCppNames("c10::IntArrayRef", "torch::IntArrayRef", "at::IntArrayRef", "c10::OptionalArray", "c10::remove_symint::type") .itPointerType("LongPointer") - .otherPointerTypes("@Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector 
long...") .elementTypes("int64_t", "jlong") // Order is important, since ArrayRef and ArrayRef are incompatible, even though long == long long. And jlong is long long. .elementValueType("long"), new ArrayInfo("LongOptional").elementTypes("c10::optional"), @@ -693,7 +693,7 @@ public void map(InfoMap infoMap) { new ArrayInfo("SymInt").otherCppNames("c10::SymIntArrayRef").elementTypes("c10::SymInt"), new ArrayInfo("SymNode").elementTypes("c10::SymNode", "c10::intrusive_ptr"), new ArrayInfo("Symbol").elementTypes("c10::Symbol"), - new ArrayInfo("Tensor").otherCppNames("torch::TensorList", "at::ITensorListRef").elementTypes("torch::Tensor", "at::Tensor"), // Warning: not a TensorList (List) + new ArrayInfo("Tensor").otherCppNames("torch::TensorList", "at::TensorList", "at::ITensorListRef").elementTypes("torch::Tensor", "at::Tensor"), // Warning: not a TensorList (List) new ArrayInfo("TensorArg").elementTypes("torch::TensorArg", "at::TensorArg"), new ArrayInfo("TensorIndex").elementTypes("at::indexing::TensorIndex"), new ArrayInfo("TensorOptional").elementTypes("c10::optional", "c10::optional", "c10::optional"), @@ -2553,9 +2553,17 @@ void mapArrayRef(InfoMap infoMap) { cppNamesRIterator[n++] = cn + "::reverse_iterator"; cppNamesRIterator[n++] = cn + "::const_reverse_iterator"; } - String[] pt = new String[otherPointerTypes.length + 1]; + + // Use converting constructor from std::vector when it works to allow passing java array literals + boolean noVariadicPointerType = + elementValueType.contains(" ") // No @ByVal + || elementValueType.equals("boolean"); // ArrayRef cannot be constructed from a std::vector bitfield. + + String[] pt = new String[otherPointerTypes.length + (noVariadicPointerType ? 
1 : 2)]; pt[0] = baseJavaName + "ArrayRef"; System.arraycopy(otherPointerTypes, 0, pt, 1, otherPointerTypes.length); + if (!noVariadicPointerType) + pt[otherPointerTypes.length + 1] = "@Cast({\"" + elementTypes[0] + "*\", \"" + cppNames[0] + "\", \"std::vector<" + elementTypes[0] + ">&\"}) @StdVector(\"" + elementTypes[0] + "\") " + elementValueType + "..."; Info info = new Info(cppNames).pointerTypes(pt); if (baseJavaName.contains("@Cast")) info.cast(); infoMap.put(info); From abf565bedf4c7d51acfa9f46c8f0e93d35768434 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 24 Oct 2023 16:25:12 +0200 Subject: [PATCH 14/26] Fix get_batch argument type --- .../src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java | 2 +- .../java/org/bytedeco/pytorch/JavaTensorBatchDataset.java | 2 +- .../src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java | 2 +- .../gen/java/org/bytedeco/pytorch/TensorBatchDataset.java | 2 +- .../gen/java/org/bytedeco/pytorch/TensorDatasetBase.java | 2 +- .../src/main/java/org/bytedeco/pytorch/presets/torch.java | 6 +++--- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java index d05b87b003f..ecda6625801 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java @@ -28,7 +28,7 @@ public class JavaBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); - public native @ByVal ExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... request); + public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); /** Returns the size of the dataset, or an empty optional if it is unsized. 
*/ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java index a39849fc69d..d0aa00f1dcb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java @@ -28,7 +28,7 @@ public class JavaTensorBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); - public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... request); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); /** Returns the size of the dataset, or an empty optional if it is unsized. */ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java index 2b8f534084a..0bfbf6953cf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java @@ -32,7 +32,7 @@ public class MNISTMapDataset extends MNISTMapBatchDataset { /** Gets a batch from the source dataset and applies the transform to it, * returning the result. */ public native @Name("get_batch") @ByVal Example get_batch_example(@ByVal SizeTArrayRef indices); - public native @Name("get_batch") @ByVal Example get_batch_example(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); + public native @Name("get_batch") @ByVal Example get_batch_example(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); /** Returns the size of the source dataset. 
*/ // NOLINTNEXTLINE(bugprone-exception-escape) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java index 2b1c29a6bf2..6ad76384cfd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java @@ -28,7 +28,7 @@ public class TensorBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); - public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... request); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); /** Returns the size of the dataset, or an empty optional if it is unsized. */ public native @ByVal SizeTOptional size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java index a94b857df28..699e6e6ba93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDatasetBase.java @@ -31,5 +31,5 @@ public class TensorDatasetBase extends TensorBatchDataset { * The default implementation calls {@code get()} for every requested index * in the batch. */ public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); - public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"uint64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... request); + public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... 
request); } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 381810492a9..ba44db2b1e4 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1210,7 +1210,7 @@ public void map(InfoMap infoMap) { // .put(new Info("torch::data::datasets::MapDataset > >::OutputBatchType").pointerTypes("Example")) .put(new Info("torch::data::datasets::MapDataset > >::get_batch") .javaText("public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal SizeTArrayRef indices);\n" + - "public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal @Cast({\"uint64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long... indices);")) + "public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal @Cast({\"size_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long... indices);")) // Simple implementation from tensor.h serving a dataset from a single tensor .put(new Info("torch::data::datasets::TensorDataset")) // Ensure proper ns resolution @@ -1223,7 +1223,7 @@ public void map(InfoMap infoMap) { .put(new Info("torch::data::datasets::Dataset::get_batch", "torch::data::datasets::BatchDataset >::get_batch") .javaText("public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request);\n" + - "public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({\"uint64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long... request);")) + "public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({\"size_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector(\"size_t\") long... 
request);")) ; for (String[] ex : new String[][]{ @@ -1335,7 +1335,7 @@ public void map(InfoMap infoMap) { template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::BatchRequestType", template("torch::data::datasets::BatchDataset", mangledJavaDataset, template("std::vector", example)) + "::BatchRequest", template("torch::data::datasets::BatchDataset", template("javacpp::Dataset", ex[1], ex[2]), template("std::vector", example)) + "::BatchRequest" - ).pointerTypes("SizeTArrayRef", "@Cast({\"uint64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long...")) + ).pointerTypes("SizeTArrayRef", "@Cast({\"size_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector(\"size_t\") long...")) .put(new Info( template("torch::data::datasets::MapDataset", template("torch::data::datasets::SharedBatchDataset", template("torch::data::datasets::ChunkDataset", mangledChunkDataReader, "torch::data::samplers::RandomSampler", "torch::data::samplers::RandomSampler")), template("torch::data::transforms::Stack", example)) + "::get_batch" ).javaText("public native @Name(\"get_batch\") @ByVal " + p + "ExampleOptional get_batch_example(@Cast(\"size_t\") long indices);")) From 84994867cb41583bbd5aef136d8aa2962e7cd2b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Wed, 25 Oct 2023 10:01:28 +0200 Subject: [PATCH 15/26] Remove GatheredContextSupplier.java --- .../bytedeco/pytorch/cuda/CUDAAllocator.java | 4 +-- .../functions/GatheredContextSupplier.java | 32 ------------------- .../bytedeco/pytorch/presets/torch_cuda.java | 8 +++-- 3 files changed, 8 insertions(+), 36 deletions(-) delete mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java index 3fb69c5a563..8386af9c0a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java @@ -58,12 +58,12 @@ public native void beginAllocateStreamToPool( public native @Cast("bool") boolean isHistoryEnabled(); public native void recordHistory( @Cast("bool") boolean enabled, - GatheredContextSupplier context_recorder, + @ByVal @Cast("c10::cuda::CUDACachingAllocator::CreateContextFn*") Pointer context_recorder, @Cast("size_t") long alloc_trace_max_entries, RecordContext when); public native void recordHistory( @Cast("bool") boolean enabled, - GatheredContextSupplier context_recorder, + @ByVal @Cast("c10::cuda::CUDACachingAllocator::CreateContextFn*") Pointer context_recorder, @Cast("size_t") long alloc_trace_max_entries, @Cast("c10::cuda::CUDACachingAllocator::RecordContext") int when); public native void attachOutOfMemoryObserver(@ByVal @Cast("c10::cuda::CUDACachingAllocator::OutOfMemoryObserver*") Pointer observer); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java deleted file mode 100644 index 241a46f8c49..00000000000 --- a/pytorch/src/main/java/org/bytedeco/pytorch/functions/GatheredContextSupplier.java +++ /dev/null @@ -1,32 +0,0 @@ -package org.bytedeco.pytorch.functions; - -import org.bytedeco.javacpp.FunctionPointer; -import org.bytedeco.javacpp.Loader; -import org.bytedeco.javacpp.Pointer; -import org.bytedeco.javacpp.annotation.Cast; -import org.bytedeco.javacpp.annotation.Properties; -import org.bytedeco.javacpp.annotation.SharedPtr; -import org.bytedeco.pytorch.GatheredContext; - -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GatheredContextSupplier extends 
FunctionPointer { - static { - Loader.load(); - } - - /** - * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. - */ - public GatheredContextSupplier(Pointer p) { - super(p); - } - - protected GatheredContextSupplier() { - allocate(); - } - - private native void allocate(); - - // See issue JavaCPP #720 - public native @Cast({"", "std::shared_ptr"}) @SharedPtr GatheredContext call(); -} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 42c9e831050..223849e4d77 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -105,7 +105,9 @@ public void map(InfoMap infoMap) { .put(new Info("const std::vector", "std::vector").pointerTypes("TraceEntryVector").define()) //// Function pointers - .put(new Info("std::shared_ptr (*)()", "c10::cuda::CUDACachingAllocator::CreateContextFn").pointerTypes("GatheredContextSupplier").valueTypes("GatheredContextSupplier").skip()) + // Function pointer returning shared_ptr don't compile on windows + // "D:\a\javacpp-presets\javacpp-presets\pytorch\target\native\org\bytedeco\pytorch\windows-x86_64\jnitorch.cpp(98904): error C2526: 'JavaCPP_org_bytedeco_pytorch_functions_GatheredContextSupplier_allocate_callback': C linkage function cannot return C++ class 'std::shared_ptr'" + //.put(new Info("std::shared_ptr (*)()", "c10::cuda::CUDACachingAllocator::CreateContextFn").pointerTypes("GatheredContextSupplier").valueTypes("GatheredContextSupplier").skip()) ; //// Avoiding name clashes by skipping or renaming @@ -157,7 +159,9 @@ public void map(InfoMap infoMap) { "at::native::Descriptor", "at::native::Descriptor", - "std::hash" + "std::hash", + + "std::shared_ptr (*)()", "c10::cuda::CUDACachingAllocator::CreateContextFn" // See comment for GatheredContextSupplier ).cast().pointerTypes("Pointer")) From 
95496c655c6d5baa7d8c40d706787e42655cb649 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Fri, 27 Oct 2023 13:40:19 +0200 Subject: [PATCH 16/26] Restore missing classes from torch::jit --- .../java/org/bytedeco/pytorch/JitModule.java | 24 ++++---- .../org/bytedeco/pytorch/NamedIValue.java | 41 ++++++++++++++ .../org/bytedeco/pytorch/NamedTensor.java | 41 ++++++++++++++ .../bytedeco/pytorch/attribute_iterator.java | 55 +++++++++++++++++++ .../org/bytedeco/pytorch/attribute_list.java | 32 +++++++++++ .../org/bytedeco/pytorch/buffer_iterator.java | 55 +++++++++++++++++++ .../org/bytedeco/pytorch/buffer_list.java | 32 +++++++++++ .../org/bytedeco/pytorch/global/torch.java | 42 ++++++++++++++ .../pytorch/named_attribute_iterator.java | 55 +++++++++++++++++++ .../pytorch/named_attribute_list.java | 32 +++++++++++ .../pytorch/named_buffer_iterator.java | 55 +++++++++++++++++++ .../bytedeco/pytorch/named_buffer_list.java | 32 +++++++++++ .../pytorch/named_parameter_iterator.java | 55 +++++++++++++++++++ .../pytorch/named_parameter_list.java | 32 +++++++++++ .../bytedeco/pytorch/parameter_iterator.java | 55 +++++++++++++++++++ .../org/bytedeco/pytorch/parameter_list.java | 32 +++++++++++ .../org/bytedeco/pytorch/presets/torch.java | 5 +- 17 files changed, 662 insertions(+), 13 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java create mode 
100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java index bd1c16b5ec7..f3f47687f30 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java @@ -106,10 +106,10 @@ public native void register_attribute( public native void apply(@Const @ByRef JitModuleApplyFunction fn); - public native @ByVal @Cast("torch::jit::buffer_list*") module_list buffers(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal @Cast("torch::jit::buffer_list*") module_list buffers(); - public native @ByVal @Cast("torch::jit::named_buffer_list*") module_list named_buffers(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal @Cast("torch::jit::named_buffer_list*") module_list named_buffers(); + public native @ByVal buffer_list buffers(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal buffer_list buffers(); + public native @ByVal named_buffer_list named_buffers(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal named_buffer_list named_buffers(); public native @ByVal module_list children(); // direct modules public native @ByVal named_module_list named_children(); @@ -117,16 +117,16 @@ public native void register_attribute( public native @ByVal named_module_list named_modules(); // all tensors involved in gradient optimization - public native @ByVal @Cast("torch::jit::parameter_list*") module_list 
parameters(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal @Cast("torch::jit::parameter_list*") module_list parameters(); - public native @ByVal @Cast("torch::jit::named_parameter_list*") module_list named_parameters(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal @Cast("torch::jit::named_parameter_list*") module_list named_parameters(); + public native @ByVal parameter_list parameters(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal parameter_list parameters(); + public native @ByVal named_parameter_list named_parameters(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal named_parameter_list named_parameters(); // all members of the object, similar to iterating over dir(obj) in python - public native @ByVal @Cast("torch::jit::attribute_list*") module_list attributes(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal @Cast("torch::jit::attribute_list*") module_list attributes(); - public native @ByVal @Cast("torch::jit::named_attribute_list*") module_list named_attributes(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal @Cast("torch::jit::named_attribute_list*") module_list named_attributes(); + public native @ByVal attribute_list attributes(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal attribute_list attributes(); + public native @ByVal named_attribute_list named_attributes(@Cast("bool") boolean recurse/*=true*/); + public native @ByVal named_attribute_list named_attributes(); public native void dump( @Cast("bool") boolean print_method_bodies, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java new file mode 100644 index 00000000000..d3e34d907d8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java @@ -0,0 +1,41 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import 
org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::Named") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NamedIValue extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public NamedIValue() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public NamedIValue(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public NamedIValue(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public NamedIValue position(long position) { + return (NamedIValue)super.position(position); + } + @Override public NamedIValue getPointer(long i) { + return new NamedIValue((Pointer)this).offsetAddress(i); + } + + public native @StdString BytePointer name(); public native NamedIValue name(BytePointer setter); + public native @ByRef IValue value(); public native NamedIValue value(IValue setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java new file mode 100644 index 00000000000..09bf1bd7203 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java @@ -0,0 +1,41 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::Named") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NamedTensor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public NamedTensor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public NamedTensor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public NamedTensor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public NamedTensor position(long position) { + return (NamedTensor)super.position(position); + } + @Override public NamedTensor getPointer(long i) { + return new NamedTensor((Pointer)this).offsetAddress(i); + } + + public native @StdString BytePointer name(); public native NamedTensor name(BytePointer setter); + public native @ByRef Tensor value(); public native NamedTensor value(Tensor setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java new file mode 100644 index 00000000000..cf4c7ad5820 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_iterator_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class attribute_iterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public attribute_iterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public attribute_iterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public attribute_iterator position(long position) { + return (attribute_iterator)super.position(position); + } + @Override public attribute_iterator getPointer(long i) { + return new attribute_iterator((Pointer)this).offsetAddress(i); + } + + public attribute_iterator( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } + private native void allocate( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module); + // empty cursors_, represents end of iteration + public attribute_iterator() { super((Pointer)null); allocate(); } + private native void allocate(); + public native @ByVal @Name("operator *") IValue multiply(); + public native @ByVal @Name("operator ->") IValue access(); + public native @ByRef @Name("operator ++") attribute_iterator increment(); + public native @ByVal @Name("operator ++") attribute_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef attribute_iterator a, + @Const @ByRef attribute_iterator b); + public boolean notEquals(attribute_iterator b) { return notEquals(this, b); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java new file mode 100644 index 00000000000..53ddd45911f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java @@ -0,0 +1,32 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; 
+import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_list_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class attribute_list extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public attribute_list(Pointer p) { super(p); } + + public native @ByVal attribute_iterator begin(); + public native @ByVal attribute_iterator end(); + public native @Cast("size_t") long size(); + + public attribute_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } + private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java new file mode 100644 index 00000000000..a0b5610ea52 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static 
org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_iterator_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class buffer_iterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public buffer_iterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public buffer_iterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public buffer_iterator position(long position) { + return (buffer_iterator)super.position(position); + } + @Override public buffer_iterator getPointer(long i) { + return new buffer_iterator((Pointer)this).offsetAddress(i); + } + + public buffer_iterator( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } + private native void allocate( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module); + // empty cursors_, represents end of iteration + public buffer_iterator() { super((Pointer)null); allocate(); } + private native void allocate(); + public native @ByVal @Name("operator *") Tensor multiply(); + public native @ByVal @Name("operator ->") Tensor access(); + public native @ByRef @Name("operator ++") buffer_iterator increment(); + public native @ByVal @Name("operator ++") buffer_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef buffer_iterator a, + @Const @ByRef buffer_iterator b); + public boolean notEquals(buffer_iterator b) { return notEquals(this, b); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java new file mode 100644 index 00000000000..6e3e6674133 --- /dev/null +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java @@ -0,0 +1,32 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_list_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class buffer_list extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public buffer_list(Pointer p) { super(p); } + + public native @ByVal buffer_iterator begin(); + public native @ByVal buffer_iterator end(); + public native @Cast("size_t") long size(); + + public buffer_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } + private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 5b5555c824a..1e790b16b01 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -64220,6 +64220,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../NamedJitModule.java +// Targeting ../NamedTensor.java + + +// Targeting ../NamedIValue.java + + // namespace 
detail // Targeting ../JitModule.java @@ -64296,12 +64302,48 @@ The list of (type, depth) pairs controls the type of specializations and the num // Targeting ../named_module_iterator.java +// Targeting ../parameter_iterator.java + + +// Targeting ../named_parameter_iterator.java + + +// Targeting ../attribute_iterator.java + + +// Targeting ../named_attribute_iterator.java + + +// Targeting ../buffer_iterator.java + + +// Targeting ../named_buffer_iterator.java + + // Targeting ../module_list.java // Targeting ../named_module_list.java +// Targeting ../parameter_list.java + + +// Targeting ../named_parameter_list.java + + +// Targeting ../attribute_list.java + + +// Targeting ../named_attribute_list.java + + +// Targeting ../buffer_list.java + + +// Targeting ../named_buffer_list.java + + // Targeting ../ModulePolicy.java diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java new file mode 100644 index 00000000000..7724c8b9d99 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_iterator_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class named_attribute_iterator extends Pointer { + static { Loader.load(); } + 
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public named_attribute_iterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public named_attribute_iterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public named_attribute_iterator position(long position) { + return (named_attribute_iterator)super.position(position); + } + @Override public named_attribute_iterator getPointer(long i) { + return new named_attribute_iterator((Pointer)this).offsetAddress(i); + } + + public named_attribute_iterator( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } + private native void allocate( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module); + // empty cursors_, represents end of iteration + public named_attribute_iterator() { super((Pointer)null); allocate(); } + private native void allocate(); + public native @ByVal @Name("operator *") NamedIValue multiply(); + public native @ByVal @Name("operator ->") NamedIValue access(); + public native @ByRef @Name("operator ++") named_attribute_iterator increment(); + public native @ByVal @Name("operator ++") named_attribute_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef named_attribute_iterator a, + @Const @ByRef named_attribute_iterator b); + public boolean notEquals(named_attribute_iterator b) { return notEquals(this, b); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java new file mode 100644 index 00000000000..0d1ee8c7015 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java @@ -0,0 +1,32 @@ +// 
Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_list_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class named_attribute_list extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public named_attribute_list(Pointer p) { super(p); } + + public native @ByVal named_attribute_iterator begin(); + public native @ByVal named_attribute_iterator end(); + public native @Cast("size_t") long size(); + + public named_attribute_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } + private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java new file mode 100644 index 00000000000..1aeef22c386 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import 
org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_iterator_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class named_buffer_iterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public named_buffer_iterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public named_buffer_iterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public named_buffer_iterator position(long position) { + return (named_buffer_iterator)super.position(position); + } + @Override public named_buffer_iterator getPointer(long i) { + return new named_buffer_iterator((Pointer)this).offsetAddress(i); + } + + public named_buffer_iterator( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } + private native void allocate( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module); + // empty cursors_, represents end of iteration + public named_buffer_iterator() { super((Pointer)null); allocate(); } + private native void allocate(); + public native @ByVal @Name("operator *") NamedTensor multiply(); + public native @ByVal @Name("operator ->") NamedTensor access(); + public native @ByRef @Name("operator ++") named_buffer_iterator increment(); + public native @ByVal @Name("operator ++") named_buffer_iterator increment(int arg0); + + 
private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef named_buffer_iterator a, + @Const @ByRef named_buffer_iterator b); + public boolean notEquals(named_buffer_iterator b) { return notEquals(this, b); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java new file mode 100644 index 00000000000..f79ee2c7687 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java @@ -0,0 +1,32 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_list_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class named_buffer_list extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public named_buffer_list(Pointer p) { super(p); } + + public native @ByVal named_buffer_iterator begin(); + public native @ByVal named_buffer_iterator end(); + public native @Cast("size_t") long size(); + + public named_buffer_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } + private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java new file mode 100644 index 00000000000..b7b95d94a50 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_iterator_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class named_parameter_iterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public named_parameter_iterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public named_parameter_iterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public named_parameter_iterator position(long position) { + return (named_parameter_iterator)super.position(position); + } + @Override public named_parameter_iterator getPointer(long i) { + return new named_parameter_iterator((Pointer)this).offsetAddress(i); + } + + public named_parameter_iterator( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } + private native void allocate( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module); + // empty cursors_, represents end of iteration + public named_parameter_iterator() { super((Pointer)null); allocate(); } + private native void allocate(); + public native @ByVal @Name("operator *") NamedTensor multiply(); + public native @ByVal @Name("operator ->") NamedTensor access(); + public native @ByRef @Name("operator ++") named_parameter_iterator increment(); + public native @ByVal @Name("operator ++") named_parameter_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef named_parameter_iterator a, + @Const @ByRef named_parameter_iterator b); + public boolean notEquals(named_parameter_iterator b) { return notEquals(this, b); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java new file mode 100644 index 00000000000..6eba09a6a7d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java @@ -0,0 +1,32 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_list_impl >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class named_parameter_list extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public named_parameter_list(Pointer p) { super(p); } + + public native @ByVal named_parameter_iterator begin(); + public native @ByVal named_parameter_iterator end(); + public native @Cast("size_t") long size(); + + public named_parameter_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } + private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java new file mode 100644 index 00000000000..14f412baf94 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static 
org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_iterator_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class parameter_iterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public parameter_iterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public parameter_iterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public parameter_iterator position(long position) { + return (parameter_iterator)super.position(position); + } + @Override public parameter_iterator getPointer(long i) { + return new parameter_iterator((Pointer)this).offsetAddress(i); + } + + public parameter_iterator( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); } + private native void allocate( + @ByVal JitModule root, + @Cast("bool") boolean recurse, + @Cast("bool") boolean return_module); + // empty cursors_, represents end of iteration + public parameter_iterator() { super((Pointer)null); allocate(); } + private native void allocate(); + public native @ByVal @Name("operator *") Tensor multiply(); + public native @ByVal @Name("operator ->") Tensor access(); + public native @ByRef @Name("operator ++") parameter_iterator increment(); + public native @ByVal @Name("operator ++") parameter_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef parameter_iterator a, + @Const @ByRef parameter_iterator b); + public boolean notEquals(parameter_iterator b) { return 
notEquals(this, b); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java new file mode 100644 index 00000000000..f650a73ab2b --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java @@ -0,0 +1,32 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::jit::slot_list_impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class parameter_list extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public parameter_list(Pointer p) { super(p); } + + public native @ByVal parameter_iterator begin(); + public native @ByVal parameter_iterator end(); + public native @Cast("size_t") long size(); + + public parameter_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); } + private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index ba44db2b1e4..18d481d541e 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1123,7 +1123,10 @@ public void map(InfoMap infoMap) { //// Jit iterators for (String[] t : new String[][]{ - {"Module", "JitModule", "torch::jit::Module"} + {"Module", "JitModule", "torch::jit::Module"}, + {"Parameter", "Tensor", "torch::Tensor"}, + {"Attribute", "IValue", "c10::IValue"}, + {"Buffer", "Tensor", "torch::Tensor"} }) { infoMap.put(new Info( "torch::jit::slot_list_impl", From fe140fdd6da3546053c9c78e7fcce9ab8c52f368 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 30 Oct 2023 12:20:08 +0100 Subject: [PATCH 17/26] Update CUDA library paths to 12.3 --- .../main/java/org/bytedeco/pytorch/presets/torch.java | 10 +++++----- .../java/org/bytedeco/pytorch/presets/torch_cuda.java | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 18d481d541e..f4be35540e0 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -79,15 +79,15 @@ value = {"linux", "macosx", "windows"}, link = { "c10", 
"c10_cuda", "torch_cpu", "torch_cuda", "torch" }, preload = {"gomp@.1", "iomp5", "omp", "tbb@.2", "asmjit", "fbgemm", "cupti@.12"}, - includepath = {"/usr/local/cuda/include", "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/"}, + includepath = {"/usr/local/cuda/include", "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/"}, preloadpath = { - "/usr/local/cuda-12.1/lib64/", - "/usr/local/cuda-12.1/extras/CUPTI/lib64/", + "/usr/local/cuda-12.3/lib64/", + "/usr/local/cuda-12.3/extras/CUPTI/lib64/", "/usr/local/cuda/lib64/", "/usr/local/cuda/extras/CUPTI/lib64/", "/usr/lib64/", - "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/lib/x64/", - "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/extras/CUPTI/lib64/", + "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/lib/x64/", + "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/extras/CUPTI/lib64/", "C:/Program Files/NVIDIA Corporation/NvToolsExt/bin/x64/", }, diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 223849e4d77..ad06117d870 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -46,8 +46,8 @@ }, link = { "cudart", "cusparse" }, linkpath = { - "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/lib/x64/", - "/usr/local/cuda-12.1/lib64/", + "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/lib/x64/", + "/usr/local/cuda-12.3/lib64/", "/usr/local/cuda/lib64/", "/usr/lib64/" } From 4ffcc181d177d922772f7d506666c940cac5ede6 Mon Sep 17 00:00:00 2001 From: Samuel Audet Date: Tue, 31 Oct 2023 18:10:05 +0900 Subject: [PATCH 18/26] Try to update CUDA archs to "5.0;6.0;7.0;8.0+PTX" for PyTorch --- pytorch/cppbuild.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh index 
b5b4468a8b0..235a554be01 100755 --- a/pytorch/cppbuild.sh +++ b/pytorch/cppbuild.sh @@ -27,7 +27,7 @@ if [[ "$EXTENSION" == *gpu ]]; then export USE_CUDNN=1 export USE_FAST_NVCC=0 export CUDA_SEPARABLE_COMPILATION=OFF - export TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0+PTX" + export TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;8.0+PTX" fi export PYTHON_BIN_PATH=$(which python3) From 49668cbf900ee81571f4c2f1781c1ad3d94bf6d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Wed, 1 Nov 2023 09:11:30 +0100 Subject: [PATCH 19/26] Try to update CUDA archs to "5.0;6.0;7.0;8.0;9.0" for PyTorch --- pytorch/cppbuild.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh index 235a554be01..8e9d089548b 100755 --- a/pytorch/cppbuild.sh +++ b/pytorch/cppbuild.sh @@ -27,7 +27,7 @@ if [[ "$EXTENSION" == *gpu ]]; then export USE_CUDNN=1 export USE_FAST_NVCC=0 export CUDA_SEPARABLE_COMPILATION=OFF - export TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;8.0+PTX" + export TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;8.0;9.0" fi export PYTHON_BIN_PATH=$(which python3) From 2dfcc327cb529d7a7e9dcb11665cd29327df79b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Fri, 3 Nov 2023 11:28:39 +0100 Subject: [PATCH 20/26] Add item_byte and data_ptr_bool --- pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java | 1 + pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java | 1 + .../src/main/java/org/bytedeco/pytorch/presets/torch.java | 7 ++++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index b6c979b4382..a94350e2fd6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -155,6 +155,7 @@ private native void allocate( public native @Name("item") short item_short(); public native @Name("item") int item_int(); + public native 
@Name("item") int item_byte(); public native @Cast("int64_t") @Name("item") long item_long(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java index b0201f5c4a7..a1f46cd21c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java @@ -328,6 +328,7 @@ private native void allocate( // TODO(#97856) Make this return a const pointer. This is currently // const because of the vast number of clients that // rely on this. + public native @Cast("bool*") @Name("data_ptr") BoolPointer data_ptr_bool(); public native @Name("data_ptr") BytePointer data_ptr_char(); public native @Cast("uint8_t*") @Name("data_ptr") BytePointer data_ptr_byte(); public native @Name("data_ptr") ShortPointer data_ptr_short(); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index f4be35540e0..3aa55c02e9a 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1958,6 +1958,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset + "@Namespace(\"c10_complex_math\") public static native @ByVal @Name(\"pow\") DoubleComplex pow(@Const @ByRef float x, @Const @ByRef DoubleComplex y);\n" )) .put(new Info("c10::util::get_type_index").javaNames("get_type_index_string")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_bool")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_char")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_byte")) .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_short")) @@ -1968,7 +1969,11 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("at::Tensor::item").javaNames("item_bool")) .put(new 
Info("at::Tensor::item").javaNames("item_char")) .put(new Info("at::Tensor::item").javaNames("item_short")) - .put(new Info("at::Tensor::item").javaNames("item_int")) + // Since we don't have uint8 in Java, make item_byte an alias of item_int + .put(new Info("at::Tensor::item").javaText( + "public native @Name(\"item\") int item_int();\n" + + "public native @Name(\"item\") int item_byte();" + )) .put(new Info("at::Tensor::item").javaNames("item_long")) .put(new Info("at::Tensor::item").javaNames("item_float")) .put(new Info("at::Tensor::item").javaNames("item_double")) From 4fc9e28846b6579f682fbed47c4b90d10e88bf37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Fri, 3 Nov 2023 11:28:52 +0100 Subject: [PATCH 21/26] Add include_list.pl --- pytorch/include_list.pl | 65 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 pytorch/include_list.pl diff --git a/pytorch/include_list.pl b/pytorch/include_list.pl new file mode 100644 index 00000000000..4f3f9e65832 --- /dev/null +++ b/pytorch/include_list.pl @@ -0,0 +1,65 @@ +#!/bin/perl + +# Must be run at from javacpp-presets/pytorch after cppbuild.sh has been run +# for linux-x86_64-gpu + +# Generate the lists of includes to parse, in order, from the output +# of g++ -H +# Used to update src/main/resources/org/bytedeco/pytorch/presets/* + +use strict; +use warnings; + +my %incs; +my @inc_per_depth; + +sub flush($) { + my $min_depth = shift; + for (my $d = @inc_per_depth - 1; $d >= $min_depth; $d--) { + if ($inc_per_depth[$d]) { + foreach my $i (@{$inc_per_depth[$d]}) { + print "#include \"$i\"\n"; + $incs{$i} = 1; + } + undef $inc_per_depth[$d]; + } + } +} + +sub go { + my $path = join ' ', @_; + + my @inc = `g++ -I torch/csrc/api/include/ -I. 
-H $path -E 2>&1 > /dev/null`; + foreach my $i (@inc) { + chomp $i; + my ($depth, $f) = $i =~ /^(\.+)\s(.*\.h)$/; + next unless $depth; + $depth = length($depth); + $f =~ s#^\./##; + next if $f =~ m#^/ + |^ATen/ops/\w+_native\.h$ + |^ATen/ops/\w+_meta\.h$ + |^ATen/ops/\w+_ops\.h$ + |^ATen/ops/_\w+\.h$#x + or $incs{$f}; + flush($depth); + my $incs = $inc_per_depth[$depth]; + $incs = $inc_per_depth[$depth] = [] unless $incs; + push @$incs, $f; + } + flush(0); +} + +chdir "cppbuild/linux-x86_64-gpu/pytorch/torch/include"; + +go('torch/csrc/api/include/torch/torch.h', 'torch/script.h'); + +print < Date: Sat, 4 Nov 2023 09:31:04 +0100 Subject: [PATCH 22/26] Restore parse order of 2.0.1 --- pytorch/include_list.pl | 2 +- .../DeserializationStorageContext.java | 2 +- .../org/bytedeco/pytorch/global/torch.java | 5146 ++++++++--------- .../bytedeco/pytorch/global/torch_cuda.java | 408 +- .../pytorch/presets/torch_cuda_include.h | 10 +- .../bytedeco/pytorch/presets/torch_include.h | 70 +- 6 files changed, 2819 insertions(+), 2819 deletions(-) diff --git a/pytorch/include_list.pl b/pytorch/include_list.pl index 4f3f9e65832..1435c82f3ec 100644 --- a/pytorch/include_list.pl +++ b/pytorch/include_list.pl @@ -29,7 +29,7 @@ ($) sub go { my $path = join ' ', @_; - my @inc = `g++ -I torch/csrc/api/include/ -I. -H $path -E 2>&1 > /dev/null`; + my @inc = `g++ -I. 
-I torch/csrc/api/include/ -H $path -E 2>&1 > /dev/null`; foreach my $i (@inc) { chomp $i; my ($depth, $f) = $i =~ /^(\.+)\s(.*\.h)$/; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java index 89262907d22..622568cefd4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java @@ -16,7 +16,7 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; - // namespace caffe2 + @Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class DeserializationStorageContext extends Pointer { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 1e790b16b01..9acd11b13ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -757,115 +757,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif -// Parsed from c10/core/DeviceType.h - -// #pragma once - -// This is directly synchronized with caffe2/proto/caffe2.proto, but -// doesn't require me to figure out how to get Protobuf headers into -// ATen/core (which would require a lot more build system hacking.) -// If you modify me, keep me synchronized with that file. - -// #include - -// #include -// #include - -// These contains all device types that also have a BackendComponent -// and therefore participate in per-backend functionality dispatch keys. 
-// This is most backends except PrivateUse2 and PrivateUse3 -// #define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) -// _(CPU, extra) -// _(CUDA, extra) -// _(HIP, extra) -// _(XLA, extra) -// _(MPS, extra) -// _(IPU, extra) -// _(XPU, extra) -// _(HPU, extra) -// _(VE, extra) -// _(Lazy, extra) -// _(Meta, extra) -// _(MTIA, extra) -// _(PrivateUse1, extra) - -@Namespace("c10") public enum DeviceType { - CPU((byte)(0)), - CUDA((byte)(1)), // CUDA. - MKLDNN((byte)(2)), // Reserved for explicit MKLDNN - OPENGL((byte)(3)), // OpenGL - OPENCL((byte)(4)), // OpenCL - IDEEP((byte)(5)), // IDEEP. - HIP((byte)(6)), // AMD HIP - FPGA((byte)(7)), // FPGA - ORT((byte)(8)), // ONNX Runtime / Microsoft - XLA((byte)(9)), // XLA / TPU - Vulkan((byte)(10)), // Vulkan - Metal((byte)(11)), // Metal - XPU((byte)(12)), // XPU - MPS((byte)(13)), // MPS - Meta((byte)(14)), // Meta (tensors with no data) - HPU((byte)(15)), // HPU / HABANA - VE((byte)(16)), // SX-Aurora / NEC - Lazy((byte)(17)), // Lazy Tensors - IPU((byte)(18)), // Graphcore IPU - MTIA((byte)(19)), // Meta training and inference devices - PrivateUse1((byte)(20)), // PrivateUse1 device - // NB: If you add more devices: - // - Change the implementations of DeviceTypeName and isValidDeviceType - // in DeviceType.cpp - // - Change the number below - COMPILE_TIME_MAX_DEVICE_TYPES((byte)(21)); - - public final byte value; - private DeviceType(byte v) { this.value = v; } - private DeviceType(DeviceType e) { this.value = e.value; } - public DeviceType intern() { for (DeviceType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -@Namespace("c10") @MemberGetter public static native DeviceType kCPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kCUDA(); -@Namespace("c10") @MemberGetter public static native DeviceType kHIP(); -@Namespace("c10") @MemberGetter public static native DeviceType kFPGA(); -@Namespace("c10") @MemberGetter 
public static native DeviceType kORT(); -@Namespace("c10") @MemberGetter public static native DeviceType kXLA(); -@Namespace("c10") @MemberGetter public static native DeviceType kMPS(); -@Namespace("c10") @MemberGetter public static native DeviceType kMeta(); -@Namespace("c10") @MemberGetter public static native DeviceType kVulkan(); -@Namespace("c10") @MemberGetter public static native DeviceType kMetal(); -@Namespace("c10") @MemberGetter public static native DeviceType kXPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kHPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kVE(); -@Namespace("c10") @MemberGetter public static native DeviceType kLazy(); -@Namespace("c10") @MemberGetter public static native DeviceType kIPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kMTIA(); -@Namespace("c10") @MemberGetter public static native DeviceType kPrivateUse1(); - -// define explicit int constant -@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_MAX_DEVICE_TYPES(); - -@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d, @Cast("bool") boolean lower_case/*=false*/); -@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d); -@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d, @Cast("bool") boolean lower_case/*=false*/); -@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d); - -@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(DeviceType d); -@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(@Cast("c10::DeviceType") byte d); - -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, DeviceType type); -@Namespace("c10") public static native @Cast("std::ostream*") 
@ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Cast("c10::DeviceType") byte type); - -@Namespace("c10") public static native void register_privateuse1_backend(@StdString BytePointer backend_name); -@Namespace("c10") public static native void register_privateuse1_backend(@StdString String backend_name); -@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(@Cast("bool") boolean lower_case/*=true*/); -@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(); - - // namespace c10 - // namespace std - - - // Parsed from c10/macros/Macros.h // #ifndef C10_MACROS_MACROS_H_ @@ -1292,6 +1183,193 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_MACROS_MACROS_H_ +// Parsed from c10/core/DeviceType.h + +// #pragma once + +// This is directly synchronized with caffe2/proto/caffe2.proto, but +// doesn't require me to figure out how to get Protobuf headers into +// ATen/core (which would require a lot more build system hacking.) +// If you modify me, keep me synchronized with that file. + +// #include + +// #include +// #include + +// These contains all device types that also have a BackendComponent +// and therefore participate in per-backend functionality dispatch keys. +// This is most backends except PrivateUse2 and PrivateUse3 +// #define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) +// _(CPU, extra) +// _(CUDA, extra) +// _(HIP, extra) +// _(XLA, extra) +// _(MPS, extra) +// _(IPU, extra) +// _(XPU, extra) +// _(HPU, extra) +// _(VE, extra) +// _(Lazy, extra) +// _(Meta, extra) +// _(MTIA, extra) +// _(PrivateUse1, extra) + +@Namespace("c10") public enum DeviceType { + CPU((byte)(0)), + CUDA((byte)(1)), // CUDA. + MKLDNN((byte)(2)), // Reserved for explicit MKLDNN + OPENGL((byte)(3)), // OpenGL + OPENCL((byte)(4)), // OpenCL + IDEEP((byte)(5)), // IDEEP. 
+ HIP((byte)(6)), // AMD HIP + FPGA((byte)(7)), // FPGA + ORT((byte)(8)), // ONNX Runtime / Microsoft + XLA((byte)(9)), // XLA / TPU + Vulkan((byte)(10)), // Vulkan + Metal((byte)(11)), // Metal + XPU((byte)(12)), // XPU + MPS((byte)(13)), // MPS + Meta((byte)(14)), // Meta (tensors with no data) + HPU((byte)(15)), // HPU / HABANA + VE((byte)(16)), // SX-Aurora / NEC + Lazy((byte)(17)), // Lazy Tensors + IPU((byte)(18)), // Graphcore IPU + MTIA((byte)(19)), // Meta training and inference devices + PrivateUse1((byte)(20)), // PrivateUse1 device + // NB: If you add more devices: + // - Change the implementations of DeviceTypeName and isValidDeviceType + // in DeviceType.cpp + // - Change the number below + COMPILE_TIME_MAX_DEVICE_TYPES((byte)(21)); + + public final byte value; + private DeviceType(byte v) { this.value = v; } + private DeviceType(DeviceType e) { this.value = e.value; } + public DeviceType intern() { for (DeviceType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +@Namespace("c10") @MemberGetter public static native DeviceType kCPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kCUDA(); +@Namespace("c10") @MemberGetter public static native DeviceType kHIP(); +@Namespace("c10") @MemberGetter public static native DeviceType kFPGA(); +@Namespace("c10") @MemberGetter public static native DeviceType kORT(); +@Namespace("c10") @MemberGetter public static native DeviceType kXLA(); +@Namespace("c10") @MemberGetter public static native DeviceType kMPS(); +@Namespace("c10") @MemberGetter public static native DeviceType kMeta(); +@Namespace("c10") @MemberGetter public static native DeviceType kVulkan(); +@Namespace("c10") @MemberGetter public static native DeviceType kMetal(); +@Namespace("c10") @MemberGetter public static native DeviceType kXPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kHPU(); +@Namespace("c10") @MemberGetter public 
static native DeviceType kVE(); +@Namespace("c10") @MemberGetter public static native DeviceType kLazy(); +@Namespace("c10") @MemberGetter public static native DeviceType kIPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kMTIA(); +@Namespace("c10") @MemberGetter public static native DeviceType kPrivateUse1(); + +// define explicit int constant +@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_MAX_DEVICE_TYPES(); + +@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d, @Cast("bool") boolean lower_case/*=false*/); +@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d); +@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d, @Cast("bool") boolean lower_case/*=false*/); +@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d); + +@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(DeviceType d); +@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(@Cast("c10::DeviceType") byte d); + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, DeviceType type); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Cast("c10::DeviceType") byte type); + +@Namespace("c10") public static native void register_privateuse1_backend(@StdString BytePointer backend_name); +@Namespace("c10") public static native void register_privateuse1_backend(@StdString String backend_name); +@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(@Cast("bool") boolean lower_case/*=true*/); +@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(); + + // namespace c10 + // 
namespace std + + + +// Parsed from c10/util/Deprecated.h + +// #pragma once + +/** + * This file provides portable macros for marking declarations + * as deprecated. You should generally use C10_DEPRECATED, + * except when marking 'using' declarations as deprecated, + * in which case you should use C10_DEFINE_DEPRECATED_USING + * (due to portability concerns). + */ + +// Sample usage: +// +// C10_DEPRECATED void bad_func(); +// struct C10_DEPRECATED BadStruct { +// ... +// }; + +// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses +// the "__declspec(deprecated)" implementation and not the C++14 +// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on +// MSVC, but ran into issues with some older MSVC versions. +// #if (defined(__cplusplus) && __cplusplus >= 201402L) +// #define C10_DEPRECATED [[deprecated]] +// #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] +// #elif defined(__GNUC__) +// #define C10_DEPRECATED __attribute__((deprecated)) +// TODO Is there some way to implement this? +// #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) + +// #elif defined(_MSC_VER) +// #else +// #warning "You need to implement C10_DEPRECATED for this compiler" +// #define C10_DEPRECATED +// #endif + +// Sample usage: +// +// C10_DEFINE_DEPRECATED_USING(BadType, int) +// +// which is the portable version of +// +// using BadType [[deprecated]] = int; + +// technically [[deprecated]] syntax is from c++14 standard, but it works in +// many compilers. 
+// #if defined(__has_cpp_attribute) +// #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName [[deprecated]] = TypeThingy; +// #endif +// #endif + +// #if defined(_MSC_VER) +// #endif + +// #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) +// nvcc has a bug where it doesn't understand __attribute__((deprecated)) +// declarations even when the host compiler supports it. We'll only use this gcc +// attribute when not cuda, and when using a GCC compiler that doesn't support +// the c++14 syntax we checked for above (available in __GNUC__ >= 5) +// #if !defined(__CUDACC__) +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName __attribute__((deprecated)) = TypeThingy; +// #else +// using cuda + gcc < 5, neither deprecated syntax is available so turning off. +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName = TypeThingy; +// #endif +// #endif + +// #if !defined(C10_DEFINE_DEPRECATED_USING) +// #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" +// #define C10_DEFINE_DEPRECATED_USING +// #endif + + // Parsed from c10/util/reverse_iterator.h // #pragma once @@ -3258,84 +3336,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/util/Deprecated.h - -// #pragma once - -/** - * This file provides portable macros for marking declarations - * as deprecated. You should generally use C10_DEPRECATED, - * except when marking 'using' declarations as deprecated, - * in which case you should use C10_DEFINE_DEPRECATED_USING - * (due to portability concerns). - */ - -// Sample usage: -// -// C10_DEPRECATED void bad_func(); -// struct C10_DEPRECATED BadStruct { -// ... -// }; - -// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses -// the "__declspec(deprecated)" implementation and not the C++14 -// "[[deprecated]]" attribute. 
We tried enabling "[[deprecated]]" for C++14 on -// MSVC, but ran into issues with some older MSVC versions. -// #if (defined(__cplusplus) && __cplusplus >= 201402L) -// #define C10_DEPRECATED [[deprecated]] -// #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] -// #elif defined(__GNUC__) -// #define C10_DEPRECATED __attribute__((deprecated)) -// TODO Is there some way to implement this? -// #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) - -// #elif defined(_MSC_VER) -// #else -// #warning "You need to implement C10_DEPRECATED for this compiler" -// #define C10_DEPRECATED -// #endif - -// Sample usage: -// -// C10_DEFINE_DEPRECATED_USING(BadType, int) -// -// which is the portable version of -// -// using BadType [[deprecated]] = int; - -// technically [[deprecated]] syntax is from c++14 standard, but it works in -// many compilers. -// #if defined(__has_cpp_attribute) -// #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName [[deprecated]] = TypeThingy; -// #endif -// #endif - -// #if defined(_MSC_VER) -// #endif - -// #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) -// nvcc has a bug where it doesn't understand __attribute__((deprecated)) -// declarations even when the host compiler supports it. We'll only use this gcc -// attribute when not cuda, and when using a GCC compiler that doesn't support -// the c++14 syntax we checked for above (available in __GNUC__ >= 5) -// #if !defined(__CUDACC__) -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName __attribute__((deprecated)) = TypeThingy; -// #else -// using cuda + gcc < 5, neither deprecated syntax is available so turning off. 
-// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName = TypeThingy; -// #endif -// #endif - -// #if !defined(C10_DEFINE_DEPRECATED_USING) -// #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" -// #define C10_DEFINE_DEPRECATED_USING -// #endif - - // Parsed from c10/util/AlignOf.h //===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// @@ -3906,6 +3906,37 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from c10/util/BFloat16.h + +// #pragma once + +// Defines the bloat16 type (brain floating-point). This representation uses +// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa. + +// #include +// #include +// #include + +// #if defined(__CUDACC__) && !defined(USE_ROCM) +// #endif + +// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +// #endif +@Namespace("c10::detail") public static native float f32_from_bits(@Cast("uint16_t") short src); + +@Namespace("c10::detail") public static native @Cast("uint16_t") short bits_from_f32(float src); + +@Namespace("c10::detail") public static native @Cast("uint16_t") short round_to_nearest_even(float src); + +// Targeting ../BFloat16.java + + + + // namespace c10 + +// #include // IWYU pragma: keep + + // Parsed from c10/util/BFloat16-inl.h // #pragma once @@ -4037,37 +4068,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// Parsed from c10/util/BFloat16.h - -// #pragma once - -// Defines the bloat16 type (brain floating-point). This representation uses -// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa. 
- -// #include -// #include -// #include - -// #if defined(__CUDACC__) && !defined(USE_ROCM) -// #endif - -// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) -// #endif -@Namespace("c10::detail") public static native float f32_from_bits(@Cast("uint16_t") short src); - -@Namespace("c10::detail") public static native @Cast("uint16_t") short bits_from_f32(float src); - -@Namespace("c10::detail") public static native @Cast("uint16_t") short round_to_nearest_even(float src); - -// Targeting ../BFloat16.java - - - - // namespace c10 - -// #include // IWYU pragma: keep - - // Parsed from c10/util/TypeSafeSignMath.h // #pragma once @@ -4484,23 +4484,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace std -// Parsed from c10/util/complex_utils.h - -// #if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) -// #error -// "c10/util/complex_utils.h is not meant to be individually included. Include c10/util/complex.h instead." -// #endif - -// #include - -// Extract double from std::complex; is identity otherwise -// TODO: Write in more idiomatic C++17 - - // namespace c10 - - // namespace std - - // Parsed from c10/util/Half.h // #pragma once @@ -4633,6 +4616,161 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // IWYU pragma: keep +// Parsed from c10/util/Half-inl.h + +// #pragma once + +// #include +// #include + +// #include +// #include + +// #ifdef __CUDACC__ +// #include +// #endif + +// #ifdef __HIPCC__ +// #include +// #endif + +// #if defined(CL_SYCL_LANGUAGE_VERSION) +// #include // for SYCL 1.2.1 +// #elif defined(SYCL_LANGUAGE_VERSION) +// #include // for SYCL 2020 +// #endif + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif + +/** Constructors */ + + + +/** Implicit conversions */ + + + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + +// #ifdef SYCL_LANGUAGE_VERSION +// #endif + +// CUDA intrinsics + +// #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) 
|| +// (defined(__clang__) && defined(__CUDA__)) + +// #endif + +/** Arithmetic */ + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@Const @ByRef Half a, @Const @ByRef Half b); + +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@Const @ByRef Half a, @Const @ByRef Half b); + +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@Const @ByRef Half a, @Const @ByRef Half b); + +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@Const @ByRef Half a, @Const @ByRef Half b); + +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@Const @ByRef Half a); + +@Namespace("c10") public static native @ByRef @Name("operator +=") Half addPut(@ByRef Half a, @Const @ByRef Half b); + +@Namespace("c10") public static native @ByRef @Name("operator -=") Half subtractPut(@ByRef Half a, @Const @ByRef Half b); + +@Namespace("c10") public static native @ByRef @Name("operator *=") Half multiplyPut(@ByRef Half a, @Const @ByRef Half b); + +@Namespace("c10") public static native @ByRef @Name("operator /=") Half dividePut(@ByRef Half a, @Const @ByRef Half b); + +/** Arithmetic with floats */ + +@Namespace("c10") public static native @Name("operator +") float add(@ByVal Half a, float b); +@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal Half a, float b); +@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal Half a, float b); +@Namespace("c10") public static native @Name("operator /") float divide(@ByVal Half a, float b); + +@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator /") float divide(float a, 
@ByVal Half b); + +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer addPut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const @ByRef Half b); + +/** Arithmetic with doubles */ + +@Namespace("c10") public static native @Name("operator +") double add(@ByVal Half a, double b); +@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal Half a, double b); +@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal Half a, double b); +@Namespace("c10") public static native 
@Name("operator /") double divide(@ByVal Half a, double b); + +@Namespace("c10") public static native @Name("operator +") double add(double a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator -") double subtract(double a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator /") double divide(double a, @ByVal Half b); + +/** Arithmetic with ints */ + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@ByVal Half a, int b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@ByVal Half a, int b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@ByVal Half a, int b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@ByVal Half a, int b); + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(int a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(int a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(int a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(int a, @ByVal Half b); + +//// Arithmetic with int64_t + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@ByVal Half a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@ByVal Half a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@ByVal Half a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@ByVal Half a, @Cast("int64_t") long b); + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@Cast("int64_t") long a, @ByVal Half b); +@Namespace("c10") public static native @ByVal 
@Name("operator -") Half subtract(@Cast("int64_t") long a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@Cast("int64_t") long a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@Cast("int64_t") long a, @ByVal Half b); + +/** NOTE: we do not define comparisons directly and instead rely on the implicit + * conversion from c10::Half to float. */ + + // namespace c10 + + // namespace std + + + +// Parsed from c10/util/complex_utils.h + +// #if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) +// #error +// "c10/util/complex_utils.h is not meant to be individually included. Include c10/util/complex.h instead." +// #endif + +// #include + +// Extract double from std::complex; is identity otherwise +// TODO: Write in more idiomatic C++17 + + // namespace c10 + + // namespace std + + // Parsed from c10/util/complex.h // #pragma once @@ -4716,144 +4854,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #undef C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H -// Parsed from c10/util/Half-inl.h - -// #pragma once - -// #include -// #include - -// #include -// #include - -// #ifdef __CUDACC__ -// #include -// #endif - -// #ifdef __HIPCC__ -// #include -// #endif - -// #if defined(CL_SYCL_LANGUAGE_VERSION) -// #include // for SYCL 1.2.1 -// #elif defined(SYCL_LANGUAGE_VERSION) -// #include // for SYCL 2020 -// #endif - -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") -// #endif - -/** Constructors */ - - - -/** Implicit conversions */ - - - -// #if defined(__CUDACC__) || defined(__HIPCC__) -// #endif - -// #ifdef SYCL_LANGUAGE_VERSION -// #endif - -// CUDA intrinsics - -// #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || -// (defined(__clang__) && defined(__CUDA__)) - -// #endif - -/** Arithmetic */ - -@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@Const @ByRef Half a, @Const @ByRef Half b); - -@Namespace("c10") 
public static native @ByVal @Name("operator -") Half subtract(@Const @ByRef Half a, @Const @ByRef Half b); - -@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@Const @ByRef Half a, @Const @ByRef Half b); - -@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@Const @ByRef Half a, @Const @ByRef Half b); - -@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@Const @ByRef Half a); - -@Namespace("c10") public static native @ByRef @Name("operator +=") Half addPut(@ByRef Half a, @Const @ByRef Half b); - -@Namespace("c10") public static native @ByRef @Name("operator -=") Half subtractPut(@ByRef Half a, @Const @ByRef Half b); - -@Namespace("c10") public static native @ByRef @Name("operator *=") Half multiplyPut(@ByRef Half a, @Const @ByRef Half b); - -@Namespace("c10") public static native @ByRef @Name("operator /=") Half dividePut(@ByRef Half a, @Const @ByRef Half b); - -/** Arithmetic with floats */ - -@Namespace("c10") public static native @Name("operator +") float add(@ByVal Half a, float b); -@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal Half a, float b); -@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal Half a, float b); -@Namespace("c10") public static native @Name("operator /") float divide(@ByVal Half a, float b); - -@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal Half b); -@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal Half b); -@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal Half b); -@Namespace("c10") public static native @Name("operator /") float divide(float a, @ByVal Half b); - -@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer 
addPut(@ByRef FloatBuffer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef Half b); -@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const @ByRef Half b); - -/** Arithmetic with doubles */ - -@Namespace("c10") public static native @Name("operator +") double add(@ByVal Half a, double b); -@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal Half a, double b); -@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal Half a, double b); -@Namespace("c10") public static native @Name("operator /") double divide(@ByVal Half a, double b); - -@Namespace("c10") public static native @Name("operator +") double add(double a, @ByVal Half b); -@Namespace("c10") public static native @Name("operator -") double 
subtract(double a, @ByVal Half b); -@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal Half b); -@Namespace("c10") public static native @Name("operator /") double divide(double a, @ByVal Half b); - -/** Arithmetic with ints */ - -@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@ByVal Half a, int b); -@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@ByVal Half a, int b); -@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@ByVal Half a, int b); -@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@ByVal Half a, int b); - -@Namespace("c10") public static native @ByVal @Name("operator +") Half add(int a, @ByVal Half b); -@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(int a, @ByVal Half b); -@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(int a, @ByVal Half b); -@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(int a, @ByVal Half b); - -//// Arithmetic with int64_t - -@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@ByVal Half a, @Cast("int64_t") long b); -@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@ByVal Half a, @Cast("int64_t") long b); -@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@ByVal Half a, @Cast("int64_t") long b); -@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@ByVal Half a, @Cast("int64_t") long b); - -@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@Cast("int64_t") long a, @ByVal Half b); -@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@Cast("int64_t") long a, @ByVal Half b); -@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@Cast("int64_t") long a, @ByVal Half b); -@Namespace("c10") public 
static native @ByVal @Name("operator /") Half divide(@Cast("int64_t") long a, @ByVal Half b); - -/** NOTE: we do not define comparisons directly and instead rely on the implicit - * conversion from c10::Half to float. */ - - // namespace c10 - - // namespace std - - - // Parsed from c10/util/Float8_e5m2-inl.h // #pragma once @@ -5477,6 +5477,62 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 +// Parsed from c10/util/ExclusivelyOwned.h + +// #pragma once + +// #include + +// See example implementation in TensorBase.h and TensorBody.h. +// Synopsis: +// +// repr_type -- type to use to store an owned T in ExclusivelyOwned. +// +// pointer_type -- pointer-esque type to return from +// ExclusivelyOwned's get() and operator*() methods. +// +// const_pointer_type -- similar to pointer_type, used for the const methods. +// +// static repr_type nullRepr() -- return a null instance of repr_type. +// +// template +// static repr_type createInPlace(Args&&... args) -- used by the in-place +// ExclusivelyOwned constructor. +// +// static repr_type moveToRepr(T&& x) -- move the given x into an +// instance of repr_type. used by the ExclusivelyOwned(T&&) +// constructor. +// +// static void destroyOwned(repr_type x) -- free memory for a +// known-exclusively-owned instance of x. Replaces calling repr_type's +// destructor. Being able to implement this more efficiently than +// repr_type's destructor is the main reason to use ExclusivelyOwned +// for a type. +// +// static T take(repr_type&) -- move out of the given repr_type into an owned T. +// +// static pointer_type getImpl(const repr_type&) -- return a pointer +// to the given repr_type. May take repr_type by value if that is more +// efficient. + +/** ExclusivelyOwned is a smart-pointer-like wrapper around an + * exclusively-owned instance of some type T that normally has + * mandatory reference counting (currently just Tensor). 
If you have + * an isolated piece of code that knows that it has sole ownership of + * an object of one of these types (i.e., because you created it + * directly or using a factory function) and that object will not + * escape from that isolated piece of code, then moving the object + * into an ExclusivelyOwned will avoid an atomic reference count + * decrement at destruction time. + * + * If you directly create the Tensor in the first + * place, you can use the in_place constructor of ExclusivelyOwned to + * additionally avoid doing any stores to initialize the refcount & + * weakcount. */ + + // namespace c10 + + // Parsed from c10/util/MaybeOwned.h // #pragma once @@ -5523,44 +5579,44 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/SymBool.h +// Parsed from c10/core/SymFloat.h // #pragma once +// #include // #include // #include // #include // #include -// Targeting ../SymBool.java - +// #include +// Targeting ../SymFloat.java -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymBool s); -// #define TORCH_SYM_CHECK(cond, ...) -// TORCH_CHECK((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) -// #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) 
-// TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymFloat s); // namespace c10 -// Parsed from c10/core/SymFloat.h +// Parsed from c10/core/SymBool.h // #pragma once -// #include // #include // #include // #include // #include +// Targeting ../SymBool.java -// #include -// Targeting ../SymFloat.java +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymBool s); + +// #define TORCH_SYM_CHECK(cond, ...) +// TORCH_CHECK((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) +// #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) +// TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymFloat s); // namespace c10 @@ -5836,6 +5892,30 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // } +// Parsed from c10/util/Type.h + +// #ifndef C10_UTIL_TYPE_H_ +// #define C10_UTIL_TYPE_H_ + +// #include +// #include +// #ifdef __GXX_RTTI +// #include +// #endif // __GXX_RTTI + +// #include + +/** Utility to demangle a C++ symbol name. */ +@Namespace("c10") public static native @StdString BytePointer demangle(@Cast("const char*") BytePointer name); +@Namespace("c10") public static native @StdString String demangle(String name); + +/** Returns the printable name of the type. 
*/ + + // namespace c10 + +// #endif // C10_UTIL_TYPE_H_ + + // Parsed from c10/util/ConstexprCrc.h // #pragma once @@ -5945,6 +6025,67 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from c10/util/flat_hash_map.h + +// Taken from +// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp +// with fixes applied: +// - https://github.com/skarupke/flat_hash_map/pull/25 +// - https://github.com/skarupke/flat_hash_map/pull/26 +// - replace size_t with uint64_t to fix it for 32bit +// - add "GCC diagnostic" pragma to ignore -Wshadow +// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems +// to have issues with it otherwise +// - fix compiler warnings in operator templated_iterator + +// Copyright Malte Skarupke 2017. +// Distributed under the Boost Software License, Version 1.0. +// (See http://www.boost.org/LICENSE_1_0.txt) + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif + +// #if defined(_MSC_VER) && !defined(__clang__) +// #pragma warning(push) +// #pragma warning(disable : 4624) // destructor was implicitly defined as deleted +// #endif + +// #ifdef _MSC_VER +// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ +// #else +// #define SKA_NOINLINE(...) 
__VA_ARGS__ __attribute__((noinline)) +// #endif +@Namespace("ska::detailv3") @MemberGetter public static native byte min_lookups(); +public static final byte min_lookups = min_lookups(); + +@Namespace("ska::detailv3") public static native byte log2(@Cast("uint64_t") long value); + +@Namespace("ska::detailv3") public static native @Cast("uint64_t") long next_power_of_two(@Cast("uint64_t") long i); + +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) + // namespace detailv3 + + // end namespace ska + +// #if defined(_MSC_VER) && !defined(__clang__) +// #pragma warning(pop) +// #endif + + // Parsed from c10/util/irange.h // Copyright 2004-present Facebook. All Rights Reserved. @@ -6465,136 +6606,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/impl/HermeticPyObjectTLS.h - -// #pragma once - -// #include -// #include -// Targeting ../HermeticPyObjectTLS.java - - - - // namespace impl - // namespace c10 - - -// Parsed from c10/core/SymIntArrayRef.h - -// #pragma once - -// #include -// #include -// #include -// #include - -@Namespace("c10") public static native @ByVal LongArrayRef asIntArrayRefUnchecked(@ByVal SymIntArrayRef ar); - -// TODO: a SymIntArrayRef containing a heap allocated large negative integer -// can actually technically be converted to an IntArrayRef... but not with -// the non-owning API we have here. We can't reinterpet cast; we have to -// allocate another buffer and write the integers into it. If you need it, -// we can do it. But I don't think you need it. 
- -@Namespace("c10") public static native @ByVal LongArrayRefOptional asIntArrayRefSlowOpt( - @ByVal SymIntArrayRef ar); - - - -// #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__) - -// Prefer using a more semantic constructor, like -// fromIntArrayRefKnownNonNegative -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... array_ref); - -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... array_ref); - -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... array_ref); - - // namespace c10 - - // Parsed from c10/util/python_stub.h // #pragma once -// Parsed from c10/core/impl/PyInterpreter.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// Forward declarations - // namespace c10 - - // namespace torch - -// Actual implementation -// Targeting ../PyInterpreterVTable.java - - -// Targeting ../PyInterpreter.java - - - -// PyInterpreterStatus describes what the state of its interpreter tag -// is, relative to the thread currently holding the GIL. 
-@Namespace("c10::impl") public enum PyInterpreterStatus { - // We just allocated the Tensor, it hasn't escaped to other threads, - // we know that it definitely hasn't been tagged to be associated - // with an interpreter. - DEFINITELY_UNINITIALIZED(0), - // We queried the interpreter field and it looked uninitialized. But - // another thread may have raced with us to tag it with some other - // interpreter id. So we will have to do a CEX to make sure we can - // actually nab it. - MAYBE_UNINITIALIZED(1), - // We queried the interpreter field and it was tagged to belong to us. - // This means we have sole write access (as we hold the GIL for this - // interpreter) - TAGGED_BY_US(2), - // Someone else tagged this. We can't use this TensorImpl from Python. - TAGGED_BY_OTHER(3); - - public final int value; - private PyInterpreterStatus(int v) { this.value = v; } - private PyInterpreterStatus(PyInterpreterStatus e) { this.value = e.value; } - public PyInterpreterStatus intern() { for (PyInterpreterStatus e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - - // namespace impl - // namespace c10 - - -// Parsed from c10/core/impl/PyObjectSlot.h - -// #pragma once - -// #include -// #include -// #include -// #include - -// #include - - // namespace impl - // namespace c10 - - // Parsed from c10/core/StorageImpl.h // #pragma once @@ -6643,182 +6659,27 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/impl/LocalDispatchKeySet.h - -// #pragma once - -// #include -// #include - -// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) -// -// This manages two thread-local DispatchKeySets: -// -// - The included type set, which adds a tensor type for consideration -// in dispatch. (For example, you might add Profiling to -// the included type set to turn on profiling on all tensor operations.) 
-// -// - The excluded type set, which disqualifies a tensor type from dispatch. -// (For example, after redispatching on variable, we disqualify -// Autograd so we don't attempt to handle variable again.) -// (Exclusion wins over inclusion.) -// -// NB: Originally, I implemented the excluded type set as storing the inverted -// set, but TLS is defined to be zero-initialized, so this doesn't actually work -// (if it's inverted, you want the set to be -1 initialized). -// Targeting ../PODLocalDispatchKeySet.java - - -// Targeting ../LocalDispatchKeySet.java - - - -// thread_local variables cannot be C10_API on Windows. -// Inlining this seems to break AutoDispatchBelowAutograd on Android. -// #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) -@Namespace("c10::impl") public static native @ByVal LocalDispatchKeySet tls_local_dispatch_key_set(); -// #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) - -// #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) - -// Internal, use ThreadLocalStateGuard - -// Targeting ../IncludeDispatchKeyGuard.java - - -// Targeting ../ForceDispatchKeyGuard.java - - - -// Non-RAII API for manipulating the thread-local dispatch state. -// Please prefer the RAII API. The non-RAII API may be useful when -// the included/excluded state of a given DispatchKey must span -// many calls from the Python to the C++, so you cannot conveniently -// use an RAII guard. -// -// Example use case: a Python context manager that includes a certain -// DispatchKey, to ensure ops running under the context manager dispatch -// through that DispatchKey's registered overrides. -// -// The non-RAII API is less efficient than the RAII guards because both the -// getter and setter will do a tls_getaddr lookup (the RAII struct only needs -// one!) 
- -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(DispatchKey x); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(@Cast("c10::DispatchKey") short x); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(DispatchKey x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(DispatchKey x); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(@Cast("c10::DispatchKey") short x); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(DispatchKey x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_excluded(@ByVal DispatchKeySet ks); -@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_included(@ByVal DispatchKeySet ks); - - // namespace impl - // namespace c10 - - -// Parsed from c10/core/InferenceMode.h +// Parsed from c10/core/GradMode.h // #pragma once // #include -// #include // #include -// Targeting ../InferenceMode.java - - - // namespace c10 - - -// Parsed from c10/core/WrapDimMinimal.h - -// #pragma once - -// #include -// This template can only be specialized at int64_t and c10::SymInt; -// you'll get linker errors otherwise - // namespace detail - -@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr, - @Cast("bool") boolean wrap_scalar/*=true*/); -@Namespace("c10") public static 
native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr); - -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr, - @Cast("bool") boolean wrap_scalar/*=true*/); -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr); - - // namespace c10 - - -// Parsed from c10/core/impl/SizesAndStrides.h - -// #pragma once - -// #include -// #include - -// #include -// #include -// #include - -public static final int C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE = 5; -// Targeting ../SizesAndStrides.java - - - - // namespace impl - // namespace c10 - - -// Parsed from c10/util/DimVector.h - -// #pragma once - -// #include -// #include -// #include -// #include - -@Namespace("c10") @MemberGetter public static native @Cast("const size_t") long kDimVectorStaticSize(); - -/** A container for sizes or strides */ +// Targeting ../GradMode.java - // namespace c10 +// Targeting ../AutoGradMode.java -// Parsed from c10/util/Type.h -// #ifndef C10_UTIL_TYPE_H_ -// #define C10_UTIL_TYPE_H_ +// Targeting ../NoGradGuard.java -// #include -// #include -// #ifdef __GXX_RTTI -// #include -// #endif // __GXX_RTTI -// #include +// Targeting ../AutoFwGradMode.java -/** Utility to demangle a C++ symbol name. */ -@Namespace("c10") public static native @StdString BytePointer demangle(@Cast("const char*") BytePointer name); -@Namespace("c10") public static native @StdString String demangle(String name); -/** Returns the printable name of the type. */ // namespace c10 -// #endif // C10_UTIL_TYPE_H_ - // Parsed from c10/util/Registry.h @@ -7010,211 +6871,976 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_UTIL_REGISTRY_H_ -// Parsed from c10/util/Flags.h +// Parsed from c10/util/Flags.h + +// #ifndef C10_UTIL_FLAGS_H_ +// #define C10_UTIL_FLAGS_H_ + +/* Commandline flags support for C10. 
+ * + * This is a portable commandline flags tool for c10, so we can optionally + * choose to use gflags or a lightweight custom implementation if gflags is + * not possible on a certain platform. If you have gflags installed, set the + * macro C10_USE_GFLAGS will seamlessly route everything to gflags. + * + * To define a flag foo of type bool default to true, do the following in the + * *global* namespace: + * C10_DEFINE_bool(foo, true, "An example."); + * + * To use it in another .cc file, you can use C10_DECLARE_* as follows: + * C10_DECLARE_bool(foo); + * + * In both cases, you can then access the flag via FLAGS_foo. + * + * It is recommended that you build with gflags. To learn more about the flags + * usage, refer to the gflags page here: + * + * https://gflags.github.io/gflags/ + * + * Note about Python users / devs: gflags is initiated from a C++ function + * ParseCommandLineFlags, and is usually done in native binaries in the main + * function. As Python does not have a modifiable main function, it is usually + * difficult to change the flags after Python starts. Hence, it is recommended + * that one sets the default value of the flags to one that's acceptable in + * general - that will allow Python to run without wrong flags. + */ + +// #include + +// #include +// #include +/** + * Sets the usage message when a commandline tool is called with "--help". + */ +@Namespace("c10") public static native void SetUsageMessage(@StdString BytePointer str); +@Namespace("c10") public static native void SetUsageMessage(@StdString String str); + +/** + * Returns the usage message for the commandline tool set by SetUsageMessage. + */ +@Namespace("c10") public static native @Cast("const char*") BytePointer UsageMessage(); + +/** + * Parses the commandline flags. + * + * This command parses all the commandline arguments passed in via pargc + * and argv. Once it is finished, partc and argv will contain the remaining + * commandline args that c10 does not deal with. 
Note that following + * convention, argv[0] contains the binary name and is not parsed. + */ +@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntPointer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); +@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntBuffer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); +@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(int[] pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); + +/** + * Checks if the commandline flags has already been passed. + */ +@Namespace("c10") public static native @Cast("bool") boolean CommandLineFlagsHasBeenParsed(); + + // namespace c10 + +//////////////////////////////////////////////////////////////////////////////// +// Below are gflags and non-gflags specific implementations. +// In general, they define the following macros for one to declare (use +// C10_DECLARE) or define (use C10_DEFINE) flags: +// C10_{DECLARE,DEFINE}_{int,int64,double,bool,string} +//////////////////////////////////////////////////////////////////////////////// + +// #ifdef C10_USE_GFLAGS + +//////////////////////////////////////////////////////////////////////////////// +// Begin gflags section: most functions are basically rerouted to gflags. +//////////////////////////////////////////////////////////////////////////////// +// #include + +// C10 uses hidden visibility by default. However, in gflags, it only uses +// export on Windows platform (with dllexport) but not on linux/mac (with +// default visibility). As a result, to ensure that we are always exporting +// global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we +// are building C10 as a shared library. +// This has to be done after the inclusion of gflags, because some early +// versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the +// macros, so we need to do definition after gflags is done. 
+// #ifdef GFLAGS_DLL_DEFINE_FLAG +// #endif // GFLAGS_DLL_DEFINE_FLAG +// #ifdef GFLAGS_DLL_DECLARE_FLAG +// #endif // GFLAGS_DLL_DECLARE_FLAG +// #define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT +// #define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT + +// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags. +// Using GFLAGS_GFLAGS_H_ to capture this change. +// #ifndef GFLAGS_GFLAGS_H_ +// #endif // GFLAGS_GFLAGS_H_ + +// Motivation about the gflags wrapper: +// (1) We would need to make sure that the gflags version and the non-gflags +// version of C10 are going to expose the same flags abstraction. One should +// explicitly use FLAGS_flag_name to access the flags. +// (2) For flag names, it is recommended to start with c10_ to distinguish it +// from regular gflags flags. For example, do +// C10_DEFINE_BOOL(c10_my_flag, true, "An example"); +// to allow one to use FLAGS_c10_my_flag. +// (3) Gflags has a design issue that does not properly expose the global flags, +// if one builds the library with -fvisibility=hidden. The current gflags (as of +// Aug 2018) only deals with the Windows case using dllexport, and not the Linux +// counterparts. As a result, we will explicitly use C10_EXPORT to export the +// flags defined in C10. This is done via a global reference, so the flag +// itself is not duplicated - under the hood it is the same global gflags flag. 
+// #define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) +// DEFINE_##type(name, default_value, help_str); + +// #define C10_DEFINE_int(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str) +// #define C10_DEFINE_int32(name, default_value, help_str) +// C10_DEFINE_int(name, default_value, help_str) +// #define C10_DEFINE_int64(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str) +// #define C10_DEFINE_double(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str) +// #define C10_DEFINE_bool(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str) +// #define C10_DEFINE_string(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str) + +// DECLARE_typed_var should be used in header files and in the global namespace. +// #define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name); + +// #define C10_DECLARE_int(name) +// C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name) +// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) +// #define C10_DECLARE_int64(name) +// C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name) +// #define C10_DECLARE_double(name) +// C10_GFLAGS_DECLARE_WRAPPER(double, double, name) +// #define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name) +// #define C10_DECLARE_string(name) +// C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name) +// Targeting ../C10FlagParser.java + + + + + + // namespace c10 + +// The macros are defined outside the c10 namespace. In your code, you should +// write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace +// as well. 
+ +// #define C10_DEFINE_typed_var(type, name, default_value, help_str) +// C10_EXPORT type FLAGS_##name = default_value; +// namespace c10 { +// namespace { +// class C10FlagParser_##name : public C10FlagParser { +// public: +// explicit C10FlagParser_##name(const std::string& content) { +// success_ = C10FlagParser::Parse(content, &FLAGS_##name); +// } +// }; +// } +// RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( +// #name, +// C10FlagsRegistry(), +// RegistererC10FlagsRegistry::DefaultCreator, +// "(" #type ", default " #default_value ") " help_str); +// } + +// #define C10_DEFINE_int(name, default_value, help_str) +// C10_DEFINE_typed_var(int, name, default_value, help_str) +// #define C10_DEFINE_int32(name, default_value, help_str) +// C10_DEFINE_int(name, default_value, help_str) +// #define C10_DEFINE_int64(name, default_value, help_str) +// C10_DEFINE_typed_var(int64_t, name, default_value, help_str) +// #define C10_DEFINE_double(name, default_value, help_str) +// C10_DEFINE_typed_var(double, name, default_value, help_str) +// #define C10_DEFINE_bool(name, default_value, help_str) +// C10_DEFINE_typed_var(bool, name, default_value, help_str) +// #define C10_DEFINE_string(name, default_value, help_str) +// C10_DEFINE_typed_var(std::string, name, default_value, help_str) + +// DECLARE_typed_var should be used in header files and in the global namespace. 
+// #define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name + +// #define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name) +// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) +// #define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name) +// #define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name) +// #define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name) +// #define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name) + +//////////////////////////////////////////////////////////////////////////////// +// End non-gflags section. +//////////////////////////////////////////////////////////////////////////////// + +// #endif // C10_USE_GFLAGS + +// #endif // C10_UTIL_FLAGS_H_ + + +// Parsed from c10/core/impl/LocalDispatchKeySet.h + +// #pragma once + +// #include +// #include + +// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) +// +// This manages two thread-local DispatchKeySets: +// +// - The included type set, which adds a tensor type for consideration +// in dispatch. (For example, you might add Profiling to +// the included type set to turn on profiling on all tensor operations.) +// +// - The excluded type set, which disqualifies a tensor type from dispatch. +// (For example, after redispatching on variable, we disqualify +// Autograd so we don't attempt to handle variable again.) +// (Exclusion wins over inclusion.) +// +// NB: Originally, I implemented the excluded type set as storing the inverted +// set, but TLS is defined to be zero-initialized, so this doesn't actually work +// (if it's inverted, you want the set to be -1 initialized). +// Targeting ../PODLocalDispatchKeySet.java + + +// Targeting ../LocalDispatchKeySet.java + + + +// thread_local variables cannot be C10_API on Windows. +// Inlining this seems to break AutoDispatchBelowAutograd on Android. 
+// #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +@Namespace("c10::impl") public static native @ByVal LocalDispatchKeySet tls_local_dispatch_key_set(); +// #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) + +// #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) + +// Internal, use ThreadLocalStateGuard + +// Targeting ../IncludeDispatchKeyGuard.java + + +// Targeting ../ForceDispatchKeyGuard.java + + + +// Non-RAII API for manipulating the thread-local dispatch state. +// Please prefer the RAII API. The non-RAII API may be useful when +// the included/excluded state of a given DispatchKey must span +// many calls from the Python to the C++, so you cannot conveniently +// use an RAII guard. +// +// Example use case: a Python context manager that includes a certain +// DispatchKey, to ensure ops running under the context manager dispatch +// through that DispatchKey's registered overrides. +// +// The non-RAII API is less efficient than the RAII guards because both the +// getter and setter will do a tls_getaddr lookup (the RAII struct only needs +// one!) 
+ +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(DispatchKey x); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(@Cast("c10::DispatchKey") short x); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(DispatchKey x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(DispatchKey x); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(@Cast("c10::DispatchKey") short x); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(DispatchKey x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_excluded(@ByVal DispatchKeySet ks); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_included(@ByVal DispatchKeySet ks); + + // namespace impl + // namespace c10 + + +// Parsed from c10/core/InferenceMode.h + +// #pragma once + +// #include +// #include +// #include +// Targeting ../InferenceMode.java + + + // namespace c10 + + +// Parsed from c10/core/SymIntArrayRef.h + +// #pragma once + +// #include +// #include +// #include +// #include + +@Namespace("c10") public static native @ByVal LongArrayRef asIntArrayRefUnchecked(@ByVal SymIntArrayRef ar); + +// TODO: a SymIntArrayRef containing a heap allocated large negative integer +// can actually technically be converted to an IntArrayRef... but not with +// the non-owning API we have here. 
We can't reinterpet cast; we have to +// allocate another buffer and write the integers into it. If you need it, +// we can do it. But I don't think you need it. + +@Namespace("c10") public static native @ByVal LongArrayRefOptional asIntArrayRefSlowOpt( + @ByVal SymIntArrayRef ar); + + + +// #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__) + +// Prefer using a more semantic constructor, like +// fromIntArrayRefKnownNonNegative +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... array_ref); + +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... array_ref); + +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
array_ref); + + // namespace c10 + + +// Parsed from c10/core/DefaultDtype.h + +// #pragma once + +// #include +// #include + // namespace caffe2 +@Namespace("c10") public static native void set_default_dtype(@ByVal TypeMeta dtype); +@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_dtype(); +@Namespace("c10") public static native ScalarType get_default_dtype_as_scalartype(); +@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_complex_dtype(); + // namespace c10 + + +// Parsed from c10/core/TensorOptions.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include + +// #include +// #include + +@Namespace("c10") public static native DispatchKey computeDispatchKey( + @ByVal ScalarTypeOptional dtype, + @ByVal LayoutOptional layout, + @ByVal DeviceOptional device); + +@Namespace("c10") public static native ScalarType dtype_or_default(@ByVal ScalarTypeOptional dtype); + +@Namespace("c10") public static native @ByVal TypeMeta dtype_or_default( + @ByVal TypeMetaOptional dtype); + +@Namespace("c10") public static native Layout layout_or_default(@ByVal LayoutOptional layout); + +@Namespace("c10") public static native @ByVal Device device_or_default(@ByVal DeviceOptional device); + + +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +@Namespace("c10") public static native @Cast("bool") boolean pinned_memory_or_default(@ByVal BoolOptional pinned_memory); +// Targeting ../TensorOptions.java + + + +// We should aspire to fit in one machine-size word; but a size greater than two +// words is too much. (We are doing terribly on 32-bit archs, where we require +// three machine size words to store tensor options. Eek!) + +/** Convenience function that returns a {@code TensorOptions} object with the {@code dtype} + * set to the given one. 
*/ +@Namespace("c10") public static native @ByVal TensorOptions dtype(@ByVal TypeMeta dtype); + +// legacy function to support ScalarType +@Namespace("c10") public static native @ByVal TensorOptions dtype(ScalarType dtype); + +/** Convenience function that returns a {@code TensorOptions} object with the {@code layout} + * set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions layout(Layout layout); +@Namespace("c10") public static native @ByVal TensorOptions layout(@Cast("c10::Layout") byte layout); + +/** Convenience function that returns a {@code TensorOptions} object with the {@code device} + * set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions device(@ByVal Device device); + +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code device} set to CUDA and the {@code device_index} set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions device_index(short device_index); + +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code requires_grad} set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions requires_grad(@Cast("bool") boolean requires_grad/*=true*/); + +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code memory_format} set to the given one. 
*/ +@Namespace("c10") public static native @ByVal TensorOptions memory_format(MemoryFormat memory_format); +@Namespace("c10") public static native @ByVal TensorOptions memory_format(@Cast("c10::MemoryFormat") byte memory_format); + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @Const @ByRef TensorOptions options); + +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef TensorOptions options); + +// This is intended to be a centralized location by which we can determine +// what an appropriate DispatchKey for a tensor is. + +@Namespace("c10") public static native Layout dispatchKeyToLayout(DispatchKey dispatch_key); +@Namespace("c10") public static native @Cast("c10::Layout") byte dispatchKeyToLayout(@Cast("c10::DispatchKey") short dispatch_key); + +@Namespace("c10") public static native DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key); +@Namespace("c10") public static native @Cast("c10::DeviceType") byte dispatchKeyToDeviceType(@Cast("c10::DispatchKey") short dispatch_key); + +@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key); +@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(@Cast("c10::DispatchKey") short dispatch_key); +@Namespace("c10::detail") public static native @Cast("bool") boolean backend_supports_empty_operator(@Const @ByRef TensorOptions options); + + // namespace detail + + // namespace c10 + + +// Parsed from c10/core/WrapDimMinimal.h + +// #pragma once + +// #include +// This template can only be specialized at int64_t and c10::SymInt; +// you'll get linker errors otherwise + // namespace detail + +@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); 
+@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr); + +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr); + + // namespace c10 + + +// Parsed from c10/core/impl/HermeticPyObjectTLS.h + +// #pragma once + +// #include +// #include +// Targeting ../HermeticPyObjectTLS.java + + + + // namespace impl + // namespace c10 + + +// Parsed from c10/core/impl/PyInterpreter.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// Forward declarations + // namespace c10 + + // namespace torch + +// Actual implementation +// Targeting ../PyInterpreterVTable.java + + +// Targeting ../PyInterpreter.java + + + +// PyInterpreterStatus describes what the state of its interpreter tag +// is, relative to the thread currently holding the GIL. +@Namespace("c10::impl") public enum PyInterpreterStatus { + // We just allocated the Tensor, it hasn't escaped to other threads, + // we know that it definitely hasn't been tagged to be associated + // with an interpreter. + DEFINITELY_UNINITIALIZED(0), + // We queried the interpreter field and it looked uninitialized. But + // another thread may have raced with us to tag it with some other + // interpreter id. So we will have to do a CEX to make sure we can + // actually nab it. + MAYBE_UNINITIALIZED(1), + // We queried the interpreter field and it was tagged to belong to us. + // This means we have sole write access (as we hold the GIL for this + // interpreter) + TAGGED_BY_US(2), + // Someone else tagged this. We can't use this TensorImpl from Python. 
+ TAGGED_BY_OTHER(3); + + public final int value; + private PyInterpreterStatus(int v) { this.value = v; } + private PyInterpreterStatus(PyInterpreterStatus e) { this.value = e.value; } + public PyInterpreterStatus intern() { for (PyInterpreterStatus e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + + // namespace impl + // namespace c10 + + +// Parsed from c10/core/impl/PyObjectSlot.h + +// #pragma once + +// #include +// #include +// #include +// #include + +// #include + + // namespace impl + // namespace c10 + + +// Parsed from c10/core/impl/SizesAndStrides.h + +// #pragma once + +// #include +// #include + +// #include +// #include +// #include + +public static final int C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE = 5; +// Targeting ../SizesAndStrides.java + + + + // namespace impl + // namespace c10 + + +// Parsed from c10/util/DimVector.h + +// #pragma once + +// #include +// #include +// #include +// #include + +@Namespace("c10") @MemberGetter public static native @Cast("const size_t") long kDimVectorStaticSize(); + +/** A container for sizes or strides */ + + // namespace c10 + + +// Parsed from c10/util/Logging.h + +// #ifndef C10_UTIL_LOGGING_H_ +// #define C10_UTIL_LOGGING_H_ + +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + +// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off +// logging at compile time so no logging message below that level is produced +// at all. The value should be between INT_MIN and CAFFE_FATAL. +// #ifndef CAFFE2_LOG_THRESHOLD +// If we have not defined the compile time log threshold, we keep all the +// log cases. +public static native @MemberGetter int CAFFE2_LOG_THRESHOLD(); +public static final int CAFFE2_LOG_THRESHOLD = CAFFE2_LOG_THRESHOLD(); +// #endif // CAFFE2_LOG_THRESHOLD + +// Below are different implementations for glog and non-glog cases. 
+// #ifdef C10_USE_GLOG +// #include +// #else // !C10_USE_GLOG +// #include +// #endif // C10_USE_GLOG + + + + +// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's +// not available - just short-circuit to the always working one one. +// We define the C10_ name to avoid confusing other files +// #ifdef LOG_EVERY_MS +// #define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms) +// #else +// #define C10_LOG_EVERY_MS(severity, ms) LOG(severity) +// #endif + +// Same for LOG_FIRST_N +// #ifdef LOG_FIRST_N +// #define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n) +// #else +// #define C10_LOG_FIRST_N(severity, n) LOG(severity) +// #endif + +// Same for LOG_EVERY_N +// #ifdef LOG_EVERY_N +// #define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n) +// #else +// #define C10_LOG_EVERY_N(severity, n) LOG(severity) +// #endif + +// Functions that we use for initialization. +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") PointerPointer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") @ByPtrPtr BytePointer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntBuffer argc, @Cast("char**") @ByPtrPtr ByteBuffer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(int[] argc, @Cast("char**") @ByPtrPtr byte[] argv); +@Namespace("c10") public static native void UpdateLoggingLevelsFromFlags(); + +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg); +@Namespace("c10") 
public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @StdString String msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @StdString String msg); + +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3); + +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @StdString String msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @StdString String msg); + +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") 
BytePointer condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3); + +@Namespace("c10") public static native @Cast("const bool") boolean IsUsingGoogleLogging(); + +/** + * A utility to allow one to show log info to stderr after the program starts. + * + * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level + * to smaller than INFO. You are recommended to only use this in a few sparse + * cases, such as when you want to write a tutorial or something. Normally, use + * the commandline flags to set the log level. + */ +@Namespace("c10") public static native void ShowLogInfoToStderr(); + +@Namespace("c10") public static native void SetStackTraceFetcher(@ByVal StringSupplier fetcher); + +// #define CAFFE_ENFORCE(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); +// } +// } while (false) + +// #define CAFFE_ENFORCE_FINITE(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceFiniteNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); +// } +// } while (false) + +// #define CAFFE_ENFORCE_WITH_CALLER(condition, ...) 
+// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); +// } +// } while (false) -// #ifndef C10_UTIL_FLAGS_H_ -// #define C10_UTIL_FLAGS_H_ +// #define CAFFE_THROW(...) +// ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__)) -/* Commandline flags support for C10. - * - * This is a portable commandline flags tool for c10, so we can optionally - * choose to use gflags or a lightweight custom implementation if gflags is - * not possible on a certain platform. If you have gflags installed, set the - * macro C10_USE_GFLAGS will seamlessly route everything to gflags. - * - * To define a flag foo of type bool default to true, do the following in the - * *global* namespace: - * C10_DEFINE_bool(foo, true, "An example."); +/** + * Rich logging messages * - * To use it in another .cc file, you can use C10_DECLARE_* as follows: - * C10_DECLARE_bool(foo); + * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that + * capture input argument values and add it to the exception message. E.g. + * {@code CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")} + * would evaluate both foo and bar only once and if the results are not equal - + * include them in the exception message. * - * In both cases, you can then access the flag via FLAGS_foo. + * Some of the basic checker functions like Equals or Greater are already + * defined below. Other header might define customized checkers by adding + * functions to caffe2::enforce_detail namespace. For example: * - * It is recommended that you build with gflags. 
To learn more about the flags - * usage, refer to the gflags page here: + * namespace caffe2 { namespace enforce_detail { + * inline EnforceFailMessage IsVector(const vector& shape) { + * if (shape.size() == 1) { return EnforceOK(); } + * return c10::str("Shape ", shape, " is not a vector"); + * } + * }} * - * https://gflags.github.io/gflags/ + * With further usages like {@code CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))} * - * Note about Python users / devs: gflags is initiated from a C++ function - * ParseCommandLineFlags, and is usually done in native binaries in the main - * function. As Python does not have a modifiable main function, it is usually - * difficult to change the flags after Python starts. Hence, it is recommended - * that one sets the default value of the flags to one that's acceptable in - * general - that will allow Python to run without wrong flags. - */ - -// #include - -// #include -// #include -/** - * Sets the usage message when a commandline tool is called with "--help". + * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided + * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in + * user-provided input. */ -@Namespace("c10") public static native void SetUsageMessage(@StdString BytePointer str); -@Namespace("c10") public static native void SetUsageMessage(@StdString String str); -/** - * Returns the usage message for the commandline tool set by SetUsageMessage. - */ -@Namespace("c10") public static native @Cast("const char*") BytePointer UsageMessage(); +// GCC7 is getting an internal compiler error on the new +// implementation, so keep the old one (which evaluates the error +// message eagerly and therefore is undesirable for general use +// compared to the new one) around for it. +// #if defined(__GNUG__) && __GNUC__ <= 7 && !defined(__clang__) -/** - * Parses the commandline flags. - * - * This command parses all the commandline arguments passed in via pargc - * and argv. 
Once it is finished, partc and argv will contain the remaining - * commandline args that c10 does not deal with. Note that following - * convention, argv[0] contains the binary name and is not parsed. - */ -@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntPointer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); -@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntBuffer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); -@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(int[] pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); +// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__) -/** - * Checks if the commandline flags has already been passed. - */ -@Namespace("c10") public static native @Cast("bool") boolean CommandLineFlagsHasBeenParsed(); +// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__) - // namespace c10 +// #else -//////////////////////////////////////////////////////////////////////////////// -// Below are gflags and non-gflags specific implementations. -// In general, they define the following macros for one to declare (use -// C10_DECLARE) or define (use C10_DEFINE) flags: -// C10_{DECLARE,DEFINE}_{int,int64,double,bool,string} -//////////////////////////////////////////////////////////////////////////////// +// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) 
+// ::c10::enforce_detail::enforceThatImpl( +// op, +// (lhs), +// (rhs), +// __FILE__, +// __LINE__, +// expr, +// nullptr, +// [&](const auto& arg1, const auto& arg2) { +// return ::c10::enforce_detail::enforceFailMsgImpl( +// arg1, arg2, ##__VA_ARGS__); +// }) -// #ifdef C10_USE_GFLAGS +// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, +// (lhs), +// (rhs), +// __FILE__, +// __LINE__, +// expr, +// this, +// [&](const auto& arg1, const auto& arg2) { +// return ::c10::enforce_detail::enforceFailMsgImpl( +// arg1, arg2, ##__VA_ARGS__); +// }) +// #endif -//////////////////////////////////////////////////////////////////////////////// -// Begin gflags section: most functions are basically rerouted to gflags. -//////////////////////////////////////////////////////////////////////////////// -// #include + // namespace enforce_detail -// C10 uses hidden visibility by default. However, in gflags, it only uses -// export on Windows platform (with dllexport) but not on linux/mac (with -// default visibility). As a result, to ensure that we are always exporting -// global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we -// are building C10 as a shared library. -// This has to be done after the inclusion of gflags, because some early -// versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the -// macros, so we need to do definition after gflags is done. -// #ifdef GFLAGS_DLL_DEFINE_FLAG -// #endif // GFLAGS_DLL_DEFINE_FLAG -// #ifdef GFLAGS_DLL_DECLARE_FLAG -// #endif // GFLAGS_DLL_DECLARE_FLAG -// #define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT -// #define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT +// #define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) +// CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__) -// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags. -// Using GFLAGS_GFLAGS_H_ to capture this change. 
-// #ifndef GFLAGS_GFLAGS_H_ -// #endif // GFLAGS_GFLAGS_H_ +// #define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) +// CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_EQ(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::equal_to(), ==, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_NE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::not_equal_to(), !=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::less_equal(), <=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LT(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::less(), <, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::greater_equal(), >=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GT(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::greater(), >, x, y, ##__VA_ARGS__) -// Motivation about the gflags wrapper: -// (1) We would need to make sure that the gflags version and the non-gflags -// version of C10 are going to expose the same flags abstraction. One should -// explicitly use FLAGS_flag_name to access the flags. -// (2) For flag names, it is recommended to start with c10_ to distinguish it -// from regular gflags flags. For example, do -// C10_DEFINE_BOOL(c10_my_flag, true, "An example"); -// to allow one to use FLAGS_c10_my_flag. -// (3) Gflags has a design issue that does not properly expose the global flags, -// if one builds the library with -fvisibility=hidden. The current gflags (as of -// Aug 2018) only deals with the Windows case using dllexport, and not the Linux -// counterparts. As a result, we will explicitly use C10_EXPORT to export the -// flags defined in C10. This is done via a global reference, so the flag -// itself is not duplicated - under the hood it is the same global gflags flag. 
-// #define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) -// DEFINE_##type(name, default_value, help_str); +// #define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) +// CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( +// cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::equal_to(), ==, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::not_equal_to(), !=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::less_equal(), <=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less(), <, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::greater_equal(), >=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::greater(), >, x, y, ##__VA_ARGS__) -// #define C10_DEFINE_int(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str) -// #define C10_DEFINE_int32(name, default_value, help_str) -// C10_DEFINE_int(name, default_value, help_str) -// #define C10_DEFINE_int64(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str) -// #define C10_DEFINE_double(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str) -// #define C10_DEFINE_bool(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str) -// #define C10_DEFINE_string(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str) +/** + * Very lightweight logging for the first time API usage. 
It's beneficial for + * tracking of individual functionality usage in larger applications. + * + * In order to ensure light-weightedness of logging, we utilize static variable + * trick - LogAPIUsage will be invoked only once and further invocations will + * just do an atomic check. + * + * Example: + * // Logs caller info with an arbitrary text event, if there is a usage. + * C10_LOG_API_USAGE_ONCE("my_api"); + */ +// #define C10_LOG_API_USAGE_ONCE(...) +// C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = +// ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__); -// DECLARE_typed_var should be used in header files and in the global namespace. -// #define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name); +// API usage logging capabilities +@Namespace("c10") public static native void SetAPIUsageLogger(@ByVal StringConsumer logger); +@Namespace("c10") public static native void LogAPIUsage(@StdString BytePointer context); +@Namespace("c10") public static native void LogAPIUsage(@StdString String context); -// #define C10_DECLARE_int(name) -// C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name) -// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) -// #define C10_DECLARE_int64(name) -// C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name) -// #define C10_DECLARE_double(name) -// C10_GFLAGS_DECLARE_WRAPPER(double, double, name) -// #define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name) -// #define C10_DECLARE_string(name) -// C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name) -// Targeting ../C10FlagParser.java +@Namespace("c10") public static native void SetAPIUsageMetadataLogger( + @ByVal MetadataLogger logger); +@Namespace("c10") public static native void LogAPIUsageMetadata( + @StdString BytePointer context, + @Const @ByRef StringStringMap metadata_map); +@Namespace("c10") public static native void LogAPIUsageMetadata( + @StdString String context, + @Const @ByRef StringStringMap metadata_map); +// 
Targeting ../DDPLoggingData.java +@Namespace("c10") public static native void SetPyTorchDDPUsageLogger( + @ByVal DDPLogger logger); +@Namespace("c10") public static native void LogPyTorchDDPUsage(@Const @ByRef DDPLoggingData ddpData); +// Return value is needed to do the static variable initialization trick +@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString BytePointer context); +@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString String context); + // namespace detail +// Initializes the c10 logger. +@Namespace("c10") public static native void initLogging(); // namespace c10 -// The macros are defined outside the c10 namespace. In your code, you should -// write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace -// as well. - -// #define C10_DEFINE_typed_var(type, name, default_value, help_str) -// C10_EXPORT type FLAGS_##name = default_value; -// namespace c10 { -// namespace { -// class C10FlagParser_##name : public C10FlagParser { -// public: -// explicit C10FlagParser_##name(const std::string& content) { -// success_ = C10FlagParser::Parse(content, &FLAGS_##name); -// } -// }; -// } -// RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( -// #name, -// C10FlagsRegistry(), -// RegistererC10FlagsRegistry::DefaultCreator, -// "(" #type ", default " #default_value ") " help_str); -// } - -// #define C10_DEFINE_int(name, default_value, help_str) -// C10_DEFINE_typed_var(int, name, default_value, help_str) -// #define C10_DEFINE_int32(name, default_value, help_str) -// C10_DEFINE_int(name, default_value, help_str) -// #define C10_DEFINE_int64(name, default_value, help_str) -// C10_DEFINE_typed_var(int64_t, name, default_value, help_str) -// #define C10_DEFINE_double(name, default_value, help_str) -// C10_DEFINE_typed_var(double, name, default_value, help_str) -// #define C10_DEFINE_bool(name, default_value, help_str) -// C10_DEFINE_typed_var(bool, 
name, default_value, help_str) -// #define C10_DEFINE_string(name, default_value, help_str) -// C10_DEFINE_typed_var(std::string, name, default_value, help_str) - -// DECLARE_typed_var should be used in header files and in the global namespace. -// #define C10_DECLARE_typed_var(type, name) C10_API extern type FLAGS_##name - -// #define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name) -// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) -// #define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name) -// #define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name) -// #define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name) -// #define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name) - -//////////////////////////////////////////////////////////////////////////////// -// End non-gflags section. -//////////////////////////////////////////////////////////////////////////////// - -// #endif // C10_USE_GFLAGS - -// #endif // C10_UTIL_FLAGS_H_ +// #endif // C10_UTIL_LOGGING_H_ // Parsed from c10/util/accumulate.h @@ -7524,188 +8150,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/util/ExclusivelyOwned.h - -// #pragma once - -// #include - -// See example implementation in TensorBase.h and TensorBody.h. -// Synopsis: -// -// repr_type -- type to use to store an owned T in ExclusivelyOwned. -// -// pointer_type -- pointer-esque type to return from -// ExclusivelyOwned's get() and operator*() methods. -// -// const_pointer_type -- similar to pointer_type, used for the const methods. -// -// static repr_type nullRepr() -- return a null instance of repr_type. -// -// template -// static repr_type createInPlace(Args&&... args) -- used by the in-place -// ExclusivelyOwned constructor. -// -// static repr_type moveToRepr(T&& x) -- move the given x into an -// instance of repr_type. used by the ExclusivelyOwned(T&&) -// constructor. 
-// -// static void destroyOwned(repr_type x) -- free memory for a -// known-exclusively-owned instance of x. Replaces calling repr_type's -// destructor. Being able to implement this more efficiently than -// repr_type's destructor is the main reason to use ExclusivelyOwned -// for a type. -// -// static T take(repr_type&) -- move out of the given repr_type into an owned T. -// -// static pointer_type getImpl(const repr_type&) -- return a pointer -// to the given repr_type. May take repr_type by value if that is more -// efficient. - -/** ExclusivelyOwned is a smart-pointer-like wrapper around an - * exclusively-owned instance of some type T that normally has - * mandatory reference counting (currently just Tensor). If you have - * an isolated piece of code that knows that it has sole ownership of - * an object of one of these types (i.e., because you created it - * directly or using a factory function) and that object will not - * escape from that isolated piece of code, then moving the object - * into an ExclusivelyOwned will avoid an atomic reference count - * decrement at destruction time. - * - * If you directly create the Tensor in the first - * place, you can use the in_place constructor of ExclusivelyOwned to - * additionally avoid doing any stores to initialize the refcount & - * weakcount. 
*/ - - // namespace c10 - - -// Parsed from c10/core/DefaultDtype.h - -// #pragma once - -// #include -// #include - // namespace caffe2 -@Namespace("c10") public static native void set_default_dtype(@ByVal TypeMeta dtype); -@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_dtype(); -@Namespace("c10") public static native ScalarType get_default_dtype_as_scalartype(); -@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_complex_dtype(); - // namespace c10 - - -// Parsed from c10/core/TensorOptions.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include - -// #include -// #include - -@Namespace("c10") public static native DispatchKey computeDispatchKey( - @ByVal ScalarTypeOptional dtype, - @ByVal LayoutOptional layout, - @ByVal DeviceOptional device); - -@Namespace("c10") public static native ScalarType dtype_or_default(@ByVal ScalarTypeOptional dtype); - -@Namespace("c10") public static native @ByVal TypeMeta dtype_or_default( - @ByVal TypeMetaOptional dtype); - -@Namespace("c10") public static native Layout layout_or_default(@ByVal LayoutOptional layout); - -@Namespace("c10") public static native @ByVal Device device_or_default(@ByVal DeviceOptional device); - - -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -@Namespace("c10") public static native @Cast("bool") boolean pinned_memory_or_default(@ByVal BoolOptional pinned_memory); -// Targeting ../TensorOptions.java - - - -// We should aspire to fit in one machine-size word; but a size greater than two -// words is too much. (We are doing terribly on 32-bit archs, where we require -// three machine size words to store tensor options. Eek!) - -/** Convenience function that returns a {@code TensorOptions} object with the {@code dtype} - * set to the given one. 
*/ -@Namespace("c10") public static native @ByVal TensorOptions dtype(@ByVal TypeMeta dtype); - -// legacy function to support ScalarType -@Namespace("c10") public static native @ByVal TensorOptions dtype(ScalarType dtype); - -/** Convenience function that returns a {@code TensorOptions} object with the {@code layout} - * set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions layout(Layout layout); -@Namespace("c10") public static native @ByVal TensorOptions layout(@Cast("c10::Layout") byte layout); - -/** Convenience function that returns a {@code TensorOptions} object with the {@code device} - * set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions device(@ByVal Device device); - -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code device} set to CUDA and the {@code device_index} set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions device_index(short device_index); - -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code requires_grad} set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions requires_grad(@Cast("bool") boolean requires_grad/*=true*/); - -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code memory_format} set to the given one. 
*/ -@Namespace("c10") public static native @ByVal TensorOptions memory_format(MemoryFormat memory_format); -@Namespace("c10") public static native @ByVal TensorOptions memory_format(@Cast("c10::MemoryFormat") byte memory_format); - -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer stream, - @Const @ByRef TensorOptions options); - -@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef TensorOptions options); - -// This is intended to be a centralized location by which we can determine -// what an appropriate DispatchKey for a tensor is. - -@Namespace("c10") public static native Layout dispatchKeyToLayout(DispatchKey dispatch_key); -@Namespace("c10") public static native @Cast("c10::Layout") byte dispatchKeyToLayout(@Cast("c10::DispatchKey") short dispatch_key); - -@Namespace("c10") public static native DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key); -@Namespace("c10") public static native @Cast("c10::DeviceType") byte dispatchKeyToDeviceType(@Cast("c10::DispatchKey") short dispatch_key); - -@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key); -@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(@Cast("c10::DispatchKey") short dispatch_key); -@Namespace("c10::detail") public static native @Cast("bool") boolean backend_supports_empty_operator(@Const @ByRef TensorOptions options); - - // namespace detail - - // namespace c10 - - // Parsed from ATen/core/CheckMemoryFormat.h // #include @@ -11977,12 +12421,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #else // #define SKA_NOINLINE(...) 
__VA_ARGS__ __attribute__((noinline)) // #endif -@Namespace("ska_ordered::detailv3") @MemberGetter public static native byte min_lookups(); -public static final byte min_lookups = min_lookups(); - -@Namespace("ska_ordered::detailv3") public static native @Cast("uint64_t") long next_power_of_two(@Cast("uint64_t") long i); // Implementation taken from http://en.cppreference.com/w/cpp/types/void_t // (it takes CWG1558 into account and also works for older compilers) @@ -12841,367 +13281,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/util/Logging.h - -// #ifndef C10_UTIL_LOGGING_H_ -// #define C10_UTIL_LOGGING_H_ - -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include - -// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off -// logging at compile time so no logging message below that level is produced -// at all. The value should be between INT_MIN and CAFFE_FATAL. -// #ifndef CAFFE2_LOG_THRESHOLD -// If we have not defined the compile time log threshold, we keep all the -// log cases. -public static native @MemberGetter int CAFFE2_LOG_THRESHOLD(); -public static final int CAFFE2_LOG_THRESHOLD = CAFFE2_LOG_THRESHOLD(); -// #endif // CAFFE2_LOG_THRESHOLD - -// Below are different implementations for glog and non-glog cases. -// #ifdef C10_USE_GLOG -// #include -// #else // !C10_USE_GLOG -// #include -// #endif // C10_USE_GLOG - - - - -// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's -// not available - just short-circuit to the always working one one. 
-// We define the C10_ name to avoid confusing other files -// #ifdef LOG_EVERY_MS -// #define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms) -// #else -// #define C10_LOG_EVERY_MS(severity, ms) LOG(severity) -// #endif - -// Same for LOG_FIRST_N -// #ifdef LOG_FIRST_N -// #define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n) -// #else -// #define C10_LOG_FIRST_N(severity, n) LOG(severity) -// #endif - -// Same for LOG_EVERY_N -// #ifdef LOG_EVERY_N -// #define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n) -// #else -// #define C10_LOG_EVERY_N(severity, n) LOG(severity) -// #endif - -// Functions that we use for initialization. -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") PointerPointer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") @ByPtrPtr BytePointer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntBuffer argc, @Cast("char**") @ByPtrPtr ByteBuffer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(int[] argc, @Cast("char**") @ByPtrPtr byte[] argv); -@Namespace("c10") public static native void UpdateLoggingLevelsFromFlags(); - -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @StdString String msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String 
condition, - @StdString String msg); - -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3); - -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @StdString String msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @StdString String msg); - -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer 
condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3); - -@Namespace("c10") public static native @Cast("const bool") boolean IsUsingGoogleLogging(); - -/** - * A utility to allow one to show log info to stderr after the program starts. - * - * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level - * to smaller than INFO. You are recommended to only use this in a few sparse - * cases, such as when you want to write a tutorial or something. Normally, use - * the commandline flags to set the log level. - */ -@Namespace("c10") public static native void ShowLogInfoToStderr(); - -@Namespace("c10") public static native void SetStackTraceFetcher(@ByVal StringSupplier fetcher); - -// #define CAFFE_ENFORCE(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); -// } -// } while (false) - -// #define CAFFE_ENFORCE_FINITE(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceFiniteNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); -// } -// } while (false) - -// #define CAFFE_ENFORCE_WITH_CALLER(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); -// } -// } while (false) - -// #define CAFFE_THROW(...) 
-// ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__)) - -/** - * Rich logging messages - * - * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that - * capture input argument values and add it to the exception message. E.g. - * {@code CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")} - * would evaluate both foo and bar only once and if the results are not equal - - * include them in the exception message. - * - * Some of the basic checker functions like Equals or Greater are already - * defined below. Other header might define customized checkers by adding - * functions to caffe2::enforce_detail namespace. For example: - * - * namespace caffe2 { namespace enforce_detail { - * inline EnforceFailMessage IsVector(const vector& shape) { - * if (shape.size() == 1) { return EnforceOK(); } - * return c10::str("Shape ", shape, " is not a vector"); - * } - * }} - * - * With further usages like {@code CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))} - * - * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided - * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in - * user-provided input. - */ - -// GCC7 is getting an internal compiler error on the new -// implementation, so keep the old one (which evaluates the error -// message eagerly and therefore is undesirable for general use -// compared to the new one) around for it. -// #if defined(__GNUG__) && __GNUC__ <= 7 && !defined(__clang__) - -// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) -// ::c10::enforce_detail::enforceThatImpl( -// op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__) - -// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) -// ::c10::enforce_detail::enforceThatImpl( -// op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__) - -// #else - -// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) 
-// ::c10::enforce_detail::enforceThatImpl( -// op, -// (lhs), -// (rhs), -// __FILE__, -// __LINE__, -// expr, -// nullptr, -// [&](const auto& arg1, const auto& arg2) { -// return ::c10::enforce_detail::enforceFailMsgImpl( -// arg1, arg2, ##__VA_ARGS__); -// }) - -// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) -// ::c10::enforce_detail::enforceThatImpl( -// op, -// (lhs), -// (rhs), -// __FILE__, -// __LINE__, -// expr, -// this, -// [&](const auto& arg1, const auto& arg2) { -// return ::c10::enforce_detail::enforceFailMsgImpl( -// arg1, arg2, ##__VA_ARGS__); -// }) -// #endif - - // namespace enforce_detail - -// #define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) -// CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__) - -// #define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) -// CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_EQ(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::equal_to(), ==, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_NE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::not_equal_to(), !=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::less_equal(), <=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LT(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::less(), <, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::greater_equal(), >=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GT(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::greater(), >, x, y, ##__VA_ARGS__) - -// #define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) -// CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( -// cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::equal_to(), ==, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) 
-// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::not_equal_to(), !=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::less_equal(), <=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less(), <, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::greater_equal(), >=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::greater(), >, x, y, ##__VA_ARGS__) - -/** - * Very lightweight logging for the first time API usage. It's beneficial for - * tracking of individual functionality usage in larger applications. - * - * In order to ensure light-weightedness of logging, we utilize static variable - * trick - LogAPIUsage will be invoked only once and further invocations will - * just do an atomic check. - * - * Example: - * // Logs caller info with an arbitrary text event, if there is a usage. - * C10_LOG_API_USAGE_ONCE("my_api"); - */ -// #define C10_LOG_API_USAGE_ONCE(...) 
-// C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = -// ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__); - -// API usage logging capabilities -@Namespace("c10") public static native void SetAPIUsageLogger(@ByVal StringConsumer logger); -@Namespace("c10") public static native void LogAPIUsage(@StdString BytePointer context); -@Namespace("c10") public static native void LogAPIUsage(@StdString String context); - -@Namespace("c10") public static native void SetAPIUsageMetadataLogger( - @ByVal MetadataLogger logger); -@Namespace("c10") public static native void LogAPIUsageMetadata( - @StdString BytePointer context, - @Const @ByRef StringStringMap metadata_map); -@Namespace("c10") public static native void LogAPIUsageMetadata( - @StdString String context, - @Const @ByRef StringStringMap metadata_map); -// Targeting ../DDPLoggingData.java - - - -@Namespace("c10") public static native void SetPyTorchDDPUsageLogger( - @ByVal DDPLogger logger); -@Namespace("c10") public static native void LogPyTorchDDPUsage(@Const @ByRef DDPLoggingData ddpData); -// Return value is needed to do the static variable initialization trick -@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString BytePointer context); -@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString String context); - // namespace detail - -// Initializes the c10 logger. 
-@Namespace("c10") public static native void initLogging(); - - // namespace c10 - -// #endif // C10_UTIL_LOGGING_H_ - - // Parsed from c10/util/intrusive_ptr.h // #pragma once @@ -15532,48 +15611,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from ATen/core/function_schema_inl.h - -// #pragma once -// #include -// #include - -// note: windows build doesn't find symbols in operator files unless -// this is a header file - -@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef FunctionSchema schema); - -@Namespace("c10") public static native @Cast("size_t") long findFirstOutArg(@StdVector Argument args); - - - - - - - - - - - - - - - - - - - -// covariant subtyping of list of Arguments -@Namespace("c10") public static native @Cast("bool") boolean isSubtypeOfList( - @ByVal ArgumentArrayRef child, - @ByVal ArgumentArrayRef parent, - @Cast("std::ostream*") Pointer why_not); - - - - // namespace c10 - - // Parsed from ATen/core/function_schema.h // #pragma once @@ -15623,6 +15660,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // full format: Type(alias)? 
name=default_value @Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Argument arg); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef FunctionSchema schema); + @Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef FunctionSchema schema); // namespace c10 @@ -15632,6 +15671,46 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // IWYU pragma: keep +// Parsed from ATen/core/function_schema_inl.h + +// #pragma once +// #include +// #include + +// note: windows build doesn't find symbols in operator files unless +// this is a header file + +@Namespace("c10") public static native @Cast("size_t") long findFirstOutArg(@StdVector Argument args); + + + + + + + + + + + + + + + + + + + +// covariant subtyping of list of Arguments +@Namespace("c10") public static native @Cast("bool") boolean isSubtypeOfList( + @ByVal ArgumentArrayRef child, + @ByVal ArgumentArrayRef parent, + @Cast("std::ostream*") Pointer why_not); + + + + // namespace c10 + + // Parsed from ATen/core/op_registration/infer_schema.h // #pragma once @@ -15677,6 +15756,281 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/record_function.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + + +// Kind of record function scope; +@Namespace("at") public enum RecordScope { + // c10/ATen ops, autograd nodes + FUNCTION((byte)(0)), + // Functions/nodes called from the autograd + BACKWARD_FUNCTION((byte)(1)), + // TorchScript functions, methods + TORCHSCRIPT_FUNCTION((byte)(2)), + // Kernel Function dtype Tag + KERNEL_FUNCTION_DTYPE((byte)(3)), + // Torchbind custom class, + 
CUSTOM_CLASS((byte)(4)), + // Generic Build Feature + BUILD_FEATURE((byte)(5)), + // Kernel Function dtype Tag + LITE_INTERPRETER((byte)(6)), + // User defined scope (e.g. with record_function()) + USER_SCOPE((byte)(7)), + // Scopes for static runtime, a specialized TorchScript interpreter + STATIC_RUNTIME_OP((byte)(8)), + STATIC_RUNTIME_MODEL((byte)(9)), + NUM_SCOPES((byte)(10));// must be the last in the list + + public final byte value; + private RecordScope(byte v) { this.value = v; } + private RecordScope(RecordScope e) { this.value = e.value; } + public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + + // namespace at + // namespace std + +// Soft limit on the number of callbacks to use; +@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); + +// An abstract base class for various observer contexts that can be attached to +// the RecordFunction. + +// +// PyTorch callbacks/observers API: +// + +/** + * RecordFunctionCallback represents a pair of callbacks to be used with + * RecordFunction, members: + * start, end - the callbacks to run when entering and exiting the scope; + * optionally, the start callback may return an ObserverContext which will + * be passed to the end callback, use appropriate constructor accordingly. 
+ * needs_inputs - whether the callbacks need the inputs passed from the + * observed function/range; NOTE: passing the inputs incurs an additional + * overhead; sampling_probability - if not 1.0, then the callback is + * probabilistically sampled to run; NOTE: start and end callbacks always run as + * a pair and are sampled together; scopes - types of scopes to execute the + * callbacks on (see RecordScope); passing empty set means the callbacks will be + * executed for all possible scope types should_run - optional function that + * returns whether this callback should run; overwrites the effect of setting + * sampling_probability + */ + +// Notes: +// - two types of callbacks are provided: thread local and global +// - thread local callbacks are added/removed only for the given thread +// and are stored locally for each thread and separately from the list +// of the global callbacks +// - global callbacks are stored in a single per process list and are +// invoked by every RecordFunction, in addition to the thread local +// callbacks specific to the given thread +// - we allow the added callbacks to be sampled, by specifying a sampling +// probability for each callback pair, if the start callback is +// not picked to run, the corresponding end callback won't be called +// - a typical use case for the global callbacks is passive monitoring +// in the background (e.g. 
fleet-wide monitoring), without focusing on +// the specific piece of code +// - in contrast, thread local callbacks are enabled locally, on demand, +// for the specific piece of code (range) and are not sampled +// - a typical use case for thread local callbacks is profiler and code +// execution tracer +// - note, thread local callbacks are automatically propagated with +// ThreadLocalState across JIT continuations and async tasks (at::launch) + +@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); +// Targeting ../RecordFunctionCallbacksEntry.java + + + +// Holds pairs (callbacks, unique_id) +// Targeting ../RecordFunction.java + + + +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); + +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + @Cast("at::RecordScope") byte scope); + + // namespace detail + +// optional argument - function's seq_no +// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope( +// guard, fn, inputs, ##__VA_ARGS__); +// } + +// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// scope, fn, inputs, outputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// if (guard.needsInputs()) { +// guard.before(fn, inputs, ##__VA_ARGS__); +// } else { +// guard.before(fn, ##__VA_ARGS__); +// } +// if (guard.needsOutputs()) { +// guard.setOutputs(outputs); +// } +// } + +// #define RECORD_FUNCTION(fn, inputs, ...) 
+// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) + +// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) + +// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) +// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) + +// Custom user scopes in C++; similar to Python's 'with record_function("..."):' +// #define RECORD_USER_SCOPE(fn) +// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) + +// RECORD_USER_SCOPE with inputs +// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) + +// Helper macro to pass in debug handle that is used to +// post process events +// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// scope, fn, debug_handle, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope_and_debug_handle( +// guard, fn, debug_handle, inputs, ##__VA_ARGS__); +// } + +// Helper macros to record LITE INTERPETER scope events with debug handles +// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( +// fn, debug_handle, inputs) +// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) + +// Bookend to the RECORD_FUNCTION macros. Use this after the kernel +// launch to let the profiler bind the outputs to the op that produced +// them. 
Note that guard is declared by RECORD_FUNCTION so this macro +// needs to be called from the same scope as RECORD_FUNCTION +// #define RECORD_OUTPUTS(outputs) +// if (guard.needsOutputs()) { +// guard.setOutputs( +// std::vector(outputs.begin(), outputs.end())); +// } + +/** + * addThreadLocalCallback adds a thread local callback to run with + * RecordFunction, returns handle to use with removeThreadLocalCallback + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); + +/** + * hasThreadLocalCallbacks returns whether there're callbacks registered + * with addThreadLocalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); + +/** + * clearThreadLocalCallbacks removes all thread local callbacks + */ +@Namespace("at") public static native void clearThreadLocalCallbacks(); + +/** + * addGlobalCallback adds a global callback to run with RecordFunction: + * + * only during the program initialization + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); + +/** + * removeCallback removes a callback given the handle returned by + * addThreadLocalCallback or addGlobalCallback; + * + * no other code can run simultaneously + */ +@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); + +/** + * Prevent the given callback from executing. If handle is invalid, + * does nothing. + */ +@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); + +/** + * Allow the given callback, previously disabled with disableCallback, to + * execute again. If handle is invalid, does nothing. 
+ */ +@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); + +/** + * hasGlobalCallbacks returns whether there're global callbacks + * registered with pushGlobalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); + +/** + * clearGlobalCallbacks removes all global callbacks + */ +@Namespace("at") public static native void clearGlobalCallbacks(); + +// for both thread local and global callbacks +@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); +@Namespace("at") public static native void clearCallbacks(); + +/** + * enableRecordFunction enables RecordFunction thread locally + */ +@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); +@Namespace("at") public static native void enableRecordFunction(); + +/** + * isRecordFunctionEnabled returns whether RecordFunction + * is enabled thread locally + */ +@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); +// Targeting ../RecordFunctionGuard.java + + +// Targeting ../DisableRecordFunctionGuard.java + + +// Targeting ../RecordFunctionTLS.java + + + +@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); + +@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); + +@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); + + // namespace at + + // Parsed from ATen/core/op_registration/op_allowlist.h // #pragma once @@ -15834,32 +16188,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from ATen/core/enum_tag.h - -// #pragma once - -// @generated by torchgen/gen.py from enum_tag.h - // Enum of valid tags obtained from the entries in tags.yaml - @Namespace("at") public enum Tag { - core(0), - data_dependent_output(1), - 
dynamic_output_shape(2), - generated(3), - inplace_view(4), - nondeterministic_bitwise(5), - nondeterministic_seeded(6), - pointwise(7), - view_copy(8); - - public final int value; - private Tag(int v) { this.value = v; } - private Tag(Tag e) { this.value = e.value; } - public Tag intern() { for (Tag e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } - } - - - // Parsed from c10/core/CompileTimeFunctionPointer.h // #pragma once @@ -15906,42 +16234,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include // Targeting ../OperatorKernel.java - - - - // namespace c10 - - -// Parsed from ATen/core/boxing/BoxedKernel_impl.h - -// #pragma once - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -16021,6 +16313,42 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include +// Parsed from ATen/core/boxing/BoxedKernel_impl.h + +// #pragma once + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + // namespace c10 + + // Parsed from ATen/core/stack.h // #pragma once @@ -16362,6 +16690,25 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/core/boxing/KernelFunction.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include // TODO Instead of this, move torch::jit::Stack to the c10 namespace. +// Targeting ../KernelFunction.java + + + + + +// #include + + // Parsed from ATen/core/boxing/KernelFunction_impl.h // #include @@ -16424,25 +16771,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - - -// Parsed from ATen/core/boxing/KernelFunction.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include // TODO Instead of this, move torch::jit::Stack to the c10 namespace. 
-// Targeting ../KernelFunction.java - - - - - -// #include // Parsed from ATen/core/dispatch/CppSignature.h @@ -16522,6 +16850,32 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/core/enum_tag.h + +// #pragma once + +// @generated by torchgen/gen.py from enum_tag.h + // Enum of valid tags obtained from the entries in tags.yaml + @Namespace("at") public enum Tag { + core(0), + data_dependent_output(1), + dynamic_output_shape(2), + generated(3), + inplace_view(4), + nondeterministic_bitwise(5), + nondeterministic_seeded(6), + pointwise(7), + view_copy(8); + + public final int value; + private Tag(int v) { this.value = v; } + private Tag(Tag e) { this.value = e.value; } + public Tag intern() { for (Tag e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } + } + + + // Parsed from ATen/core/function.h // #pragma once @@ -17009,63 +17363,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from c10/util/flat_hash_map.h - -// Taken from -// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp -// with fixes applied: -// - https://github.com/skarupke/flat_hash_map/pull/25 -// - https://github.com/skarupke/flat_hash_map/pull/26 -// - replace size_t with uint64_t to fix it for 32bit -// - add "GCC diagnostic" pragma to ignore -Wshadow -// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems -// to have issues with it otherwise -// - fix compiler warnings in operator templated_iterator - -// Copyright Malte Skarupke 2017. -// Distributed under the Boost Software License, Version 1.0. 
-// (See http://www.boost.org/LICENSE_1_0.txt) - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") -// #endif - -// #if defined(_MSC_VER) && !defined(__clang__) -// #pragma warning(push) -// #pragma warning(disable : 4624) // destructor was implicitly defined as deleted -// #endif - -// #ifdef _MSC_VER -// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ -// #else -// #define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline)) -// #endif - -@Namespace("ska::detailv3") public static native byte log2(@Cast("uint64_t") long value); - -// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t -// (it takes CWG1558 into account and also works for older compilers) - // namespace detailv3 - - // end namespace ska - -// #if defined(_MSC_VER) && !defined(__clang__) -// #pragma warning(pop) -// #endif - - // Parsed from torch/csrc/autograd/anomaly_mode.h // #pragma once @@ -17087,28 +17384,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace torch -// Parsed from c10/core/GradMode.h - -// #pragma once - -// #include -// #include -// Targeting ../GradMode.java - - -// Targeting ../AutoGradMode.java - - -// Targeting ../NoGradGuard.java - - -// Targeting ../AutoFwGradMode.java - - - - // namespace c10 - - // Parsed from ATen/core/grad_mode.h // #pragma once @@ -17233,281 +17508,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at -// Parsed from ATen/record_function.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include - - -// Kind of record function scope; -@Namespace("at") public enum RecordScope { - // c10/ATen ops, autograd nodes - FUNCTION((byte)(0)), - // Functions/nodes called from the autograd - 
BACKWARD_FUNCTION((byte)(1)), - // TorchScript functions, methods - TORCHSCRIPT_FUNCTION((byte)(2)), - // Kernel Function dtype Tag - KERNEL_FUNCTION_DTYPE((byte)(3)), - // Torchbind custom class, - CUSTOM_CLASS((byte)(4)), - // Generic Build Feature - BUILD_FEATURE((byte)(5)), - // Kernel Function dtype Tag - LITE_INTERPRETER((byte)(6)), - // User defined scope (e.g. with record_function()) - USER_SCOPE((byte)(7)), - // Scopes for static runtime, a specialized TorchScript interpreter - STATIC_RUNTIME_OP((byte)(8)), - STATIC_RUNTIME_MODEL((byte)(9)), - NUM_SCOPES((byte)(10));// must be the last in the list - - public final byte value; - private RecordScope(byte v) { this.value = v; } - private RecordScope(RecordScope e) { this.value = e.value; } - public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - - // namespace at - // namespace std - -// Soft limit on the number of callbacks to use; -@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); - -// An abstract base class for various observer contexts that can be attached to -// the RecordFunction. - -// -// PyTorch callbacks/observers API: -// - -/** - * RecordFunctionCallback represents a pair of callbacks to be used with - * RecordFunction, members: - * start, end - the callbacks to run when entering and exiting the scope; - * optionally, the start callback may return an ObserverContext which will - * be passed to the end callback, use appropriate constructor accordingly. 
- * needs_inputs - whether the callbacks need the inputs passed from the - * observed function/range; NOTE: passing the inputs incurs an additional - * overhead; sampling_probability - if not 1.0, then the callback is - * probabilistically sampled to run; NOTE: start and end callbacks always run as - * a pair and are sampled together; scopes - types of scopes to execute the - * callbacks on (see RecordScope); passing empty set means the callbacks will be - * executed for all possible scope types should_run - optional function that - * returns whether this callback should run; overwrites the effect of setting - * sampling_probability - */ - -// Notes: -// - two types of callbacks are provided: thread local and global -// - thread local callbacks are added/removed only for the given thread -// and are stored locally for each thread and separately from the list -// of the global callbacks -// - global callbacks are stored in a single per process list and are -// invoked by every RecordFunction, in addition to the thread local -// callbacks specific to the given thread -// - we allow the added callbacks to be sampled, by specifying a sampling -// probability for each callback pair, if the start callback is -// not picked to run, the corresponding end callback won't be called -// - a typical use case for the global callbacks is passive monitoring -// in the background (e.g. 
fleet-wide monitoring), without focusing on -// the specific piece of code -// - in contrast, thread local callbacks are enabled locally, on demand, -// for the specific piece of code (range) and are not sampled -// - a typical use case for thread local callbacks is profiler and code -// execution tracer -// - note, thread local callbacks are automatically propagated with -// ThreadLocalState across JIT continuations and async tasks (at::launch) - -@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); -// Targeting ../RecordFunctionCallbacksEntry.java - - - -// Holds pairs (callbacks, unique_id) -// Targeting ../RecordFunction.java - - - -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); - -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - @Cast("at::RecordScope") byte scope); - - // namespace detail - -// optional argument - function's seq_no -// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope( -// guard, fn, inputs, ##__VA_ARGS__); -// } - -// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// scope, fn, inputs, outputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// if (guard.needsInputs()) { -// guard.before(fn, inputs, ##__VA_ARGS__); -// } else { -// guard.before(fn, ##__VA_ARGS__); -// } -// if (guard.needsOutputs()) { -// guard.setOutputs(outputs); -// } -// } - -// #define RECORD_FUNCTION(fn, inputs, ...) 
-// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) - -// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) - -// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) -// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) - -// Custom user scopes in C++; similar to Python's 'with record_function("..."):' -// #define RECORD_USER_SCOPE(fn) -// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) - -// RECORD_USER_SCOPE with inputs -// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) - -// Helper macro to pass in debug handle that is used to -// post process events -// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// scope, fn, debug_handle, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope_and_debug_handle( -// guard, fn, debug_handle, inputs, ##__VA_ARGS__); -// } - -// Helper macros to record LITE INTERPETER scope events with debug handles -// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( -// fn, debug_handle, inputs) -// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) - -// Bookend to the RECORD_FUNCTION macros. Use this after the kernel -// launch to let the profiler bind the outputs to the op that produced -// them. 
Note that guard is declared by RECORD_FUNCTION so this macro -// needs to be called from the same scope as RECORD_FUNCTION -// #define RECORD_OUTPUTS(outputs) -// if (guard.needsOutputs()) { -// guard.setOutputs( -// std::vector(outputs.begin(), outputs.end())); -// } - -/** - * addThreadLocalCallback adds a thread local callback to run with - * RecordFunction, returns handle to use with removeThreadLocalCallback - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); - -/** - * hasThreadLocalCallbacks returns whether there're callbacks registered - * with addThreadLocalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); - -/** - * clearThreadLocalCallbacks removes all thread local callbacks - */ -@Namespace("at") public static native void clearThreadLocalCallbacks(); - -/** - * addGlobalCallback adds a global callback to run with RecordFunction: - * - * only during the program initialization - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); - -/** - * removeCallback removes a callback given the handle returned by - * addThreadLocalCallback or addGlobalCallback; - * - * no other code can run simultaneously - */ -@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); - -/** - * Prevent the given callback from executing. If handle is invalid, - * does nothing. - */ -@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); - -/** - * Allow the given callback, previously disabled with disableCallback, to - * execute again. If handle is invalid, does nothing. 
- */ -@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); - -/** - * hasGlobalCallbacks returns whether there're global callbacks - * registered with pushGlobalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); - -/** - * clearGlobalCallbacks removes all global callbacks - */ -@Namespace("at") public static native void clearGlobalCallbacks(); - -// for both thread local and global callbacks -@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); -@Namespace("at") public static native void clearCallbacks(); - -/** - * enableRecordFunction enables RecordFunction thread locally - */ -@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); -@Namespace("at") public static native void enableRecordFunction(); - -/** - * isRecordFunctionEnabled returns whether RecordFunction - * is enabled thread locally - */ -@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); -// Targeting ../RecordFunctionGuard.java - - -// Targeting ../DisableRecordFunctionGuard.java - - -// Targeting ../RecordFunctionTLS.java - - - -@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); - -@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); - -@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); - - // namespace at - - // Parsed from c10/core/impl/PythonDispatcherTLS.h // #pragma once @@ -18970,217 +18970,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace at -// Parsed from ATen/ops/from_blob.h - -// #pragma once -// #include - -@Namespace("at::detail") public static native void noopDelete(Pointer arg0); - - -// Targeting ../TensorMaker.java - - - -@Namespace("at") public static native @ByVal @NoException(true) TensorMaker 
for_blob(Pointer data, @ByVal LongArrayRef sizes); -@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, - PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, - PointerConsumer deleter); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal 
LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Cast("int64_t") long storage_offset, - PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, - @Cast("int64_t") long storage_offset, - PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, - @Cast("int64_t") long storage_offset, - PointerConsumer deleter); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - PointerConsumer deleter); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - PointerConsumer deleter, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, - @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - PointerConsumer deleter); - 
-@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); - -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal LongArrayRef sizes); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); - - // namespace at - - -// Parsed from ATen/ops/tensor.h - -// #pragma once -// #include -// #include - -// These functions are defined in ATen/Utils.cpp. 
-// #define TENSOR(T, S) -// TORCH_API Tensor tensor(ArrayRef values, const TensorOptions& options); -// inline Tensor tensor( -// std::initializer_list values, const TensorOptions& options) { -// return at::tensor(ArrayRef(values), options); -// } -// inline Tensor tensor(T value, const TensorOptions& options) { -// return at::tensor(ArrayRef(value), options); -// } -// inline Tensor tensor(ArrayRef values) { -// return at::tensor(std::move(values), at::dtype(k##S)); -// } -// inline Tensor tensor(std::initializer_list values) { -// return at::tensor(ArrayRef(values)); -// } -// inline Tensor tensor(T value) { -// return at::tensor(ArrayRef(value)); -// } -@Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values, @Const @ByRef TensorOptions options); -@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte[] values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte... 
values); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jshort*", "c10::ArrayRef", "std::vector&"}) @StdVector("jshort") short[] values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(short value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jshort*", "c10::ArrayRef", "std::vector&"}) @StdVector("jshort") short... values); - @Namespace("at") public static native @ByVal Tensor tensor(short value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int[] values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(int value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int... 
values); - @Namespace("at") public static native @ByVal Tensor tensor(int value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... values); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float[] values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(float value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float... 
values); - @Namespace("at") public static native @ByVal Tensor tensor(float value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(double value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(double value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value); -@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef 
values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value, @Const @ByRef TensorOptions options); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values); - @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value); -// #undef TENSOR - - // namespace at - - // Parsed from ATen/ops/abs.h // #pragma once @@ -30480,6 +30269,133 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/ops/from_blob.h + +// #pragma once +// #include + +@Namespace("at::detail") public static native void noopDelete(Pointer arg0); + + +// Targeting ../TensorMaker.java + + + +@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... 
sizes); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, + PointerConsumer deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, + @Cast("int64_t") long storage_offset, + PointerConsumer deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + PointerConsumer deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static 
native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... strides); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... sizes); + + // namespace at + + // Parsed from ATen/ops/from_file.h // #pragma once @@ -53217,6 +53133,90 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Parsed from ATen/ops/tensor.h + +// #pragma once +// #include +// #include + +// These functions are defined in ATen/Utils.cpp. 
+// #define TENSOR(T, S) +// TORCH_API Tensor tensor(ArrayRef values, const TensorOptions& options); +// inline Tensor tensor( +// std::initializer_list values, const TensorOptions& options) { +// return at::tensor(ArrayRef(values), options); +// } +// inline Tensor tensor(T value, const TensorOptions& options) { +// return at::tensor(ArrayRef(value), options); +// } +// inline Tensor tensor(ArrayRef values) { +// return at::tensor(std::move(values), at::dtype(k##S)); +// } +// inline Tensor tensor(std::initializer_list values) { +// return at::tensor(ArrayRef(values)); +// } +// inline Tensor tensor(T value) { +// return at::tensor(ArrayRef(value)); +// } +@Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values, @Const @ByRef TensorOptions options); +@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte[] values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jbyte*", "c10::ArrayRef", "std::vector&"}) @StdVector("jbyte") byte... 
values); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jshort*", "c10::ArrayRef", "std::vector&"}) @StdVector("jshort") short[] values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(short value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jshort*", "c10::ArrayRef", "std::vector&"}) @StdVector("jshort") short... values); + @Namespace("at") public static native @ByVal Tensor tensor(short value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int[] values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(int value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"jint*", "c10::ArrayRef", "std::vector&"}) @StdVector("jint") int... 
values); + @Namespace("at") public static native @ByVal Tensor tensor(int value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... values); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float[] values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(float value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"float*", "c10::ArrayRef", "std::vector&"}) @StdVector("float") float... 
values); + @Namespace("at") public static native @ByVal Tensor tensor(float value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(double value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(double value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value); +@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef 
values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value); +// #undef TENSOR + + // namespace at + + // Parsed from ATen/ops/tensordot.h // #pragma once @@ -76746,21 +76746,497 @@ scalar_t sf(scalar_t x, scalar_t y) // summary of https://github.com/pytorch/pytorch/pull/75201 for more details. @Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kProducedBytecodeVersion(); -// static_assert( -// kProducedBytecodeVersion >= kProducedFileFormatVersion, -// "kProducedBytecodeVersion must be higher or equal to -// kProducedFileFormatVersion."); +// static_assert( +// kProducedBytecodeVersion >= kProducedFileFormatVersion, +// "kProducedBytecodeVersion must be higher or equal to +// kProducedFileFormatVersion."); + +// Introduce kMinSupportedBytecodeVersion and kMaxSupportedBytecodeVersion +// for limited backward/forward compatibility support of bytecode. If +// kMinSupportedBytecodeVersion <= model_version <= kMaxSupportedBytecodeVersion +// (in loader), we should support this model_version. For example, we provide a +// wrapper to handle an updated operator. 
+@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMinSupportedBytecodeVersion(); +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMaxSupportedBytecodeVersion(); + + // namespace serialize + // namespace caffe2 + + +// Parsed from torch/csrc/jit/serialization/unpickler.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../DeserializationStorageContext.java + + +// Targeting ../Unpickler.java + + + + + + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/script_type_parser.h + +// #pragma once +// #include +// #include +// #include +// #include +// Targeting ../ScriptTypeParser.java + + + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/resolver.h + +// #pragma once + +// #include +// #include +// #include +// Targeting ../Resolver.java + + +// Targeting ../NativeResolver.java + + + +@Namespace("torch::jit") public static native @SharedPtr NativeResolver nativeResolver(); + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/sugared_value.h + +// #pragma once +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../SugaredValue.java + + +// Targeting ../SimpleValue.java + + +// Targeting ../BuiltinFunction.java + + +// Targeting ../SugaredTupleValue.java + + +// Targeting ../BuiltinModule.java + + +// Targeting ../ClassValue.java + + +// Targeting ../NamedTupleConstructor.java + + +// Targeting ../FunctionValue.java + + +// Targeting ../ClosureValue.java + + +// Targeting ../MethodValue.java + + +// Targeting ../PrintValue.java + + +// Targeting ../CastValue.java + + +// Targeting ../TensorCastValue.java + + +// Targeting ../MagicMethod.java + + +// Targeting ../SpecialFormValue.java + + +// Targeting 
../LegacyTensorConstructor.java + + +// Targeting ../RangeValue.java + + + +// Specialized Tree structure to matched against for special handling +// of builtin functions iterables expressions like zip(), enumerate(), etc. +// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: +// zip(x, y) -> (x, y) with tuple assignment to each loop target +// enumerate(x) -> (range(0, math.inf, 1), x) +// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: +// (a, (range(0, math.inf, 1), b), range(0, 100)) +// We use those base iterables to fill in the loop information like +// max_trip_count and set the value table for loop targets +// Iterables can contain lists of SugaredValues like ModuleLists. If it +// does, then we emit it unrolled and require that all values it contains +// have a statically-determinable length. + +@Namespace("torch::jit") public static native @ByVal ValueVector toValues( + @ByRef Graph g, + @ByVal NamedValueArrayRef nvs); +// Targeting ../SimpleSelf.java + + +// Targeting ../ExceptionMessageValue.java + + +// Targeting ../ExceptionValue.java + + +// Targeting ../SugaredEnumClass.java + + +// Targeting ../SliceValue.java + + + + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/error_report.h + +// #pragma once + +// #include +// #include +// Targeting ../Call.java + + +// Targeting ../ErrorReport.java + + + + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/tree.h + +// #pragma once + +// #include +// #include +// #include +// #include + +// #include +// #include +// #include + +// Trees are used to represent all forms of TC IR, pre- and post-typechecking. +// Rather than have a full class hierarchy for all TC statements, trees are a +// slight variation of Lisp s-expressions. 
For instance, the expression a*b+1 +// is represented as: +// (+ (* (ident a) (ident b)) (const 1)) +// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which +// define stringValue(). Everything else is a Compound object, which has a +// 'kind' that is a token from lexer.h's TokenKind enum. Single-character +// operators like '+' are represented using the character itself (so, add.kind() +// would be '+'). Each Compound object also contains a list of subtrees and is +// associated with a SourceRange for error reporting. +// Memory management of trees is done using intrusive_ptr. +// Targeting ../Tree.java + + +// Targeting ../JitString.java + + + +@Namespace("torch::jit") public static native @ByVal SourceRange mergeRanges(@ByVal SourceRange c, @Cast("const torch::jit::TreeList*") @ByRef SymDimVector others); +// Targeting ../Compound.java + + +// Targeting ../pretty_tree.java + + + +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal pretty_tree t_); + +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef TreeRef t); + + // namespace jit + // namespace torch + + +// Parsed from torch/csrc/jit/frontend/lexer.h + +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +// #endif + +// single character tokens are just the character itself '+' +// multi-character tokens need an entry here +// if the third entry is not the empty string, it is used +// in the lexer to match this token. + +// These kinds are also used in Tree.h as the kind of the AST node. 
+// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the +// lexer. + +// #define TC_FORALL_TOKEN_KINDS(_) +// _(TK_EOF, "eof", "") +// _(TK_WHITESPACE, "whitespace", "") +// _(TK_WHITESPACE_EOF, "whitespace_eof", "") +// _(TK_NUMBER, "number", "") +// _(TK_NEWLINE, "newline", "") +// _(TK_INDENT, "indent", "") +// _(TK_DEDENT, "dedent", "") +// _(TK_DEF, "def", "def") +// _(TK_EQUIVALENT, "equivalent", "<=>") +// _(TK_IDENT, "ident", "") +// _(TK_STRING, "string", "") +// _(TK_STRINGLITERAL, "string_literal", "") +// _(TK_CONST, "const", "") +// _(TK_LIST, "list", "") +// _(TK_DICT, "dict", "") +// _(TK_OPTION, "option", "") +// _(TK_APPLY, "apply", "") +// _(TK_COMPREHENSION, "comprehension", "") +// _(TK_RANGE_CONSTRAINT, "range_constraint", "") +// _(TK_PARAM, "param", "") +// _(TK_INFERRED, "inferred", "") +// _(TK_ACCESS, "access", "") +// _(TK_ASSIGN, "assign", "") +// _(TK_AUG_ASSIGN, "aug_assign", "") +// _(TK_ATTRIBUTE, "attribute", "") +// _(TK_IF, "if", "if") +// _(TK_ELSE, "else", "else") +// _(TK_ELIF, "elif", "elif") +// _(TK_WHILE, "while", "while") +// _(TK_EXPR_STMT, "expression statement", "") +// _(TK_RETURN, "return", "return") +// _(TK_IS, "is", "is") +// _(TK_ISNOT, "is not", "is not") +// _(TK_NE, "ne", "!=") +// _(TK_EQ, "eq", "==") +// _(TK_LE, "le", "<=") +// _(TK_GE, "ge", ">=") +// _(TK_FLOOR_DIV, "floordiv", "//") +// _(TK_IF_EXPR, "if", "") +// _(TK_TRUE, "True", "True") +// _(TK_FALSE, "False", "False") +// _(TK_NONE, "None", "None") +// _(TK_AND, "and", "and") +// _(TK_OR, "or", "or") +// _(TK_NOT, "not", "not") +// _(TK_LSHIFT, "<<", "<<") +// _(TK_RSHIFT, ">>", ">>") +// _(TK_CAST, "cast", "") +// _(TK_PLUS_EQ, "+=", "+=") +// _(TK_MINUS_EQ, "-=", "-=") +// _(TK_TIMES_EQ, "*=", "*=") +// _(TK_DIV_EQ, "/=", "/=") +// _(TK_MOD_EQ, "%=", "%=") +// _(TK_BIT_OR_EQ, "|=", "|=") +// _(TK_BIT_AND_EQ, "&=", "&=") +// _(TK_BIT_XOR_EQ, "^=", "^=") +// _(TK_LSHIFT_EQ, "<<=", "<<=") +// _(TK_RSHIFT_EQ, ">>=", ">>=") 
+// _(TK_POW_EQ, "**=", "**=") +// _(TK_GLOBAL, "global", "global") +// _(TK_BUILT_IN, "built-in", "") +// _(TK_SUBSCRIPT, "subscript", "") +// _(TK_VAR, "variable", "") +// _(TK_NOTHING, "nothing", "") +// _(TK_DICT_LITERAL, "dict-literal", "") +// _(TK_LIST_LITERAL, "list-literal", "") +// _(TK_TUPLE_LITERAL, "tuple-literal", "") +// _(TK_FOR, "for", "for") +// _(TK_IN, "in", "in") +// _(TK_NOTIN, "not in", "not in") +// _(TK_STARRED, "starred", "") +// _(TK_UNARY_MINUS, "unary minus", "") +// _(TK_POW, "pow operator", "**") +// _(TK_ARROW, "arrow", "->") +// _(TK_DECL, "decl", "") +// _(TK_SLICE_EXPR, "slice expr", "") +// _(TK_TYPE_COMMENT, "type comment", "# type:") +// _(TK_RAISE, "raise", "raise") +// _(TK_ASSERT, "assert", "assert") +// _(TK_DOTS, "dots", "...") +// _(TK_LIST_COMP, "list comprehension", "") +// _(TK_DICT_COMP, "dict comprehension", "") +// _(TK_BREAK, "break", "break") +// _(TK_CONTINUE, "continue", "continue") +// _(TK_DELETE, "del", "del") +// _(TK_PASS, "pass", "pass") +// _(TK_CLASS_DEF, "class", "class") +// _(TK_IMPORT, "import", "import") +// _(TK_WITH, "with", "with") +// _(TK_WITH_ITEM, "withitem", "") +// _(TK_AS, "as", "as") +// _(TK_PROP, "property", "") +// _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") +// _(TK_NONE_TYPE, "NoneType", "NoneType") + +@Namespace("torch::jit") public enum TokenKind { + // we use characters to represent themselves so skip all valid characters + // before + // assigning enum values to multi-char tokens. 
+ TK_DUMMY_START(256), + TK_EOF(257), + TK_WHITESPACE(258), + TK_WHITESPACE_EOF(259), + TK_NUMBER(260), + TK_NEWLINE(261), + TK_INDENT(262), + TK_DEDENT(263), + TK_DEF(264), + TK_EQUIVALENT(265), + TK_IDENT(266), + TK_STRING(267), + TK_STRINGLITERAL(268), + TK_CONST(269), + TK_LIST(270), + TK_DICT(271), + TK_OPTION(272), + TK_APPLY(273), + TK_COMPREHENSION(274), + TK_RANGE_CONSTRAINT(275), + TK_PARAM(276), + TK_INFERRED(277), + TK_ACCESS(278), + TK_ASSIGN(279), + TK_AUG_ASSIGN(280), + TK_ATTRIBUTE(281), + TK_IF(282), + TK_ELSE(283), + TK_ELIF(284), + TK_WHILE(285), + TK_EXPR_STMT(286), + TK_RETURN(287), + TK_IS(288), + TK_ISNOT(289), + TK_NE(290), + TK_EQ(291), + TK_LE(292), + TK_GE(293), + TK_FLOOR_DIV(294), + TK_IF_EXPR(295), + TK_TRUE(296), + TK_FALSE(297), + TK_NONE(298), + TK_AND(299), + TK_OR(300), + TK_NOT(301), + TK_LSHIFT(302), + TK_RSHIFT(303), + TK_CAST(304), + TK_PLUS_EQ(305), + TK_MINUS_EQ(306), + TK_TIMES_EQ(307), + TK_DIV_EQ(308), + TK_MOD_EQ(309), + TK_BIT_OR_EQ(310), + TK_BIT_AND_EQ(311), + TK_BIT_XOR_EQ(312), + TK_LSHIFT_EQ(313), + TK_RSHIFT_EQ(314), + TK_POW_EQ(315), + TK_GLOBAL(316), + TK_BUILT_IN(317), + TK_SUBSCRIPT(318), + TK_VAR(319), + TK_NOTHING(320), + TK_DICT_LITERAL(321), + TK_LIST_LITERAL(322), + TK_TUPLE_LITERAL(323), + TK_FOR(324), + TK_IN(325), + TK_NOTIN(326), + TK_STARRED(327), + TK_UNARY_MINUS(328), + TK_POW(329), + TK_ARROW(330), + TK_DECL(331), + TK_SLICE_EXPR(332), + TK_TYPE_COMMENT(333), + TK_RAISE(334), + TK_ASSERT(335), + TK_DOTS(336), + TK_LIST_COMP(337), + TK_DICT_COMP(338), + TK_BREAK(339), + TK_CONTINUE(340), + TK_DELETE(341), + TK_PASS(342), + TK_CLASS_DEF(343), + TK_IMPORT(344), + TK_WITH(345), + TK_WITH_ITEM(346), + TK_AS(347), + TK_PROP(348), + TK_ELLIPSIS(349), + TK_NONE_TYPE(350); + + public final int value; + private TokenKind(int v) { this.value = v; } + private TokenKind(TokenKind e) { this.value = e.value; } + public TokenKind intern() { for (TokenKind e : values()) if (e.value == value) return e; return this; 
} + @Override public String toString() { return intern().name(); } +} -// Introduce kMinSupportedBytecodeVersion and kMaxSupportedBytecodeVersion -// for limited backward/forward compatibility support of bytecode. If -// kMinSupportedBytecodeVersion <= model_version <= kMaxSupportedBytecodeVersion -// (in loader), we should support this model_version. For example, we provide a -// wrapper to handle an updated operator. -@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMinSupportedBytecodeVersion(); -@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMaxSupportedBytecodeVersion(); +@Namespace("torch::jit") public static native @StdString BytePointer kindToString(int kind); +@Namespace("torch::jit") public static native int stringToKind(@StdString BytePointer str); +@Namespace("torch::jit") public static native int stringToKind(@StdString String str); + +// nested hash tables that indicate char-by-char what is a valid token. 
+// Targeting ../SharedParserData.java + + + +@Namespace("torch::jit") public static native @ByRef SharedParserData sharedParserData(); +// Targeting ../Token.java + + + // namespace jit + // namespace torch - // namespace serialize - // namespace caffe2 // Parsed from caffe2/serialize/inline_container.h @@ -76880,10 +77356,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // namespace serialize - -// Targeting ../DeserializationStorageContext.java - - + // namespace caffe2 @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr CompilationUnit cu, @@ -77347,322 +77820,6 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/csrc/jit/frontend/lexer.h - -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif - -// single character tokens are just the character itself '+' -// multi-character tokens need an entry here -// if the third entry is not the empty string, it is used -// in the lexer to match this token. - -// These kinds are also used in Tree.h as the kind of the AST node. -// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the -// lexer. 
- -// #define TC_FORALL_TOKEN_KINDS(_) -// _(TK_EOF, "eof", "") -// _(TK_WHITESPACE, "whitespace", "") -// _(TK_WHITESPACE_EOF, "whitespace_eof", "") -// _(TK_NUMBER, "number", "") -// _(TK_NEWLINE, "newline", "") -// _(TK_INDENT, "indent", "") -// _(TK_DEDENT, "dedent", "") -// _(TK_DEF, "def", "def") -// _(TK_EQUIVALENT, "equivalent", "<=>") -// _(TK_IDENT, "ident", "") -// _(TK_STRING, "string", "") -// _(TK_STRINGLITERAL, "string_literal", "") -// _(TK_CONST, "const", "") -// _(TK_LIST, "list", "") -// _(TK_DICT, "dict", "") -// _(TK_OPTION, "option", "") -// _(TK_APPLY, "apply", "") -// _(TK_COMPREHENSION, "comprehension", "") -// _(TK_RANGE_CONSTRAINT, "range_constraint", "") -// _(TK_PARAM, "param", "") -// _(TK_INFERRED, "inferred", "") -// _(TK_ACCESS, "access", "") -// _(TK_ASSIGN, "assign", "") -// _(TK_AUG_ASSIGN, "aug_assign", "") -// _(TK_ATTRIBUTE, "attribute", "") -// _(TK_IF, "if", "if") -// _(TK_ELSE, "else", "else") -// _(TK_ELIF, "elif", "elif") -// _(TK_WHILE, "while", "while") -// _(TK_EXPR_STMT, "expression statement", "") -// _(TK_RETURN, "return", "return") -// _(TK_IS, "is", "is") -// _(TK_ISNOT, "is not", "is not") -// _(TK_NE, "ne", "!=") -// _(TK_EQ, "eq", "==") -// _(TK_LE, "le", "<=") -// _(TK_GE, "ge", ">=") -// _(TK_FLOOR_DIV, "floordiv", "//") -// _(TK_IF_EXPR, "if", "") -// _(TK_TRUE, "True", "True") -// _(TK_FALSE, "False", "False") -// _(TK_NONE, "None", "None") -// _(TK_AND, "and", "and") -// _(TK_OR, "or", "or") -// _(TK_NOT, "not", "not") -// _(TK_LSHIFT, "<<", "<<") -// _(TK_RSHIFT, ">>", ">>") -// _(TK_CAST, "cast", "") -// _(TK_PLUS_EQ, "+=", "+=") -// _(TK_MINUS_EQ, "-=", "-=") -// _(TK_TIMES_EQ, "*=", "*=") -// _(TK_DIV_EQ, "/=", "/=") -// _(TK_MOD_EQ, "%=", "%=") -// _(TK_BIT_OR_EQ, "|=", "|=") -// _(TK_BIT_AND_EQ, "&=", "&=") -// _(TK_BIT_XOR_EQ, "^=", "^=") -// _(TK_LSHIFT_EQ, "<<=", "<<=") -// _(TK_RSHIFT_EQ, ">>=", ">>=") -// _(TK_POW_EQ, "**=", "**=") -// _(TK_GLOBAL, "global", "global") -// _(TK_BUILT_IN, 
"built-in", "") -// _(TK_SUBSCRIPT, "subscript", "") -// _(TK_VAR, "variable", "") -// _(TK_NOTHING, "nothing", "") -// _(TK_DICT_LITERAL, "dict-literal", "") -// _(TK_LIST_LITERAL, "list-literal", "") -// _(TK_TUPLE_LITERAL, "tuple-literal", "") -// _(TK_FOR, "for", "for") -// _(TK_IN, "in", "in") -// _(TK_NOTIN, "not in", "not in") -// _(TK_STARRED, "starred", "") -// _(TK_UNARY_MINUS, "unary minus", "") -// _(TK_POW, "pow operator", "**") -// _(TK_ARROW, "arrow", "->") -// _(TK_DECL, "decl", "") -// _(TK_SLICE_EXPR, "slice expr", "") -// _(TK_TYPE_COMMENT, "type comment", "# type:") -// _(TK_RAISE, "raise", "raise") -// _(TK_ASSERT, "assert", "assert") -// _(TK_DOTS, "dots", "...") -// _(TK_LIST_COMP, "list comprehension", "") -// _(TK_DICT_COMP, "dict comprehension", "") -// _(TK_BREAK, "break", "break") -// _(TK_CONTINUE, "continue", "continue") -// _(TK_DELETE, "del", "del") -// _(TK_PASS, "pass", "pass") -// _(TK_CLASS_DEF, "class", "class") -// _(TK_IMPORT, "import", "import") -// _(TK_WITH, "with", "with") -// _(TK_WITH_ITEM, "withitem", "") -// _(TK_AS, "as", "as") -// _(TK_PROP, "property", "") -// _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") -// _(TK_NONE_TYPE, "NoneType", "NoneType") - -@Namespace("torch::jit") public enum TokenKind { - // we use characters to represent themselves so skip all valid characters - // before - // assigning enum values to multi-char tokens. 
- TK_DUMMY_START(256), - TK_EOF(257), - TK_WHITESPACE(258), - TK_WHITESPACE_EOF(259), - TK_NUMBER(260), - TK_NEWLINE(261), - TK_INDENT(262), - TK_DEDENT(263), - TK_DEF(264), - TK_EQUIVALENT(265), - TK_IDENT(266), - TK_STRING(267), - TK_STRINGLITERAL(268), - TK_CONST(269), - TK_LIST(270), - TK_DICT(271), - TK_OPTION(272), - TK_APPLY(273), - TK_COMPREHENSION(274), - TK_RANGE_CONSTRAINT(275), - TK_PARAM(276), - TK_INFERRED(277), - TK_ACCESS(278), - TK_ASSIGN(279), - TK_AUG_ASSIGN(280), - TK_ATTRIBUTE(281), - TK_IF(282), - TK_ELSE(283), - TK_ELIF(284), - TK_WHILE(285), - TK_EXPR_STMT(286), - TK_RETURN(287), - TK_IS(288), - TK_ISNOT(289), - TK_NE(290), - TK_EQ(291), - TK_LE(292), - TK_GE(293), - TK_FLOOR_DIV(294), - TK_IF_EXPR(295), - TK_TRUE(296), - TK_FALSE(297), - TK_NONE(298), - TK_AND(299), - TK_OR(300), - TK_NOT(301), - TK_LSHIFT(302), - TK_RSHIFT(303), - TK_CAST(304), - TK_PLUS_EQ(305), - TK_MINUS_EQ(306), - TK_TIMES_EQ(307), - TK_DIV_EQ(308), - TK_MOD_EQ(309), - TK_BIT_OR_EQ(310), - TK_BIT_AND_EQ(311), - TK_BIT_XOR_EQ(312), - TK_LSHIFT_EQ(313), - TK_RSHIFT_EQ(314), - TK_POW_EQ(315), - TK_GLOBAL(316), - TK_BUILT_IN(317), - TK_SUBSCRIPT(318), - TK_VAR(319), - TK_NOTHING(320), - TK_DICT_LITERAL(321), - TK_LIST_LITERAL(322), - TK_TUPLE_LITERAL(323), - TK_FOR(324), - TK_IN(325), - TK_NOTIN(326), - TK_STARRED(327), - TK_UNARY_MINUS(328), - TK_POW(329), - TK_ARROW(330), - TK_DECL(331), - TK_SLICE_EXPR(332), - TK_TYPE_COMMENT(333), - TK_RAISE(334), - TK_ASSERT(335), - TK_DOTS(336), - TK_LIST_COMP(337), - TK_DICT_COMP(338), - TK_BREAK(339), - TK_CONTINUE(340), - TK_DELETE(341), - TK_PASS(342), - TK_CLASS_DEF(343), - TK_IMPORT(344), - TK_WITH(345), - TK_WITH_ITEM(346), - TK_AS(347), - TK_PROP(348), - TK_ELLIPSIS(349), - TK_NONE_TYPE(350); - - public final int value; - private TokenKind(int v) { this.value = v; } - private TokenKind(TokenKind e) { this.value = e.value; } - public TokenKind intern() { for (TokenKind e : values()) if (e.value == value) return e; return this; 
} - @Override public String toString() { return intern().name(); } -} - -@Namespace("torch::jit") public static native @StdString BytePointer kindToString(int kind); -@Namespace("torch::jit") public static native int stringToKind(@StdString BytePointer str); -@Namespace("torch::jit") public static native int stringToKind(@StdString String str); - -// nested hash tables that indicate char-by-char what is a valid token. -// Targeting ../SharedParserData.java - - - -@Namespace("torch::jit") public static native @ByRef SharedParserData sharedParserData(); -// Targeting ../Token.java - - - // namespace jit - // namespace torch - - - -// Parsed from torch/csrc/jit/frontend/tree.h - -// #pragma once - -// #include -// #include -// #include -// #include - -// #include -// #include -// #include - -// Trees are used to represent all forms of TC IR, pre- and post-typechecking. -// Rather than have a full class hierarchy for all TC statements, trees are a -// slight variation of Lisp s-expressions. For instance, the expression a*b+1 -// is represented as: -// (+ (* (ident a) (ident b)) (const 1)) -// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which -// define stringValue(). Everything else is a Compound object, which has a -// 'kind' that is a token from lexer.h's TokenKind enum. Single-character -// operators like '+' are represented using the character itself (so, add.kind() -// would be '+'). Each Compound object also contains a list of subtrees and is -// associated with a SourceRange for error reporting. -// Memory management of trees is done using intrusive_ptr. 
-// Targeting ../Tree.java - - -// Targeting ../JitString.java - - - -@Namespace("torch::jit") public static native @ByVal SourceRange mergeRanges(@ByVal SourceRange c, @Cast("const torch::jit::TreeList*") @ByRef SymDimVector others); -// Targeting ../Compound.java - - -// Targeting ../pretty_tree.java - - - -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal pretty_tree t_); - -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef TreeRef t); - - // namespace jit - // namespace torch - - -// Parsed from torch/csrc/jit/frontend/error_report.h - -// #pragma once - -// #include -// #include -// Targeting ../Call.java - - -// Targeting ../ErrorReport.java - - - - // namespace jit - // namespace torch - - // Parsed from torch/csrc/jit/frontend/schema_matching.h // #pragma once @@ -77774,129 +77931,6 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace torch -// Parsed from torch/csrc/jit/frontend/sugared_value.h - -// #pragma once -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../SugaredValue.java - - -// Targeting ../SimpleValue.java - - -// Targeting ../BuiltinFunction.java - - -// Targeting ../SugaredTupleValue.java - - -// Targeting ../BuiltinModule.java - - -// Targeting ../ClassValue.java - - -// Targeting ../NamedTupleConstructor.java - - -// Targeting ../FunctionValue.java - - -// Targeting ../ClosureValue.java - - -// Targeting ../MethodValue.java - - -// Targeting ../PrintValue.java - - -// Targeting ../CastValue.java - - -// Targeting ../TensorCastValue.java - - -// Targeting ../MagicMethod.java - - -// Targeting ../SpecialFormValue.java - - -// Targeting ../LegacyTensorConstructor.java - - -// Targeting 
../RangeValue.java - - - -// Specialized Tree structure to matched against for special handling -// of builtin functions iterables expressions like zip(), enumerate(), etc. -// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: -// zip(x, y) -> (x, y) with tuple assignment to each loop target -// enumerate(x) -> (range(0, math.inf, 1), x) -// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: -// (a, (range(0, math.inf, 1), b), range(0, 100)) -// We use those base iterables to fill in the loop information like -// max_trip_count and set the value table for loop targets -// Iterables can contain lists of SugaredValues like ModuleLists. If it -// does, then we emit it unrolled and require that all values it contains -// have a statically-determinable length. - -@Namespace("torch::jit") public static native @ByVal ValueVector toValues( - @ByRef Graph g, - @ByVal NamedValueArrayRef nvs); -// Targeting ../SimpleSelf.java - - -// Targeting ../ExceptionMessageValue.java - - -// Targeting ../ExceptionValue.java - - -// Targeting ../SugaredEnumClass.java - - -// Targeting ../SliceValue.java - - - - // namespace jit - // namespace torch - - -// Parsed from torch/csrc/jit/frontend/resolver.h - -// #pragma once - -// #include -// #include -// #include -// Targeting ../Resolver.java - - -// Targeting ../NativeResolver.java - - - -@Namespace("torch::jit") public static native @SharedPtr NativeResolver nativeResolver(); - // namespace jit - // namespace torch - - // Parsed from torch/csrc/jit/frontend/tree_views.h // #pragma once @@ -78114,40 +78148,6 @@ scalar_t sf(scalar_t x, scalar_t y) // namespace std -// Parsed from torch/csrc/jit/frontend/script_type_parser.h - -// #pragma once -// #include -// #include -// #include -// #include -// Targeting ../ScriptTypeParser.java - - - // namespace jit - // namespace torch - - -// Parsed from torch/csrc/jit/serialization/unpickler.h - -// #pragma once - -// #include -// #include -// 
#include -// #include -// #include -// #include -// Targeting ../Unpickler.java - - - - - - // namespace jit - // namespace torch - - // Parsed from torch/csrc/jit/serialization/pickle.h // #pragma once diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index c41b6bb7b44..7674691295a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -105,116 +105,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10 -// Parsed from c10/core/impl/GPUTrace.h - -// #pragma once - -// #include - - // namespace impl - // namespace c10 - - -// Parsed from c10/cuda/impl/cuda_cmake_macros.h - -// #pragma once - -// Automatically generated header file for the C10 CUDA library. Do not -// include this file directly. Instead, include c10/cuda/CUDAMacros.h - -// #define C10_CUDA_BUILD_SHARED_LIBS - - -// Parsed from c10/cuda/CUDAMacros.h - -// #pragma once - -// #ifndef C10_USING_CUSTOM_GENERATED_MACROS - -// We have not yet modified the AMD HIP build to generate this file so -// we add an extra option to specifically ignore it. -// #ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE -// #include -// #endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE - -// #endif - -// See c10/macros/Export.h for a detailed explanation of what the function -// of these macros are. We need one set of macros for every separate library -// we build. 
- -// #ifdef _WIN32 -// #else // _WIN32 -// #if defined(__GNUC__) -// #define C10_CUDA_EXPORT __attribute__((__visibility__("default"))) -// #else // defined(__GNUC__) -// #define C10_CUDA_EXPORT -// #endif // defined(__GNUC__) -// #define C10_CUDA_IMPORT C10_CUDA_EXPORT -// #endif // _WIN32 - -// This one is being used by libc10_cuda.so -// #ifdef C10_CUDA_BUILD_MAIN_LIB -// #define C10_CUDA_API C10_CUDA_EXPORT -// #else -// #define C10_CUDA_API C10_CUDA_IMPORT -// #endif - -/** - * The maximum number of GPUs that we recognizes. - */ -public static final int C10_COMPILE_TIME_MAX_GPUS = 16; - - -// Parsed from c10/cuda/CUDADeviceAssertionHost.h - -// #pragma once - -// #include - -// #include -// #include -// #include -// #include - -// #ifdef USE_CUDA -// #define TORCH_USE_CUDA_DSA -// #endif - -/** Number of assertion failure messages we can store. If this is too small - * threads will fail silently. */ -@MemberGetter public static native int C10_CUDA_DSA_ASSERTION_COUNT(); -@MemberGetter public static native int C10_CUDA_DSA_MAX_STR_LEN(); -// Targeting ../cuda/DeviceAssertionData.java - - -// Targeting ../cuda/DeviceAssertionsData.java - - -// Targeting ../cuda/CUDAKernelLaunchInfo.java - - -// Targeting ../cuda/CUDAKernelLaunchRegistry.java - - - - - - // namespace cuda - // namespace c10 - -// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH -// requires the same input arguments. We introduce the following macro to -// standardize these. 
-// #define TORCH_DSA_KERNEL_ARGS -// [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, -// [[maybe_unused]] uint32_t assertion_caller_id - -// This macro can be used to pass the DSA arguments onward to another -// function -// #define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id - - // Parsed from c10/cuda/CUDAStream.h // #pragma once @@ -340,6 +230,199 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace std +// Parsed from ATen/cuda/CUDAContext.h + +// #pragma once + +// #include + +// #include +// #include +// #include + +// #ifdef CUDART_VERSION +// #include +// #endif + +// #if defined(USE_ROCM) && ROCM_VERSION >= 50300 +// #include +// #endif + +// #include +// #include +// #include +// #include +// #include +// #include + +/* +A common CUDA interface for ATen. + +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. + +Note that CUDAContext simply defines an interface with no associated class. +It is expected that the modules whose functions compose this interface will +manage their own state. There is only a single CUDA context/state. +*/ + +/** + * DEPRECATED: use device_count() instead + */ +@Namespace("at::cuda") public static native @Cast("int64_t") long getNumGPUs(); + +/** + * CUDA is available if we compiled with CUDA, and there are one or more + * devices. 
If we compiled with CUDA but there is a driver problem, etc., + * this function will report CUDA is not available (rather than raise an error.) + */ +@Namespace("at::cuda") public static native @Cast("bool") boolean is_available(); + +@Namespace("at::cuda") public static native Pointer getCurrentDeviceProperties(); + +@Namespace("at::cuda") public static native int warp_size(); + +@Namespace("at::cuda") public static native Pointer getDeviceProperties(@Cast("int64_t") long device); + +@Namespace("at::cuda") public static native @Cast("bool") boolean canDeviceAccessPeer( + @Cast("int64_t") long device, + @Cast("int64_t") long peer_device); + +@Namespace("at::cuda") public static native Allocator getCUDADeviceAllocator(); + +/* Handles */ +@Namespace("at::cuda") public static native @Cast("cusparseHandle_t") Pointer getCurrentCUDASparseHandle(); +@Namespace("at::cuda") public static native @Cast("cublasHandle_t") Pointer getCurrentCUDABlasHandle(); + +@Namespace("at::cuda") public static native void clearCublasWorkspaces(); + +// #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 +@Namespace("at::cuda") public static native @Cast("cusolverDnHandle_t") Pointer getCurrentCUDASolverDnHandle(); +// #endif + + // namespace at::cuda + + +// Parsed from c10/core/impl/GPUTrace.h + +// #pragma once + +// #include + + // namespace impl + // namespace c10 + + +// Parsed from c10/cuda/CUDADeviceAssertionHost.h + +// #pragma once + +// #include + +// #include +// #include +// #include +// #include + +// #ifdef USE_CUDA +// #define TORCH_USE_CUDA_DSA +// #endif + +/** Number of assertion failure messages we can store. If this is too small + * threads will fail silently. 
*/ +@MemberGetter public static native int C10_CUDA_DSA_ASSERTION_COUNT(); +@MemberGetter public static native int C10_CUDA_DSA_MAX_STR_LEN(); +// Targeting ../cuda/DeviceAssertionData.java + + +// Targeting ../cuda/DeviceAssertionsData.java + + +// Targeting ../cuda/CUDAKernelLaunchInfo.java + + +// Targeting ../cuda/CUDAKernelLaunchRegistry.java + + + + + + // namespace cuda + // namespace c10 + +// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH +// requires the same input arguments. We introduce the following macro to +// standardize these. +// #define TORCH_DSA_KERNEL_ARGS +// [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, +// [[maybe_unused]] uint32_t assertion_caller_id + +// This macro can be used to pass the DSA arguments onward to another +// function +// #define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id + + +// Parsed from c10/cuda/CUDAMacros.h + +// #pragma once + +// #ifndef C10_USING_CUSTOM_GENERATED_MACROS + +// We have not yet modified the AMD HIP build to generate this file so +// we add an extra option to specifically ignore it. +// #ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE +// #include +// #endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE + +// #endif + +// See c10/macros/Export.h for a detailed explanation of what the function +// of these macros are. We need one set of macros for every separate library +// we build. + +// #ifdef _WIN32 +// #else // _WIN32 +// #if defined(__GNUC__) +// #define C10_CUDA_EXPORT __attribute__((__visibility__("default"))) +// #else // defined(__GNUC__) +// #define C10_CUDA_EXPORT +// #endif // defined(__GNUC__) +// #define C10_CUDA_IMPORT C10_CUDA_EXPORT +// #endif // _WIN32 + +// This one is being used by libc10_cuda.so +// #ifdef C10_CUDA_BUILD_MAIN_LIB +// #define C10_CUDA_API C10_CUDA_EXPORT +// #else +// #define C10_CUDA_API C10_CUDA_IMPORT +// #endif + +/** + * The maximum number of GPUs that we recognizes. 
+ */ +public static final int C10_COMPILE_TIME_MAX_GPUS = 16; + + +// Parsed from c10/cuda/impl/cuda_cmake_macros.h + +// #pragma once + +// Automatically generated header file for the C10 CUDA library. Do not +// include this file directly. Instead, include c10/cuda/CUDAMacros.h + +// #define C10_CUDA_BUILD_SHARED_LIBS + + // Parsed from ATen/cuda/Exceptions.h // #pragma once @@ -497,89 +580,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // } while (0) -// Parsed from ATen/cuda/CUDAContext.h - -// #pragma once - -// #include - -// #include -// #include -// #include - -// #ifdef CUDART_VERSION -// #include -// #endif - -// #if defined(USE_ROCM) && ROCM_VERSION >= 50300 -// #include -// #endif - -// #include -// #include -// #include -// #include -// #include -// #include - -/* -A common CUDA interface for ATen. - -This interface is distinct from CUDAHooks, which defines an interface that links -to both CPU-only and CUDA builds. That interface is intended for runtime -dispatch and should be used from files that are included in both CPU-only and -CUDA builds. - -CUDAContext, on the other hand, should be preferred by files only included in -CUDA builds. It is intended to expose CUDA functionality in a consistent -manner. - -This means there is some overlap between the CUDAContext and CUDAHooks, but -the choice of which to use is simple: use CUDAContext when in a CUDA-only file, -use CUDAHooks otherwise. - -Note that CUDAContext simply defines an interface with no associated class. -It is expected that the modules whose functions compose this interface will -manage their own state. There is only a single CUDA context/state. -*/ - -/** - * DEPRECATED: use device_count() instead - */ -@Namespace("at::cuda") public static native @Cast("int64_t") long getNumGPUs(); - -/** - * CUDA is available if we compiled with CUDA, and there are one or more - * devices. 
If we compiled with CUDA but there is a driver problem, etc., - * this function will report CUDA is not available (rather than raise an error.) - */ -@Namespace("at::cuda") public static native @Cast("bool") boolean is_available(); - -@Namespace("at::cuda") public static native Pointer getCurrentDeviceProperties(); - -@Namespace("at::cuda") public static native int warp_size(); - -@Namespace("at::cuda") public static native Pointer getDeviceProperties(@Cast("int64_t") long device); - -@Namespace("at::cuda") public static native @Cast("bool") boolean canDeviceAccessPeer( - @Cast("int64_t") long device, - @Cast("int64_t") long peer_device); - -@Namespace("at::cuda") public static native Allocator getCUDADeviceAllocator(); - -/* Handles */ -@Namespace("at::cuda") public static native @Cast("cusparseHandle_t") Pointer getCurrentCUDASparseHandle(); -@Namespace("at::cuda") public static native @Cast("cublasHandle_t") Pointer getCurrentCUDABlasHandle(); - -@Namespace("at::cuda") public static native void clearCublasWorkspaces(); - -// #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 -@Namespace("at::cuda") public static native @Cast("cusolverDnHandle_t") Pointer getCurrentCUDASolverDnHandle(); -// #endif - - // namespace at::cuda - - // Parsed from ATen/cudnn/cudnn-wrapper.h // #pragma once @@ -612,17 +612,6 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder -// Parsed from ATen/cudnn/Handle.h - -// #pragma once - -// #include -// #include - -@Namespace("at::native") public static native @Cast("cudnnHandle_t") Pointer getCudnnHandle(); - // namespace at::native - - // Parsed from ATen/cudnn/Utils.h // #pragma once @@ -641,6 +630,17 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { +// Parsed from ATen/cudnn/Handle.h + +// #pragma once + +// #include +// #include + +@Namespace("at::native") public static native 
@Cast("cudnnHandle_t") Pointer getCudnnHandle(); + // namespace at::native + + // Parsed from c10/cuda/CUDAGraphsC10Utils.h // #pragma once diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 5a88880145f..5172bf59ff6 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -4,20 +4,20 @@ // ATen/cudnn/Descriptors.h // ATen/cudnn/Types.h // c10/cuda/CUDAGuard.h +#include "c10/cuda/CUDAStream.h" +#include "ATen/cuda/CUDAContext.h" #include "c10/core/impl/GPUTrace.h" -#include "c10/cuda/impl/cuda_cmake_macros.h" -#include "c10/cuda/CUDAMacros.h" #include "c10/cuda/CUDADeviceAssertionHost.h" +#include "c10/cuda/CUDAMacros.h" +#include "c10/cuda/impl/cuda_cmake_macros.h" // #include "c10/cuda/CUDAMiscFunctions.h", // Parsing error // #include "c10/cuda/CUDAException.h", // Parsing error // #include "c10/cuda/CUDAFunctions.h", // Parsing error -#include "c10/cuda/CUDAStream.h" #include "ATen/cuda/Exceptions.h" -#include "ATen/cuda/CUDAContext.h" #include "ATen/cudnn/cudnn-wrapper.h" #include "ATen/cuda/ATenCUDAGeneral.h" -#include "ATen/cudnn/Handle.h" #include "ATen/cudnn/Utils.h" +#include "ATen/cudnn/Handle.h" #include "c10/cuda/CUDAGraphsC10Utils.h" #include "c10/cuda/CUDACachingAllocator.h", #include "c10/cuda/impl/CUDAGuardImpl.h" diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h index cc26edbecb6..bfb6b5def37 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -11,8 +11,9 @@ #include "c10/macros/cmake_macros.h" #include "c10/macros/Export.h" #include "torch/csrc/Export.h" -#include "c10/core/DeviceType.h" 
#include "c10/macros/Macros.h" +#include "c10/core/DeviceType.h" +#include "c10/util/Deprecated.h" // #include "c10/util/string_utils.h" // Android only // #include "c10/util/C++17.h" #include "c10/util/reverse_iterator.h" @@ -32,7 +33,6 @@ #include "c10/core/DispatchKeySet.h" #include "c10/core/Backend.h" #include "c10/core/Layout.h" -#include "c10/util/Deprecated.h" #include "c10/util/AlignOf.h" #include "c10/util/SmallVector.h" #include "c10/util/ArrayRef.h" @@ -40,17 +40,17 @@ #include "c10/core/QScheme.h" #include "c10/core/Stream.h" #include "c10/core/OptionalRef.h" -#include "c10/util/BFloat16-inl.h" #include "c10/util/BFloat16.h" +#include "c10/util/BFloat16-inl.h" #include "c10/util/TypeSafeSignMath.h" #include "c10/util/floating_point_utils.h" #include "c10/util/Float8_e4m3fn-inl.h" #include "c10/util/Float8_e4m3fn.h" #include "c10/util/complex_math.h" -#include "c10/util/complex_utils.h" #include "c10/util/Half.h" // Moved before complex.h because it overrides complex -#include "c10/util/complex.h" #include "c10/util/Half-inl.h" +#include "c10/util/complex_utils.h" +#include "c10/util/complex.h" #include "c10/util/Float8_e5m2-inl.h" #include "c10/util/Float8_e5m2.h" #include "c10/util/bits.h" @@ -60,47 +60,51 @@ #include "c10/util/quint4x2.h" #include "c10/util/quint8.h" #include "c10/core/ScalarType.h" -// #include "c10/util/Optional.h" // Incompatible with declaration of c10::optional as basic container +#include "c10/util/ExclusivelyOwned.h" #include "c10/util/MaybeOwned.h" #include "c10/core/SymNodeImpl.h" -#include "c10/core/SymBool.h" #include "c10/core/SymFloat.h" +#include "c10/core/SymBool.h" #include "c10/core/SymInt.h" #include "c10/util/TypeCast.h" #include "c10/core/Scalar.h" +// #include "c10/util/Optional.h" // Incompatible with declaration of c10::optional as basic container #include "c10/util/IdWrapper.h" +#include "c10/util/Type.h" #include "c10/util/ConstexprCrc.h" #include "c10/util/TypeIndex.h" +#include "c10/util/flat_hash_map.h" 
#include "c10/util/irange.h" #include "c10/util/typeid.h" #include "c10/core/ScalarTypeToTypeMeta.h" #include "c10/util/ThreadLocalDebugInfo.h" #include "c10/util/UniqueVoidPtr.h" #include "c10/core/Allocator.h" -#include "c10/core/impl/HermeticPyObjectTLS.h" -#include "c10/core/SymIntArrayRef.h" #include "c10/util/python_stub.h" -#include "c10/core/impl/PyInterpreter.h" -#include "c10/core/impl/PyObjectSlot.h" #include "c10/core/StorageImpl.h" #include "c10/core/Storage.h" #include "c10/core/AutogradState.h" +#include "c10/core/GradMode.h" +#include "c10/util/Registry.h" +#include "c10/util/Flags.h" #include "c10/core/impl/LocalDispatchKeySet.h" #include "c10/core/InferenceMode.h" +#include "c10/core/SymIntArrayRef.h" +#include "c10/core/DefaultDtype.h" +#include "c10/core/TensorOptions.h" #include "c10/core/WrapDimMinimal.h" +#include "c10/core/impl/HermeticPyObjectTLS.h" +#include "c10/core/impl/PyInterpreter.h" +#include "c10/core/impl/PyObjectSlot.h" #include "c10/core/impl/SizesAndStrides.h" #include "c10/util/DimVector.h" -#include "c10/util/Type.h" -#include "c10/util/Registry.h" -#include "c10/util/Flags.h" +// #include "c10/util/logging_is_not_google_glog.h" // Not parseable +#include "c10/util/Logging.h" #include "c10/util/accumulate.h" #include "c10/util/safe_numerics.h" #include "c10/core/TensorImpl.h" #include "c10/core/UndefinedTensorImpl.h" -#include "c10/util/ExclusivelyOwned.h" // #include "c10/util/OptionalArrayRef.h" // Not compatible with basic container. Are we concerned by https://github.com/pytorch/pytorch/issues/63645 ? 
-#include "c10/core/DefaultDtype.h" -#include "c10/core/TensorOptions.h" #include "ATen/core/CheckMemoryFormat.h" // #include "ATen/core/DeprecatedTypePropertiesRegistry.h" // Deprecated #include "c10/core/GeneratorImpl.h" @@ -149,8 +153,6 @@ #include "c10/core/impl/InlineStreamGuard.h" #include "c10/core/StreamGuard.h" #include "c10/util/FunctionRef.h" -// #include "c10/util/logging_is_not_google_glog.h" // Not parseable -#include "c10/util/Logging.h" #include "c10/util/intrusive_ptr.h" // Moved after the definition or its template args #include "ATen/core/ivalue_inl.h" #include "ATen/core/ivalue.h" @@ -168,28 +170,29 @@ #include "ATen/core/alias_info.h" #include "ATen/core/operator_name.h" #include "ATen/core/dispatch/OperatorOptions.h" -#include "ATen/core/function_schema_inl.h" #include "ATen/core/function_schema.h" +#include "ATen/core/function_schema_inl.h" #include "ATen/core/op_registration/infer_schema.h" +#include "ATen/record_function.h" #include "ATen/core/op_registration/op_allowlist.h" #include "c10/util/either.h" #include "torch/csrc/jit/frontend/function_schema_parser.h" -#include "ATen/core/enum_tag.h" #include "c10/core/CompileTimeFunctionPointer.h" #include "ATen/core/boxing/OperatorKernel.h" -#include "ATen/core/boxing/BoxedKernel_impl.h" #include "ATen/core/boxing/BoxedKernel.h" +#include "ATen/core/boxing/BoxedKernel_impl.h" #include "ATen/core/stack.h" #include "ATen/core/boxing/impl/boxing.h" #include "ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h" #include "ATen/core/boxing/impl/WrapFunctionIntoFunctor.h" #include "ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h" -#include "ATen/core/boxing/KernelFunction_impl.h" #include "ATen/core/boxing/KernelFunction.h" +#include "ATen/core/boxing/KernelFunction_impl.h" #include "ATen/core/dispatch/CppSignature.h" #include "ATen/core/dispatch/RegistrationHandleRAII.h" #include "ATen/core/ATenOpList.h" #include "ATen/core/op_registration/op_registration.h" +#include 
"ATen/core/enum_tag.h" #include "ATen/core/function.h" // #include "ATen/core/builtin_function.h" // Not in API #include "ATen/core/class_type.h" @@ -197,9 +200,7 @@ // #include "torch/custom_class.h" // Not in API #include "torch/library.h" #include "torch/csrc/autograd/autograd_not_implemented_fallback.h" -#include "c10/util/flat_hash_map.h" #include "torch/csrc/autograd/anomaly_mode.h" -#include "c10/core/GradMode.h" #include "ATen/core/grad_mode.h" #include "torch/csrc/autograd/grad_mode.h" #include "ATen/FuncTorchTLS.h" @@ -207,7 +208,6 @@ #include "ATen/PythonTorchFunctionTLS.h" #include "ATen/SavedTensorHooks.h" #include "ATen/ThreadLocalPythonObjects.h" -#include "ATen/record_function.h" #include "c10/core/impl/PythonDispatcherTLS.h" #include "c10/core/impl/TorchDispatchModeTLS.h" #include "ATen/ThreadLocalState.h" @@ -241,8 +241,6 @@ #include "ATen/TensorUtils.h" #include "ATen/TracerMode.h" #include "ATen/core/Reduction.h" -#include "ATen/ops/from_blob.h" -#include "ATen/ops/tensor.h" #include "ATen/ops/abs.h" #include "ATen/ops/absolute.h" #include "ATen/ops/acos.h" @@ -526,6 +524,7 @@ #include "ATen/ops/fractional_max_pool3d_backward.h" #include "ATen/ops/frexp.h" #include "ATen/ops/frobenius_norm.h" +#include "ATen/ops/from_blob.h" #include "ATen/ops/from_file.h" #include "ATen/ops/full.h" #include "ATen/ops/full_like.h" @@ -1086,6 +1085,7 @@ #include "ATen/ops/tanh.h" #include "ATen/ops/tanh_backward.h" #include "ATen/ops/tensor_split.h" +#include "ATen/ops/tensor.h" #include "ATen/ops/tensordot.h" #include "ATen/ops/thnn_conv2d.h" #include "ATen/ops/threshold.h" @@ -1403,22 +1403,22 @@ #include "caffe2/serialize/read_adapter_interface.h" #include "caffe2/serialize/istream_adapter.h" #include "caffe2/serialize/versions.h" +#include "torch/csrc/jit/serialization/unpickler.h" +#include "torch/csrc/jit/frontend/script_type_parser.h" +#include "torch/csrc/jit/frontend/resolver.h" +#include "torch/csrc/jit/frontend/sugared_value.h" +#include 
"torch/csrc/jit/frontend/error_report.h" +#include "torch/csrc/jit/frontend/tree.h" +#include "torch/csrc/jit/frontend/lexer.h" #include "caffe2/serialize/inline_container.h" #include "torch/csrc/jit/serialization/import.h" #include "c10/util/FbcodeMaps.h" #include "torch/csrc/jit/serialization/pickler.h" #include "torch/csrc/jit/frontend/parser_constants.h" #include "torch/csrc/jit/frontend/strtod.h" -#include "torch/csrc/jit/frontend/lexer.h" -#include "torch/csrc/jit/frontend/tree.h" -#include "torch/csrc/jit/frontend/error_report.h" #include "torch/csrc/jit/frontend/schema_matching.h" #include "torch/csrc/jit/frontend/versioned_symbols.h" -#include "torch/csrc/jit/frontend/sugared_value.h" -#include "torch/csrc/jit/frontend/resolver.h" #include "torch/csrc/jit/frontend/tree_views.h" -#include "torch/csrc/jit/frontend/script_type_parser.h" -#include "torch/csrc/jit/serialization/unpickler.h" #include "torch/csrc/jit/serialization/pickle.h" #include "datasets.h" \ No newline at end of file From df1e13e4bf5148df45c5f6f15b274371863c2b76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 6 Nov 2023 17:53:46 +0100 Subject: [PATCH 23/26] Make register_module generic --- .../src/gen/java/org/bytedeco/pytorch/Module.java | 12 ++++-------- .../java/org/bytedeco/pytorch/presets/torch.java | 10 +++++++++- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index d31e290cce4..bb5c4dd8d52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -558,14 +558,10 @@ public class Module extends Pointer { /// /// /// - public Module register_module(BytePointer name, Module module) { return asModule()._register_module(name, module.asModule()); } - private native @SharedPtr("torch::nn::Module") @ByVal @Name("register_module") Module _register_module( - @StdString 
BytePointer name, - @SharedPtr("torch::nn::Module") @ByVal Module module); - public Module register_module(String name, Module module) { return asModule()._register_module(name, module.asModule()); } - private native @SharedPtr("torch::nn::Module") @ByVal @Name("register_module") Module _register_module( - @StdString String name, - @SharedPtr("torch::nn::Module") @ByVal Module module); + private native @Name("register_module") void _register_module(@StdString BytePointer name, @SharedPtr @ByVal Module module); + public M register_module(BytePointer name, M module) { asModule()._register_module(name, module.asModule()); return module; } + private native @Name("register_module") void _register_module(@StdString String name, @SharedPtr @ByVal Module module); + public M register_module(String name, M module) { asModule()._register_module(name, module.asModule()); return module; } /** Registers a submodule with this {@code Module}. * diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 3aa55c02e9a..dad2f8a563f 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1516,7 +1516,15 @@ public void map(InfoMap infoMap) { //// Modules infoMap - .put(new Info("torch::nn::Module::register_module").javaNames("register_module")) + // Mimic C++ register_module and return the subclass instance. Also keep API compatibility with + // presets before 2.0.1 where register_module template was instantiated for all known + // native subclasses. 
+ .put(new Info("torch::nn::Module::register_module").javaText( + "private native @Name(\"register_module\") void _register_module(@StdString BytePointer name, @SharedPtr @ByVal Module module);\n" + + "public M register_module(BytePointer name, M module) { asModule()._register_module(name, module.asModule()); return module; }\n" + + "private native @Name(\"register_module\") void _register_module(@StdString String name, @SharedPtr @ByVal Module module);\n" + + "public M register_module(String name, M module) { asModule()._register_module(name, module.asModule()); return module; }" + )) .put(new Info("torch::nn::Module").upcast()) ; String[] virtuals = {"train", "is_training", "to", "zero_grad", "save", "load", "pretty_print", "is_serializable"}; From 6fcfb808222bc163f8dd19736b9046d2672e15aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Mon, 6 Nov 2023 18:36:03 +0100 Subject: [PATCH 24/26] Revert renaming of `torch::jit::load` --- .../org/bytedeco/pytorch/global/torch.java | 32 +++++++++---------- .../org/bytedeco/pytorch/presets/torch.java | 4 +-- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 9acd11b13ea..d1d369d7348 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -77461,21 +77461,21 @@ scalar_t sf(scalar_t x, scalar_t y) * * The istream must contain a serialized {@code Module}, exported via * {@code torch::jit::ExportModule} in C++. 
*/ -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @Cast("std::istream*") @ByRef Pointer in, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @Cast("std::istream*") @ByRef Pointer in); /// -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @Cast("std::istream*") @ByRef Pointer in, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @Cast("std::istream*") @ByRef Pointer in, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files); @@ -77485,36 +77485,36 @@ scalar_t sf(scalar_t x, scalar_t y) * The file stored at the location given in {@code filename} must contain a * serialized {@code Module}, exported either via {@code ScriptModule.save()} in * Python or {@code torch::jit::ExportModule} in C++. 
*/ -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @StdString BytePointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @StdString BytePointer filename); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @StdString String filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @StdString String filename); /// -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @StdString BytePointer filename, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @StdString BytePointer filename, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @StdString String filename, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native 
@ByVal JitModule load( @StdString String filename, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files); @@ -77524,19 +77524,19 @@ scalar_t sf(scalar_t x, scalar_t y) * The reader adapter, which is for customized input stream, must contain a * serialized {@code Module}, exported either via {@code ScriptModule.save()} in * Python or {@code torch::jit::ExportModule} in C++. */ -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @SharedPtr ReadAdapterInterface rai, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @SharedPtr ReadAdapterInterface rai); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @SharedPtr ReadAdapterInterface rai, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files, @Cast("bool") boolean load_debug_files/*=true*/); -@Namespace("torch::jit") public static native @ByVal @Name("load") JitModule jitLoad( +@Namespace("torch::jit") public static native @ByVal JitModule load( @SharedPtr ReadAdapterInterface rai, @ByVal DeviceOptional device, @ByRef ExtraFilesMap extra_files); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index dad2f8a563f..7b9f249b985 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -213,7 +213,8 @@ public void mapModule(InfoMap infoMap, String name, String base, String baseBase .put(new Info("torch::nn::" + name + "Impl::" + name + "Impl").annotations("@SharedPtr", 
"@Name(\"std::make_shared\")")) .put(new Info("torch::nn::Cloneable").pointerTypes(name + "ImplCloneable").purify()) .put(new Info("torch::nn::ModuleHolder").skip()) - .put(new Info("torch::nn::" + name).skip()); + .put(new Info("torch::nn::" + name).skip()) + ; if (anyModuleCompatible) { anyModuleConstructors += @@ -1938,7 +1939,6 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("torch::jit::Node").pointerTypes("JitNode")) .put(new Info("torch::jit::Module").pointerTypes("JitModule")) .put(new Info("torch::jit::Object").pointerTypes("JitObject")) - .put(new Info("torch::jit::load").javaNames("jitLoad")) .put(new Info("torch::jit::String").pointerTypes("JitString")) ; From 18dda3218351d8b352b26c5ef1110d82dec7770a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Guillemet?= Date: Tue, 7 Nov 2023 18:37:43 +0100 Subject: [PATCH 25/26] Revert change in README concerning register_module --- pytorch/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pytorch/README.md b/pytorch/README.md index 1777f21f3f9..ff19dd6eb71 100644 --- a/pytorch/README.md +++ b/pytorch/README.md @@ -93,9 +93,9 @@ public class SimpleMNIST { static class Net extends Module { Net() { // Construct and register two Linear submodules. - register_module("fc1", fc1 = new LinearImpl(784, 64)); - register_module("fc2", fc2 = new LinearImpl(64, 32)); - register_module("fc3", fc3 = new LinearImpl(32, 10)); + fc1 = register_module("fc1", new LinearImpl(784, 64)); + fc2 = register_module("fc2", new LinearImpl(64, 32)); + fc3 = register_module("fc3", new LinearImpl(32, 10)); } // Implement the Net's algorithm. 
From 9a7e6c20c0267aff46e7424cfa78b23c879b28d9 Mon Sep 17 00:00:00 2001 From: Samuel Audet Date: Thu, 9 Nov 2023 17:29:56 +0900 Subject: [PATCH 26/26] Update CHANGELOG.md and fix nits --- CHANGELOG.md | 2 +- README.md | 4 ++-- platform/pom.xml | 2 +- pytorch/samples/SimpleMNIST.java | 6 +++--- pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java | 3 ++- .../src/main/java/org/bytedeco/pytorch/presets/torch.java | 7 ++----- 6 files changed, 11 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c3a4148c833..cea1e5dd1d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ * Refactor and improve presets for PyTorch ([pull #1360](https://github.com/bytedeco/javacpp-presets/pull/1360)) * Include `mkl_lapack.h` header file in presets for MKL ([issue #1388](https://github.com/bytedeco/javacpp-presets/issues/1388)) * Map new higher-level C++ API of Triton Inference Server ([pull #1361](https://github.com/bytedeco/javacpp-presets/pull/1361)) - * Upgrade presets for OpenCV 4.8.1, DNNL 3.3, OpenBLAS 0.3.24, CPython 3.12.0, NumPy 1.26.1, SciPy 1.11.3, LLVM 17.0.1, Leptonica 1.83.1, Tesseract 5.3.3, CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5, TensorFlow Lite 2.14.0, Triton Inference Server 2.38.0, ONNX 1.14.1, ONNX Runtime 1.16.1, TVM 0.13.0, and their dependencies + * Upgrade presets for OpenCV 4.8.1, DNNL 3.3, OpenBLAS 0.3.24, CPython 3.12.0, NumPy 1.26.1, SciPy 1.11.3, LLVM 17.0.1, Leptonica 1.83.1, Tesseract 5.3.3, CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5, PyTorch 2.1.0, TensorFlow Lite 2.14.0, Triton Inference Server 2.38.0, ONNX 1.14.1, ONNX Runtime 1.16.1, TVM 0.13.0, and their dependencies ### June 6, 2023 version 1.5.9 * Virtualize `nvinfer1::IGpuAllocator` from TensorRT to allow customization ([pull #1367](https://github.com/bytedeco/javacpp-presets/pull/1367)) diff --git a/README.md b/README.md index effa1d36f14..6664501caed 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ Further, in the case of Android, the JavaCPP Presets also 
rely on: Manual Installation ------------------- -Simply put all the desired JAR files (`opencv*.jar`, `ffmpeg*.jar`, etc.), in addition to `javacpp.jar`, somewhere in your class path. The JAR files available as pre-built artifacts are meant to be used with [JavaCPP](https://github.com/bytedeco/javacpp). The binaries for Linux were built for CentOS 6 and 7, so they should work on most distributions currently in use. The ones for Android were compiled for ARMv7 processors featuring an FPU, so they will not work on ancient devices such as the HTC Magic or some others with an ARMv6 CPU. Here are some more specific instructions for common cases: +Simply put all the desired JAR files (`opencv*.jar`, `ffmpeg*.jar`, etc.), in addition to `javacpp.jar`, somewhere in your class path. The JAR files available as pre-built artifacts are meant to be used with [JavaCPP](https://github.com/bytedeco/javacpp). The binaries for Linux are built with Ubuntu, so they should work on most distributions currently in use. The ones for Android were compiled for ARMv7 processors featuring an FPU, so they will not work on ancient devices such as the HTC Magic or some others with an ARMv6 CPU. 
Here are some more specific instructions for common cases: NetBeans (Java SE 7 or newer): @@ -222,7 +222,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip * NVIDIA Video Codec SDK 12.1.x https://developer.nvidia.com/nvidia-video-codec-sdk * OpenCL 3.0.x https://github.com/KhronosGroup/OpenCL-ICD-Loader * MXNet 1.9.x https://github.com/apache/incubator-mxnet - * PyTorch 2.0.x https://github.com/pytorch/pytorch + * PyTorch 2.1.x https://github.com/pytorch/pytorch * SentencePiece 0.1.99 https://github.com/google/sentencepiece * TensorFlow 1.15.x https://github.com/tensorflow/tensorflow * TensorFlow Lite 2.14.x https://github.com/tensorflow/tensorflow diff --git a/platform/pom.xml b/platform/pom.xml index 976e8feba6a..2f06d345982 100644 --- a/platform/pom.xml +++ b/platform/pom.xml @@ -292,7 +292,7 @@ org.bytedeco pytorch-platform - 2.0.1-${project.version} + 2.1.0-${project.version} org.bytedeco diff --git a/pytorch/samples/SimpleMNIST.java b/pytorch/samples/SimpleMNIST.java index 2b2419024e3..d1a3fa392f1 100644 --- a/pytorch/samples/SimpleMNIST.java +++ b/pytorch/samples/SimpleMNIST.java @@ -20,14 +20,14 @@ static class Net extends Module { Tensor forward(Tensor x) { // Use one of many tensor manipulation functions. x = relu(fc1.forward(x.reshape(x.size(0), 784))); - x = dropout(x, /*p=*/0.5, /*train=*/is_training(), false); + x = dropout(x, /*p=*/0.5, /*train=*/is_training()); x = relu(fc2.forward(x)); - x = log_softmax(fc3.forward(x), new LogSoftmaxFuncOptions(/*dim=*/1)); + x = log_softmax(fc3.forward(x), /*dim=*/1); return x; } // Use one of many "standard library" modules. 
- LinearImpl fc1 = null, fc2 = null, fc3 = null; + final LinearImpl fc1, fc2, fc3; } public static void main(String[] args) throws Exception { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index a94350e2fd6..d0e0cb5b3d2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -152,10 +152,11 @@ private native void allocate( public native @Name("item") byte item_char(); + public native @Cast("uint8_t") @Name("item") byte item_byte(); + public native @Name("item") short item_short(); public native @Name("item") int item_int(); - public native @Name("item") int item_byte(); public native @Cast("int64_t") @Name("item") long item_long(); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 7b9f249b985..5a1883491fa 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1976,12 +1976,9 @@ We need either to put an annotation info on each member, or javaName("@NoOffset .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_double")) .put(new Info("at::Tensor::item").javaNames("item_bool")) .put(new Info("at::Tensor::item").javaNames("item_char")) + .put(new Info("at::Tensor::item").javaNames("item_byte")) .put(new Info("at::Tensor::item").javaNames("item_short")) - // Since we don't have uint8 in Java, make item_byte an alias of item_int - .put(new Info("at::Tensor::item").javaText( - "public native @Name(\"item\") int item_int();\n" + - "public native @Name(\"item\") int item_byte();" - )) + .put(new Info("at::Tensor::item").javaNames("item_int")) .put(new Info("at::Tensor::item").javaNames("item_long")) .put(new Info("at::Tensor::item").javaNames("item_float")) .put(new Info("at::Tensor::item").javaNames("item_double"))