Skip to content

Commit

Permalink
Restore missing classes from torch::jit
Browse files Browse the repository at this point in the history
  • Loading branch information
HGuillemet committed Nov 3, 2023
1 parent 8499486 commit 95496c6
Show file tree
Hide file tree
Showing 17 changed files with 662 additions and 13 deletions.
24 changes: 12 additions & 12 deletions pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java
Original file line number Diff line number Diff line change
Expand Up @@ -106,27 +106,27 @@ public native void register_attribute(

public native void apply(@Const @ByRef JitModuleApplyFunction fn);

public native @ByVal @Cast("torch::jit::buffer_list*") module_list buffers(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal @Cast("torch::jit::buffer_list*") module_list buffers();
public native @ByVal @Cast("torch::jit::named_buffer_list*") module_list named_buffers(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal @Cast("torch::jit::named_buffer_list*") module_list named_buffers();
public native @ByVal buffer_list buffers(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal buffer_list buffers();
public native @ByVal named_buffer_list named_buffers(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal named_buffer_list named_buffers();

public native @ByVal module_list children(); // direct modules
public native @ByVal named_module_list named_children();
public native @ByVal module_list modules(); // all modules, including this one, recursively
public native @ByVal named_module_list named_modules();

// all tensors involved in gradient optimization
public native @ByVal @Cast("torch::jit::parameter_list*") module_list parameters(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal @Cast("torch::jit::parameter_list*") module_list parameters();
public native @ByVal @Cast("torch::jit::named_parameter_list*") module_list named_parameters(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal @Cast("torch::jit::named_parameter_list*") module_list named_parameters();
public native @ByVal parameter_list parameters(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal parameter_list parameters();
public native @ByVal named_parameter_list named_parameters(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal named_parameter_list named_parameters();

// all members of the object, similar to iterating over dir(obj) in python
public native @ByVal @Cast("torch::jit::attribute_list*") module_list attributes(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal @Cast("torch::jit::attribute_list*") module_list attributes();
public native @ByVal @Cast("torch::jit::named_attribute_list*") module_list named_attributes(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal @Cast("torch::jit::named_attribute_list*") module_list named_attributes();
public native @ByVal attribute_list attributes(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal attribute_list attributes();
public native @ByVal named_attribute_list named_attributes(@Cast("bool") boolean recurse/*=true*/);
public native @ByVal named_attribute_list named_attributes();

public native void dump(
@Cast("bool") boolean print_method_bodies,
Expand Down
41 changes: 41 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;


/**
 * JavaCPP mapping of the C++ template instantiation {@code torch::jit::Named<c10::IValue>}:
 * a simple (name, value) pair whose value is an {@link IValue}.
 * NOTE(review): presumably the element type yielded by JitModule's named-attribute
 * iteration — confirm against torch/csrc/jit/api/module.h.
 * Generated code ("DO NOT EDIT THIS FILE"); regenerate via the presets instead of editing.
 */
@Name("torch::jit::Named<c10::IValue>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class NamedIValue extends Pointer {
    // Load the native library before any native method is called.
    static { Loader.load(); }
    /** Default native constructor. */
    public NamedIValue() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public NamedIValue(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NamedIValue(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    // Covariant override so position() returns NamedIValue rather than Pointer.
    @Override public NamedIValue position(long position) {
        return (NamedIValue)super.position(position);
    }
    // Returns a view of the i-th element of a natively allocated array.
    @Override public NamedIValue getPointer(long i) {
        return new NamedIValue((Pointer)this).offsetAddress(i);
    }

    // Accessor/mutator pairs for the C++ struct's fields: name (std::string) and value (c10::IValue).
    public native @StdString BytePointer name(); public native NamedIValue name(BytePointer setter);
    public native @ByRef IValue value(); public native NamedIValue value(IValue setter);
}
41 changes: 41 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;


/**
 * JavaCPP mapping of the C++ template instantiation {@code torch::jit::Named<torch::Tensor>}:
 * a simple (name, value) pair whose value is a {@link Tensor}.
 * NOTE(review): presumably the element type yielded by JitModule's named-parameter/buffer
 * iteration — confirm against torch/csrc/jit/api/module.h.
 * Generated code ("DO NOT EDIT THIS FILE"); regenerate via the presets instead of editing.
 */
@Name("torch::jit::Named<torch::Tensor>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class NamedTensor extends Pointer {
    // Load the native library before any native method is called.
    static { Loader.load(); }
    /** Default native constructor. */
    public NamedTensor() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public NamedTensor(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NamedTensor(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    // Covariant override so position() returns NamedTensor rather than Pointer.
    @Override public NamedTensor position(long position) {
        return (NamedTensor)super.position(position);
    }
    // Returns a view of the i-th element of a natively allocated array.
    @Override public NamedTensor getPointer(long i) {
        return new NamedTensor((Pointer)this).offsetAddress(i);
    }

    // Accessor/mutator pairs for the C++ struct's fields: name (std::string) and value (torch::Tensor).
    public native @StdString BytePointer name(); public native NamedTensor name(BytePointer setter);
    public native @ByRef Tensor value(); public native NamedTensor value(Tensor setter);
}
55 changes: 55 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;

/**
 * JavaCPP mapping of {@code torch::jit::slot_iterator_impl<torch::jit::detail::AttributePolicy>}:
 * an iterator over the attribute slots of a {@link JitModule}, dereferencing to {@link IValue}.
 * Obtained from {@code attribute_list.begin()}/{@code end()}; compare with {@link #notEquals}
 * to detect the end of iteration.
 * Generated code ("DO NOT EDIT THIS FILE"); regenerate via the presets instead of editing.
 */
@Name("torch::jit::slot_iterator_impl<torch::jit::detail::AttributePolicy>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class attribute_iterator extends Pointer {
    // Load the native library before any native method is called.
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public attribute_iterator(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public attribute_iterator(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    // Covariant override so position() returns attribute_iterator rather than Pointer.
    @Override public attribute_iterator position(long position) {
        return (attribute_iterator)super.position(position);
    }
    // Returns a view of the i-th element of a natively allocated array.
    @Override public attribute_iterator getPointer(long i) {
        return new attribute_iterator((Pointer)this).offsetAddress(i);
    }

    /**
     * Begin-iterator over {@code root}'s attributes.
     * @param root module whose attributes are iterated
     * @param recurse whether to descend into submodules
     * @param return_module whether the module itself is included in the iteration
     */
    public attribute_iterator(
        @ByVal JitModule root,
        @Cast("bool") boolean recurse,
        @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); }
    private native void allocate(
        @ByVal JitModule root,
        @Cast("bool") boolean recurse,
        @Cast("bool") boolean return_module);
    // empty cursors_, represents end of iteration
    public attribute_iterator() { super((Pointer)null); allocate(); }
    private native void allocate();
    // Despite the generated name, this maps unary C++ operator* (dereference):
    // it returns the attribute the iterator currently points at.
    public native @ByVal @Name("operator *") IValue multiply();
    // Maps C++ operator-> (member access through the iterator).
    public native @ByVal @Name("operator ->") IValue access();
    // Pre-increment (++it): advances and returns this iterator by reference.
    public native @ByRef @Name("operator ++") attribute_iterator increment();
    // Post-increment (it++): the int argument is the C++ dummy disambiguator.
    public native @ByVal @Name("operator ++") attribute_iterator increment(int arg0);

    // Maps the namespace-level C++ operator!=; the public wrapper below supplies `this`.
    private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(
        @Const @ByRef attribute_iterator a,
        @Const @ByRef attribute_iterator b);
    /** True while this iterator differs from {@code b} (typically the end iterator). */
    public boolean notEquals(attribute_iterator b) { return notEquals(this, b); }
}
32 changes: 32 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;

/**
 * JavaCPP mapping of {@code torch::jit::slot_list_impl<torch::jit::detail::AttributePolicy>}:
 * a lazy, iterable view over the attribute slots of a {@link JitModule}.
 * Iterate with {@link #begin()}/{@link #end()} and {@code attribute_iterator.notEquals}.
 * Generated code ("DO NOT EDIT THIS FILE"); regenerate via the presets instead of editing.
 */
@Name("torch::jit::slot_list_impl<torch::jit::detail::AttributePolicy>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class attribute_list extends Pointer {
    // Load the native library before any native method is called.
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public attribute_list(Pointer p) { super(p); }

    /** Iterator positioned at the first attribute. */
    public native @ByVal attribute_iterator begin();
    /** Past-the-end iterator. */
    public native @ByVal attribute_iterator end();
    /** Number of attributes in this view. */
    public native @Cast("size_t") long size();

    /**
     * Builds a view over {@code module}'s attributes.
     * @param recurse whether to descend into submodules
     * @param return_module whether the module itself is included
     */
    public attribute_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); }
    private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module);
}
55 changes: 55 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;

/**
 * JavaCPP mapping of {@code torch::jit::slot_iterator_impl<torch::jit::detail::BufferPolicy>}:
 * an iterator over the buffer slots of a {@link JitModule}, dereferencing to {@link Tensor}.
 * Obtained from {@code buffer_list.begin()}/{@code end()}; compare with {@link #notEquals}
 * to detect the end of iteration.
 * Generated code ("DO NOT EDIT THIS FILE"); regenerate via the presets instead of editing.
 */
@Name("torch::jit::slot_iterator_impl<torch::jit::detail::BufferPolicy>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class buffer_iterator extends Pointer {
    // Load the native library before any native method is called.
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public buffer_iterator(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public buffer_iterator(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    // Covariant override so position() returns buffer_iterator rather than Pointer.
    @Override public buffer_iterator position(long position) {
        return (buffer_iterator)super.position(position);
    }
    // Returns a view of the i-th element of a natively allocated array.
    @Override public buffer_iterator getPointer(long i) {
        return new buffer_iterator((Pointer)this).offsetAddress(i);
    }

    /**
     * Begin-iterator over {@code root}'s buffers.
     * @param root module whose buffers are iterated
     * @param recurse whether to descend into submodules
     * @param return_module whether the module itself is included in the iteration
     */
    public buffer_iterator(
        @ByVal JitModule root,
        @Cast("bool") boolean recurse,
        @Cast("bool") boolean return_module) { super((Pointer)null); allocate(root, recurse, return_module); }
    private native void allocate(
        @ByVal JitModule root,
        @Cast("bool") boolean recurse,
        @Cast("bool") boolean return_module);
    // empty cursors_, represents end of iteration
    public buffer_iterator() { super((Pointer)null); allocate(); }
    private native void allocate();
    // Despite the generated name, this maps unary C++ operator* (dereference):
    // it returns the buffer tensor the iterator currently points at.
    public native @ByVal @Name("operator *") Tensor multiply();
    // Maps C++ operator-> (member access through the iterator).
    public native @ByVal @Name("operator ->") Tensor access();
    // Pre-increment (++it): advances and returns this iterator by reference.
    public native @ByRef @Name("operator ++") buffer_iterator increment();
    // Post-increment (it++): the int argument is the C++ dummy disambiguator.
    public native @ByVal @Name("operator ++") buffer_iterator increment(int arg0);

    // Maps the namespace-level C++ operator!=; the public wrapper below supplies `this`.
    private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(
        @Const @ByRef buffer_iterator a,
        @Const @ByRef buffer_iterator b);
    /** True while this iterator differs from {@code b} (typically the end iterator). */
    public boolean notEquals(buffer_iterator b) { return notEquals(this, b); }
}
32 changes: 32 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;

/**
 * JavaCPP mapping of {@code torch::jit::slot_list_impl<torch::jit::detail::BufferPolicy>}:
 * a lazy, iterable view over the buffer slots of a {@link JitModule}.
 * Iterate with {@link #begin()}/{@link #end()} and {@code buffer_iterator.notEquals}.
 * Generated code ("DO NOT EDIT THIS FILE"); regenerate via the presets instead of editing.
 */
@Name("torch::jit::slot_list_impl<torch::jit::detail::BufferPolicy>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class buffer_list extends Pointer {
    // Load the native library before any native method is called.
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public buffer_list(Pointer p) { super(p); }

    /** Iterator positioned at the first buffer. */
    public native @ByVal buffer_iterator begin();
    /** Past-the-end iterator. */
    public native @ByVal buffer_iterator end();
    /** Number of buffers in this view. */
    public native @Cast("size_t") long size();

    /**
     * Builds a view over {@code module}'s buffers.
     * @param recurse whether to descend into submodules
     * @param return_module whether the module itself is included
     */
    public buffer_list(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module) { super((Pointer)null); allocate(module, recurse, return_module); }
    private native void allocate(@ByVal JitModule module, @Cast("bool") boolean recurse, @Cast("bool") boolean return_module);
}
42 changes: 42 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
Original file line number Diff line number Diff line change
Expand Up @@ -64220,6 +64220,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
// Targeting ../NamedJitModule.java


// Targeting ../NamedTensor.java


// Targeting ../NamedIValue.java


// namespace detail
// Targeting ../JitModule.java

Expand Down Expand Up @@ -64296,12 +64302,48 @@ The list of (type, depth) pairs controls the type of specializations and the num
// Targeting ../named_module_iterator.java


// Targeting ../parameter_iterator.java


// Targeting ../named_parameter_iterator.java


// Targeting ../attribute_iterator.java


// Targeting ../named_attribute_iterator.java


// Targeting ../buffer_iterator.java


// Targeting ../named_buffer_iterator.java


// Targeting ../module_list.java


// Targeting ../named_module_list.java


// Targeting ../parameter_list.java


// Targeting ../named_parameter_list.java


// Targeting ../attribute_list.java


// Targeting ../named_attribute_list.java


// Targeting ../buffer_list.java


// Targeting ../named_buffer_list.java


// Targeting ../ModulePolicy.java


Expand Down
Loading

0 comments on commit 95496c6

Please sign in to comment.