diff --git a/JGNN/src/examples/classification/LogisticRegression.java b/JGNN/src/examples/classification/LogisticRegression.java index b03646f6..54a0b8e2 100644 --- a/JGNN/src/examples/classification/LogisticRegression.java +++ b/JGNN/src/examples/classification/LogisticRegression.java @@ -2,10 +2,10 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Citeseer; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.nn.loss.Accuracy; import mklab.JGNN.nn.loss.BinaryCrossEntropy; import mklab.JGNN.core.Slice; diff --git a/JGNN/src/examples/classification/MLP.java b/JGNN/src/examples/classification/MLP.java index 77c54d72..093f2fbf 100644 --- a/JGNN/src/examples/classification/MLP.java +++ b/JGNN/src/examples/classification/MLP.java @@ -2,10 +2,10 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Citeseer; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; diff --git a/JGNN/src/examples/nodeClassification/APPNP.java b/JGNN/src/examples/nodeClassification/APPNP.java index 025c7096..af06ed98 100644 --- a/JGNN/src/examples/nodeClassification/APPNP.java +++ b/JGNN/src/examples/nodeClassification/APPNP.java @@ -3,11 +3,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Cora; import mklab.JGNN.adhoc.parsers.FastBuilder; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; diff --git a/JGNN/src/examples/nodeClassification/GAT.java b/JGNN/src/examples/nodeClassification/GAT.java index b645ae2c..8218258e 100644 --- a/JGNN/src/examples/nodeClassification/GAT.java +++ b/JGNN/src/examples/nodeClassification/GAT.java @@ -2,11 +2,12 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Citeseer; import mklab.JGNN.adhoc.parsers.FastBuilder; +import mklab.JGNN.adhoc.train.NodeClassification; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; @@ -34,7 +35,7 @@ public static void main(String[] args) throws Exception { .classify() .assertBackwardValidity(); - ModelTraining trainer = new ModelTraining() + ModelTraining trainer = new NodeClassification() .setOptimizer(new Adam(0.01)) .setEpochs(300) .setPatience(100) @@ -48,7 +49,9 @@ public static void main(String[] args) throws Exception { .init(new XavierNormal()) .train(trainer, Tensor.fromRange(nodes.size()).asColumn(), - dataset.labels(), nodes.range(0, 0.6), nodes.range(0.6, 0.8)); + dataset.labels(), + nodes.range(0, 0.6), + nodes.range(0.6, 0.8)); System.out.println("Training time "+(System.currentTimeMillis()-tic)/1000.); Matrix output = model.predict(Tensor.fromRange(0, nodes.size()).asColumn()).get(0).cast(Matrix.class); diff --git a/JGNN/src/examples/nodeClassification/GCN.java b/JGNN/src/examples/nodeClassification/GCN.java index 817f5eb3..fd64eb2d 100644 --- 
a/JGNN/src/examples/nodeClassification/GCN.java +++ b/JGNN/src/examples/nodeClassification/GCN.java @@ -5,11 +5,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Cora; import mklab.JGNN.adhoc.parsers.FastBuilder; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.core.empy.EmptyTensor; diff --git a/JGNN/src/examples/nodeClassification/GCNII.java b/JGNN/src/examples/nodeClassification/GCNII.java index f8e61313..55dab25a 100644 --- a/JGNN/src/examples/nodeClassification/GCNII.java +++ b/JGNN/src/examples/nodeClassification/GCNII.java @@ -2,11 +2,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Cora; import mklab.JGNN.adhoc.parsers.FastBuilder; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; diff --git a/JGNN/src/examples/nodeClassification/HetGCN.java b/JGNN/src/examples/nodeClassification/HetGCN.java index 1cdc296e..1c8a059d 100644 --- a/JGNN/src/examples/nodeClassification/HetGCN.java +++ b/JGNN/src/examples/nodeClassification/HetGCN.java @@ -2,11 +2,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Citeseer; import mklab.JGNN.adhoc.parsers.FastBuilder; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; diff --git a/JGNN/src/examples/nodeClassification/MessagePassing.java b/JGNN/src/examples/nodeClassification/MessagePassing.java index 6245e634..e7b7344d 100644 --- a/JGNN/src/examples/nodeClassification/MessagePassing.java +++ b/JGNN/src/examples/nodeClassification/MessagePassing.java @@ -2,11 +2,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Cora; import mklab.JGNN.adhoc.parsers.FastBuilder; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; diff --git a/JGNN/src/examples/nodeClassification/Scripting.java b/JGNN/src/examples/nodeClassification/Scripting.java index 2ec1d003..510ef666 100644 --- a/JGNN/src/examples/nodeClassification/Scripting.java +++ b/JGNN/src/examples/nodeClassification/Scripting.java @@ -4,11 +4,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Cora; import mklab.JGNN.adhoc.parsers.Neuralang; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.core.empy.EmptyTensor; diff --git a/JGNN/src/examples/tutorial/Learning.java b/JGNN/src/examples/tutorial/Learning.java index 651961d2..6eb248db 100644 --- a/JGNN/src/examples/tutorial/Learning.java +++ b/JGNN/src/examples/tutorial/Learning.java @@ -2,11 +2,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; 
+import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Citeseer; import mklab.JGNN.adhoc.datasets.Cora; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; diff --git a/JGNN/src/examples/tutorial/NN.java b/JGNN/src/examples/tutorial/NN.java index dbda5637..0b208113 100644 --- a/JGNN/src/examples/tutorial/NN.java +++ b/JGNN/src/examples/tutorial/NN.java @@ -3,11 +3,11 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Citeseer; import mklab.JGNN.adhoc.parsers.LayeredBuilder; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.nn.initializers.XavierNormal; diff --git a/JGNN/src/examples/tutorial/Quickstart.java b/JGNN/src/examples/tutorial/Quickstart.java index 6f67ed91..279fa281 100644 --- a/JGNN/src/examples/tutorial/Quickstart.java +++ b/JGNN/src/examples/tutorial/Quickstart.java @@ -2,11 +2,12 @@ import mklab.JGNN.adhoc.Dataset; import mklab.JGNN.adhoc.ModelBuilder; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.adhoc.datasets.Cora; import mklab.JGNN.adhoc.parsers.FastBuilder; +import mklab.JGNN.adhoc.parsers.Neuralang; import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.Model; -import mklab.JGNN.nn.ModelTraining; import mklab.JGNN.core.Slice; import mklab.JGNN.core.Tensor; import mklab.JGNN.core.empy.EmptyTensor; @@ -25,42 +26,49 @@ public class Quickstart { public static void main(String[] args) throws Exception { Dataset dataset = new Cora(); dataset.graph().setMainDiagonal(1).setToSymmetricNormalization(); - + + String architectures = """ + fn classify(nodes, h, epochs: !3000, patience: !100, lr: !0.01) { + return softmax(h[nodes], dim: "row"); + } + fn gcnlayer(A, h, hidden: 16, reg: 0.005) { + return A@h@matrix(?, hidden, reg) + vector(hidden); + } + fn gcn(A, h, classes: extern) { + h = gcnlayer(A, h); + h = dropout(relu(h), 0.5); + return gcnlayer(A, h, hidden: classes); + } + """; + long numSamples = dataset.samples().getSlice().size(); long numClasses = dataset.labels().getCols(); - ModelBuilder modelBuilder = new FastBuilder(dataset.graph(), dataset.features()) - .config("reg", 0.005) - .config("classes", numClasses) - .config("hidden", 64) - .function("gcnlayer", "(A,h){Adrop = dropout(A, 0.5); return Adrop@(h@matrix(?, hidden, reg))+vector(?);}") - .layer("h{l+1}=relu(gcnlayer(A, h{l}))") - .config("hidden", "classes") // reassigns the output gcnlayer's "hidden" to be the number of "classes" - .layer("h{l+1}=gcnlayer(A, h{l})") - .classify() - .autosize(new EmptyTensor(numSamples)); + ModelBuilder modelBuilder = new Neuralang() + .parse(architectures) + .constant("A", dataset.graph()) + .constant("h", dataset.features()) + .var("nodes") + .config("classes", numClasses) + .config("hidden", numClasses+2) + .out("classify(nodes, gcn(A,h))") + .autosize(new EmptyTensor(numSamples)); + System.out.println("Preferred learning rate: "+modelBuilder.getConfig("lr")); ModelTraining trainer = new ModelTraining() - .setOptimizer(new Adam(0.01)) - .setEpochs(3000) - .setPatience(100) + .configFrom(modelBuilder) .setLoss(new CategoricalCrossEntropy()) - .setValidationLoss(new VerboseLoss(new Accuracy()).setInterval(10)); + .setValidationLoss(new VerboseLoss(new CategoricalCrossEntropy())); - 
Slice nodes = dataset.samples().getSlice().shuffle(); // a permutation of node identifiers - Matrix inputData = Tensor.fromRange(nodes.size()).asColumn(); // each node has its identifier as an input + long tic = System.currentTimeMillis(); + Slice nodes = dataset.samples().getSlice().shuffle(100); Model model = modelBuilder.getModel() .init(new XavierNormal()) - .train(trainer, - inputData, - dataset.labels(), - nodes.range(0, 0.6), // train slice - nodes.range(0.6, 0.8) // validation slice - ); - - //modelBuilder.save(Paths.get("gcn_cora.jgnn")); + .train(trainer, + Tensor.fromRange(nodes.size()).asColumn(), + dataset.labels(), nodes.range(0, 0.6), nodes.range(0.6, 0.8)); - Model loadedModel = model;//ModelBuilder.load(Paths.get("gcn_cora.jgnn")).getModel(); - Matrix output = loadedModel.predict(Tensor.fromRange(0, nodes.size()).asColumn()).get(0).cast(Matrix.class); + System.out.println("Training time "+(System.currentTimeMillis()-tic)/1000.); + Matrix output = model.predict(Tensor.fromRange(0, nodes.size()).asColumn()).get(0).cast(Matrix.class); double acc = 0; for(Long node : nodes.range(0.8, 1)) { Matrix nodeLabels = dataset.labels().accessRow(node).asRow(); diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/Dataset.java b/JGNN/src/main/java/mklab/JGNN/adhoc/Dataset.java index 3eb7de73..0a1b802e 100644 --- a/JGNN/src/main/java/mklab/JGNN/adhoc/Dataset.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/Dataset.java @@ -17,8 +17,9 @@ import mklab.JGNN.core.tensor.SparseTensor; /** - * This class provides the backbone with which to define datasets. - * It provides common operations for downloading and importing data. + * This class provides the backbone with which to define datasets. It provides + * common operations for downloading and importing data. + * * @author Emmanouil Krasanakis * @see #samples() * @see #features() @@ -31,106 +32,112 @@ public class Dataset { private IdConverter class2Ids; private Matrix labels; private Matrix graph; - + protected void downloadIfNotExists(String file, String url) { - if(Files.exists(Paths.get(file))) + if (Files.exists(Paths.get(file))) return; - System.out.println("First time requesting: "+url+"\nDownloading to: "+file); + System.out.println("First time requesting: " + url + "\nDownloading to: " + file); try { Files.createDirectories(Paths.get(file).getParent()); ReadableByteChannel readableByteChannel = Channels.newChannel(new URL(url).openStream()); try (FileOutputStream fileOutputStream = new FileOutputStream(file)) { - fileOutputStream.getChannel() - .transferFrom(readableByteChannel, 0, Long.MAX_VALUE); + fileOutputStream.getChannel().transferFrom(readableByteChannel, 0, Long.MAX_VALUE); } - } - catch(Exception e) { + } catch (Exception e) { e.printStackTrace(); } } - + protected void loadFeatures(String file) { nodes = new IdConverter(); ArrayList rows = new ArrayList(); ArrayList classes = new ArrayList(); class2Ids = new IdConverter(); - try(BufferedReader reader = new BufferedReader(new FileReader(file))){ + try (BufferedReader reader = new BufferedReader(new FileReader(file))) { String line = reader.readLine(); while (line != null) { String[] cols = line.split(","); - if(cols.length<2) + if (cols.length < 2) continue; nodes.getOrCreateId(cols[0]); - Tensor features = new SparseTensor(cols.length-2); - for(int col=0;coldataset.samples().getIds() + * * @return A {@link IdConverter}. */ public IdConverter samples() { return nodes; } + /** - * Retrieves a converter that maps class names to label dimentions. 
- * For example, the prediction for one sample can be converted to its name - * per dataset.classes().get(prediction.argmax()). + * Retrieves a converter that maps class names to label dimensions. For example, + * the prediction for one sample can be converted to its name per + * dataset.classes().get(prediction.argmax()). + * * @return An {@link IdConverter}. */ public IdConverter classes() { return class2Ids; } + /** * Retrieves the dataset's feature matrix. + * * @return A nodes x features {@link Matrix}. */ public Matrix features() { return features; } + /** * Retrieves the dataset's sample labels in one-hot encoding. + * * @return A nodes x classes {@link Matrix}. */ public Matrix labels() { return labels; } + /** * Retrieves the dataset's graph. + * * @return A {@link Matrix} or null if the dataset is feature-only. */ public Matrix graph() { diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/IdConverter.java b/JGNN/src/main/java/mklab/JGNN/adhoc/IdConverter.java index 4d7d24e7..fffc9608 100644 --- a/JGNN/src/main/java/mklab/JGNN/adhoc/IdConverter.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/IdConverter.java @@ -8,6 +8,7 @@ /** * Converts back-and-forth between objects and unique ids. + * * @author Emmanouil Krasanakis */ public class IdConverter { @@ -16,24 +17,32 @@ public class IdConverter { protected String nodeDimensionName; protected String featureDimensionName; private Slice idSlice = null; + /** - * Instantiates an empty converter to be filled with {@link #getOrCreateId(Object)}. + * Instantiates an empty converter to be filled with + * {@link #getOrCreateId(Object)}. */ public IdConverter() { } + /** - * Instantiates the converter on a list of objects to register - * with {@link #getOrCreateId(Object)} on. + * Instantiates the converter on a list of objects to register with + * {@link #getOrCreateId(Object)} on. + * * @param objects A list of objects. */ public IdConverter(List objects) { - for(int i=0;ithis instance */ public IdConverter setDimensionName(String nodeDimensionName, String featureDimensionName) { @@ -41,8 +50,10 @@ public IdConverter setDimensionName(String nodeDimensionName, String featureDime this.featureDimensionName = featureDimensionName; return this; } + /** * Retrieves an identifier for a given object, creating one if none exists. + * * @param object The object for which to obtain an identifier. * @return A long identifier. * @see #getId(Object) @@ -50,23 +61,27 @@ public IdConverter setDimensionName(String nodeDimensionName, String featureDime */ public long getOrCreateId(Object object) { Long ret = ids.get(object); - if(ret==null) { - ids.put(object, ret = (long)ids.size()); + if (ret == null) { + ids.put(object, ret = (long) ids.size()); inverse.put(ret, object); idSlice = null; } return ret; } + /** * Retrieves the object corresponding to a given identifier. + * * @param id The identifier of the object. * @return The object. */ public Object get(long id) { return inverse.get(id); } + /** * Retrieves an identifier. + * * @param object An object with a registered identifier. * @return A long identifier. * @exception Exception if the identifiers does not exist. @@ -75,31 +90,37 @@ public Object get(long id) { public long getId(Object object) { return ids.get(object); } + /** * The number of registered identifiers. + * * @return A long value. */ public long size() { return ids.size(); } + /** - * Checks whether the object has been registered with {@link #getOrCreateId(Object)}. 
+ * Checks whether the object has been registered with + * {@link #getOrCreateId(Object)}. + * * @param object An object to check if it exists. * @return A boolean value. */ public boolean contains(Object object) { return ids.containsKey(object); } + /** - * Returns a slice of all registered identifiers. - * The slice is persistent across multiple calls to this method, but is - * instantiated anew after {@link #getOrCreateId(Object)} registers a new - * object (but not if it retrieves an existing object). + * Returns a slice of all registered identifiers. The slice is persistent across + * multiple calls to this method, but is instantiated anew after + * {@link #getOrCreateId(Object)} registers a new object (but not if it + * retrieves an existing object). * * @return A {@link Slice}. */ public Slice getSlice() { - if(idSlice==null) + if (idSlice == null) idSlice = new Slice(new Range(0, ids.size())); return idSlice; } diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/ModelBuilder.java b/JGNN/src/main/java/mklab/JGNN/adhoc/ModelBuilder.java index ebbba9a7..64e0b208 100644 --- a/JGNN/src/main/java/mklab/JGNN/adhoc/ModelBuilder.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/ModelBuilder.java @@ -56,8 +56,8 @@ import mklab.JGNN.nn.pooling.Mean; /** - * This class and subclasses can be used to create {@link Model} instances - * by automatically creating and managing {@link NNOperation} instances based on + * This class and subclasses can be used to create {@link Model} instances by + * automatically creating and managing {@link NNOperation} instances based on * textual descriptions. * * @author Emmanouil Krasanakis @@ -79,236 +79,246 @@ public class ModelBuilder { private int tmpVariableIdentifier = 0; private static transient DenseTensor denseTensorConstructor = new DenseTensor(1); private static transient DenseMatrix denseMatrixConstructor = new DenseMatrix(1, 1); + public ModelBuilder() { this(new Model()); configurations.put("?", 0.0); } + public ModelBuilder(Model model) { this.model = model; } + /** - * Retrieves the model currently built by the builder. - * This can changed depending on additional building method calls. + * Retrieves the model currently built by the builder. This can change + * depending on additional building method calls. + * * @return A {@link Model} instance. */ public Model getModel() { return model; } - + /** * Serializes the model builder instance into a Path, such as * Paths.get("example.jgnn"). + * * @param path A serialized path. * @return This builder's instance. 
*/ public ModelBuilder save(Path path) { - try(BufferedWriter writer = Files.newBufferedWriter(path)){ - writer.write(this.getClass().getCanonicalName()+"\n"); - for(String configurationName : configurations.keySet()) - writer.write(configurationName+" = config: "+configurations.get(configurationName)+"\n"); - for(String componentName : components.keySet()) - if(components.get(componentName) instanceof Parameter) { - if(components.get(componentName) instanceof Variable) { - writer.write(componentName+" = var: null\n"); + try (BufferedWriter writer = Files.newBufferedWriter(path)) { + writer.write(this.getClass().getCanonicalName() + "\n"); + for (String configurationName : configurations.keySet()) + writer.write(configurationName + " = config: " + configurations.get(configurationName) + "\n"); + for (String componentName : components.keySet()) + if (components.get(componentName) instanceof Parameter) { + if (components.get(componentName) instanceof Variable) { + writer.write(componentName + " = var: null\n"); continue; } - writer.write(componentName+" = "); - Tensor value = ((Parameter)components.get(componentName)).get(); - writer.write((((Parameter)components.get(componentName)).isConstant()?"const ":"param ")+value.describe()+": "); - if(value.density()<0.3) { + writer.write(componentName + " = "); + Tensor value = ((Parameter) components.get(componentName)).get(); + writer.write((((Parameter) components.get(componentName)).isConstant() ? "const " : "param ") + + value.describe() + ": "); + if (value.density() < 0.3) { writer.write("{"); boolean isNotFirst = false; - for(long pos : value.getNonZeroElements()) { - if(isNotFirst) + for (long pos : value.getNonZeroElements()) { + if (isNotFirst) writer.write(","); - writer.write(pos+":"+value.get(pos)); + writer.write(pos + ":" + value.get(pos)); isNotFirst = true; } writer.write("}\n"); - } - else { + } else { writer.write("["); - for(long pos=0;posPaths.get("example.jgnn"). - * The instance may have been serialized with any class that extends the model builder. + * Loads a ModelBuilder instance from the provided path, such as + * Paths.get("example.jgnn"). The instance may have been serialized + * with any class that extends the model builder. * * @param path The provided path. * @return The loaded ModelBuilder instance. */ public static ModelBuilder load(Path path) { ModelBuilder builder; - try(BufferedReader reader = Files.newBufferedReader(path)){ + try (BufferedReader reader = Files.newBufferedReader(path)) { String line = reader.readLine(); try { - builder = (ModelBuilder) Class.forName(line).getDeclaredConstructor().newInstance(); - } - catch (Exception e) { + builder = (ModelBuilder) Class.forName(line).getDeclaredConstructor().newInstance(); + } catch (Exception e) { e.printStackTrace(); return null; } - while((line = reader.readLine())!=null) { - if(line.length()==0) + while ((line = reader.readLine()) != null) { + if (line.length() == 0) continue; int eqPosition = line.indexOf('='); - if(eqPosition==-1) { + if (eqPosition == -1) { String[] splt = line.split("\\s+", 2); - if(splt.length!=2 || !builder.loadCommand(splt[0], splt[1])) - throw new IOException("Unidentified command: "+line+". A different JGNN version was likely used to save the model."); + if (splt.length != 2 || !builder.loadCommand(splt[0], splt[1])) + throw new IOException("Unidentified command: " + line + + ". 
A different JGNN version was likely used to save the model."); continue; } int initPosition = line.indexOf(':', eqPosition); - String name = line.substring(0, eqPosition-1); - if(builder.components.containsKey(name)) + String name = line.substring(0, eqPosition - 1); + if (builder.components.containsKey(name)) continue; - if(initPosition==-1) { + if (initPosition == -1) { System.out.println("parsing " + line); builder.operation(line); continue; } - String type = line.substring(eqPosition+2, initPosition); - System.out.println("reading " + name+" "+type); - if(type.equals("var")) + String type = line.substring(eqPosition + 2, initPosition); + System.out.println("reading " + name + " " + type); + if (type.equals("var")) builder.var(name); - else if(type.equals("out")) + else if (type.equals("out")) builder.out(name); - else if(type.equals("config")) - builder.config(name, Double.parseDouble(line.substring(initPosition+1))); - else if(type.contains("Tensor ") || type.contains("Matrix ")) { - boolean isDense = line.charAt(initPosition+2)=='['; + else if (type.equals("config")) + builder.config(name, Double.parseDouble(line.substring(initPosition + 1))); + else if (type.contains("Tensor ") || type.contains("Matrix ")) { + boolean isDense = line.charAt(initPosition + 2) == '['; Tensor tensor; - if(type.contains("Tensor ")) { - String[] dimParts = type.substring(type.indexOf('(')+1, type.lastIndexOf(')')).split("\\s", 2); - int dim = Integer.parseInt(dimParts[dimParts.length-1]); - tensor = isDense?denseTensorConstructor.zeroCopy(dim):new SparseTensor(dim); - if(dimParts.length>1) + if (type.contains("Tensor ")) { + String[] dimParts = type.substring(type.indexOf('(') + 1, type.lastIndexOf(')')).split("\\s", + 2); + int dim = Integer.parseInt(dimParts[dimParts.length - 1]); + tensor = isDense ? denseTensorConstructor.zeroCopy(dim) : new SparseTensor(dim); + if (dimParts.length > 1) tensor.setDimensionName(dimParts[0]); - } - else { - String[] dims = type.substring(type.indexOf('(')+1, type.lastIndexOf(')')).split(","); + } else { + String[] dims = type.substring(type.indexOf('(') + 1, type.lastIndexOf(')')).split(","); String[] dimRowParts = dims[0].trim().split("\\s", 2); - int dimRow = Integer.parseInt(dimRowParts[dimRowParts.length-1]); + int dimRow = Integer.parseInt(dimRowParts[dimRowParts.length - 1]); String[] dimColParts = dims[1].trim().split("\\s", 2); - int dimCol = Integer.parseInt(dimColParts[dimColParts.length-1]); - tensor = isDense?denseMatrixConstructor.zeroCopy(dimRow, dimCol):new SparseMatrix(dimRow, dimCol); - if(dimRowParts.length>1) + int dimCol = Integer.parseInt(dimColParts[dimColParts.length - 1]); + tensor = isDense ? 
denseMatrixConstructor.zeroCopy(dimRow, dimCol) + : new SparseMatrix(dimRow, dimCol); + if (dimRowParts.length > 1) tensor.cast(Matrix.class).setRowName(dimRowParts[0]); - if(dimColParts.length>1) + if (dimColParts.length > 1) tensor.cast(Matrix.class).setColName(dimColParts[0]); } - if(line.charAt(initPosition+2)=='[') { + if (line.charAt(initPosition + 2) == '[') { long idx = 0; String accum = ""; - for(int pos=initPosition+3;posboolean value */ public boolean hasComponent(String name) { return components.containsKey(name); } + protected void assertValidName(String name) { - if(name==null || name.isEmpty()) + if (name == null || name.isEmpty()) throw new IllegalArgumentException("Invalid component name"); - if(configurations.containsKey(name)) - throw new IllegalArgumentException("Component name "+name+" already in use as a configuration"); - if(components.containsKey(name)) - throw new IllegalArgumentException("Component name "+name+" already in use by another model component"); + if (configurations.containsKey(name)) + throw new IllegalArgumentException("Component name " + name + " already in use as a configuration"); + if (components.containsKey(name)) + throw new IllegalArgumentException("Component name " + name + " already in use by another model component"); } + protected void assertExists(String name) { - if(configurations.containsKey(name)) - throw new IllegalArgumentException("Component name "+name+" is a configuration but expressions can only parse components"); - if(!components.containsKey(name)) { - if(name.equals("row") || name.equals("col")) - throw new IllegalArgumentException("Component name "+name+" not declared.\n It seems like you are trying to use a model declaration for an earlier version of JGNN.\n Newer versions should parse expressions that are compatible\n with Neuralang syntax.\nFIX: Consider replacing the `"+name+"` argument with `dim:\""+name+"\"`"); - throw new IllegalArgumentException("Component name "+name+" not declared"); + if (configurations.containsKey(name)) + throw new IllegalArgumentException( + "Component name " + name + " is a configuration but expressions can only parse components"); + if (!components.containsKey(name)) { + if (name.equals("row") || name.equals("col")) + throw new IllegalArgumentException("Component name " + name + + " not declared.\n It seems like you are trying to use a model declaration for an earlier version of JGNN.\n Newer versions should parse expressions that are compatible\n with Neuralang syntax.\nFIX: Consider replacing the `" + + name + "` argument with `dim:\"" + name + "\"`"); + throw new IllegalArgumentException("Component name " + name + " not declared"); } } - + /** - * Declares a component with the given name to be used as an input - * of the managed model. + * Declares a component with the given name to be used as an input of the + * managed model. + * * @param name The name of the component. * @return The builder's instance. */ @@ -320,35 +330,37 @@ public ModelBuilder var(String name) { variable.setDescription(name); return this; } - + /** - * Declares the component with the given name an output of the - * managed model. The component should have already been assigned a value. - * To output complex expressions use {@link #operation(String)} - * to define them first. + * Declares the component with the given name an output of the managed model. + * The component should have already been assigned a value. To output complex + * expressions use {@link #operation(String)} to define them first. 
+ * + * @param name A component name. * @return The builder's instance. */ public ModelBuilder out(String name) { - if(name.contains("(") || name.contains("[")) { - operation("_return = "+name); + if (name.contains("(") || name.contains("[")) { + operation("_return = " + name); name = "_return"; } assertExists(name); model.addOutput(components.get(name)); return this; } - + /** * Declares a learnable {@link Paramater} component with the given name, * learning L2 regularization, and initial value. - * @param name The name to be assigned to the new component. - * @param regularization The regularization value. Zero corresponds to no regularization. - * Typically, this is non-negative. - * @param value The initial value to be assigned to the parameter. Exact values - * can be overridden by neural initialization strategies, but an initial value - * should be declared nonetheless to determine the parameter type and allocate - * any necessary memory. + * + * @param name The name to be assigned to the new component. + * @param regularization The regularization value. Zero corresponds to no + * regularization. Typically, this is non-negative. + * @param value The initial value to be assigned to the parameter. + * Exact values can be overridden by neural initialization + * strategies, but an initial value should be declared + * nonetheless to determine the parameter type and + * allocate any necessary memory. * @return The builder's instance. * @see #param(String, Tensor) * @see #operation(String) @@ -360,15 +372,17 @@ public ModelBuilder param(String name, double regularization, Tensor value) { variable.setDescription(name); return this; } - + /** - * Declares a configuration hyperparameter, which can be used to declare - * matrix and vector parameters during {@link #operation(String)} expressions. - * For in-expression use of hyperparameters, declare them with {@link #constant(String, double)}. - * In Neuralang terms, this is implements the broadcasting operation. - * @param name The name of the configuration hyperparameter. - * @param value The value to be assigned to the hyperparameter. - * This may also be a long number. + * Declares a configuration hyperparameter, which can be used to declare matrix + * and vector parameters during {@link #operation(String)} expressions. For + * in-expression use of hyperparameters, declare them with + * {@link #constant(String, double)}. In Neuralang terms, this implements the + * broadcasting operation. + * + * @param name The name of the configuration hyperparameter. + * @param value The value to be assigned to the hyperparameter. This may also be + * a long number. + * @return The builder's instance. * @see #operation(String) * @see #param(String, Tensor) @@ -376,17 +390,19 @@ public ModelBuilder param(String name, double regularization, Tensor value) { * @see #config(String, String) */ public ModelBuilder config(String name, double value) { - if(name.equals("?")) + if (name.equals("?")) throw new RuntimeException("The \"?\" config name is not allowed."); this.configurations.put(name, value); return this; } - + /** - * Applies {@link #config(String, double)} where the set value - * is obtained from another configuration hyperaparameter. + * Applies {@link #config(String, double)} where the set value is obtained from + * another configuration hyperparameter. 
+ * + * @param name The name of the configuration hyperparameter to set. + * @param value The name of the configuration hyperparameter whose value should + * be copied. * @return The builder's instance. * @see #config(String, double) */ @@ -396,55 +412,58 @@ public ModelBuilder config(String name, String value) { /** * Retrieves a configuration hyperparameter's value. - * @param name The configuration's name. + * + * @param name The configuration's name. * @return The retrieved value; * @throws RuntimeException If a no configuration with the given name was found. * @see #getConfigOrDefault(String, double) */ public double getConfig(String name) { Double val = configurations.get(name); - if(val==null) - throw new RuntimeException("No configuration "+name+" found"); + if (val == null) + throw new RuntimeException("No configuration " + name + " found"); this.configurations.put(name, val); return val; } - + /** * Retrieves a configuration hyperparameter's value. If no such configuration * exists, a default value is returned instead. - * @param name The configuration's name. - * @param defaultValue The default to be retrieved if no such configuration was found. + * + * @param name The configuration's name. + * @param defaultValue The default to be retrieved if no such configuration was + * found. * @return The retrieved value; * @see #getConfig(String) */ public double getConfigOrDefault(String name, double defaultValue) { return configurations.getOrDefault(name, defaultValue); } - + protected double parseConfigValue(String text) { - if(configurations.containsKey(text)) + if (configurations.containsKey(text)) return configurations.get(text); return Double.parseDouble(text); } - + protected boolean isDouble(String text) { try { Double.parseDouble(text); return true; - } - catch(Exception e) { + } catch (Exception e) { return false; } } /** - * Declares a learnable {@link mklab.JGNN.nn.inputs.Paramater} component with the given name, - * zero regularization, and initial value. - * @param name The name to be assigned to the new component. + * Declares a learnable {@link mklab.JGNN.nn.inputs.Parameter} component with + * the given name, zero regularization, and initial value. + * + * @param name The name to be assigned to the new component. * @param value The initial value to be assigned to the parameter. Exact values - * can be overridden by neural initialization strategies, but an initial value - * should be declared nonetheless to determine the parameter type and allocate - * any necessary memory. + * can be overridden by neural initialization strategies, but an + * initial value should be declared nonetheless to determine the + * parameter type and allocate any necessary memory. * @return The builder's instance. * @see #param(String, double, Tensor) * @see #operation(String) @@ -456,12 +475,13 @@ public ModelBuilder param(String name, Tensor value) { variable.setDescription(name); return this; } - + /** - * Declares a non-learnable constant component with the given name. - * This can be used in computations. To edit the constant's values, - * use {@link #get(String)} to retrieve the respective component. - * @param name The name of the constant component. + * Declares a non-learnable constant component with the given name. This can be + * used in computations. To edit the constant's values, use {@link #get(String)} + * to retrieve the respective component. + * + * @param name The name of the constant component. * @param value A double value to assign to the constant. 
* @return The builder's instance. * @see #config(String, double) @@ -470,20 +490,21 @@ public ModelBuilder param(String name, Tensor value) { public ModelBuilder constant(String name, double value) { return constant(name, Tensor.fromDouble(value)); } - + /** - * Declares a non-learnable constant component with the given name. - * This can be used in computations. To edit the constant's values, - * use {@link #get(String)} to retrieve the respective component. - * @param name The name of the constant component. + * Declares a non-learnable constant component with the given name. This can be + * used in computations. To edit the constant's values, use {@link #get(String)} + * to retrieve the respective component. + * + * @param name The name of the constant component. * @param value A Tensor value to assign to the constant. * @return The builder's instance. * @see #constant(String, double) */ public ModelBuilder constant(String name, Tensor value) { - if(components.containsKey(name)) { - ((Constant)components.get(name)).set(value); - ((Constant)components.get(name)).setDescription(name); + if (components.containsKey(name)) { + ((Constant) components.get(name)).set(value); + ((Constant) components.get(name)).setDescription(name); return this; } assertValidName(name); @@ -492,21 +513,22 @@ public ModelBuilder constant(String name, Tensor value) { variable.setDescription(name); return this; } - + /** - * Retrieves the {@link NNOperation} registered with the provided - * name, for example to investigates its value. + * Retrieves the {@link NNOperation} registered with the provided name, for + * example to investigate its value. + * + * @param name The name of the component. * @return A {@link NNOperation}. */ public NNOperation get(String name) { return components.get(name); } - + /** - * This is a wrapper for getModel().predict(inputs) - * without returning output values (use {@link #get(String)} - * afterwards to view outputs. + * This is a wrapper for getModel().predict(inputs) without + * returning output values (use {@link #get(String)} afterwards to view outputs). + * + * @param inputs A variable number of Tensor inputs. * @return The builder's instance. * @see #getModel() @@ -516,10 +538,11 @@ public ModelBuilder runModel(Tensor... inputs) { model.predict(inputs); return this; } + /** - * This is a wrapper for getModel().predict(inputs) - * without returning output values (use {@link #get(String)} - * afterwards to view outputs. + * This is a wrapper for getModel().predict(inputs) without + * returning output values (use {@link #get(String)} afterwards to view outputs). + * + * @param inputs A list of Tensor inputs. * @return The builder's instance. 
* @see #getModel() @@ -529,50 +552,52 @@ public ModelBuilder runModel(ArrayList inputs) { model.predict(inputs); return this; } - + public ModelBuilder function(String name, String value) { value = value.trim(); - if(value.indexOf(")")==-1) + if (value.indexOf(")") == -1) throw new RuntimeException("Function signature should be enclosed in parentheses."); - if(value.indexOf("{")==-1) + if (value.indexOf("{") == -1) throw new RuntimeException("Function body should start with brackets."); - functions.put(name, value.substring(value.indexOf("{")+1, value.length()-1)); + functions.put(name, value.substring(value.indexOf("{") + 1, value.length() - 1)); functionSignatures.put(name, value.substring(1, value.indexOf(")"))); functionUsages.put(name, 0); return this; } - + private static List extractTokens(String input) { - String tokenRegex = "\\b[a-zA-Z_][a-zA-Z0-9_]*\\b"+"|(\\\".*\\\")"+"|\\b\\w+\\b|\\(|\\)|\\=|\\+|\\;|\\!|\\:|\\#|\\-|\\.|\\*|\\@|\\/|\\[|\\]|\\,|\\?|\\||\\{|\\}"; - Pattern tokenPattern = Pattern.compile(tokenRegex); - Matcher tokenMatcher = tokenPattern.matcher(input); - List tokens = new ArrayList<>(); - while (tokenMatcher.find()) { - String token = tokenMatcher.group(); - tokens.add(token); - } - return tokens; - } - private static boolean isString(String str) { - return str.startsWith("\"") && str.endsWith("\""); - } + String tokenRegex = "\\b[a-zA-Z_][a-zA-Z0-9_]*\\b" + "|(\\\".*\\\")" + + "|\\b\\w+\\b|\\(|\\)|\\=|\\+|\\;|\\!|\\:|\\#|\\-|\\.|\\*|\\@|\\/|\\[|\\]|\\,|\\?|\\||\\{|\\}"; + Pattern tokenPattern = Pattern.compile(tokenRegex); + Matcher tokenMatcher = tokenPattern.matcher(input); + List tokens = new ArrayList<>(); + while (tokenMatcher.find()) { + String token = tokenMatcher.group(); + tokens.add(token); + } + return tokens; + } + + private static boolean isString(String str) { + return str.startsWith("\"") && str.endsWith("\""); + } + + private static boolean isNumeric(String str) { + if (str == null || str.isEmpty()) { + return false; + } + String regex = "[+-]?\\d*(\\.\\d+)?"; + return str.matches(regex); + } - private static boolean isNumeric(String str) { - if (str == null || str.isEmpty()) { - return false; - } - String regex = "[+-]?\\d*(\\.\\d+)?"; - return str.matches(regex); - } - /** - * Parses one or more operations split by new line characters or ; - * to add to the execution graph. All operations should assign a - * value to a new component name and comprise operators and functions. - * For a detailed description of the domain-specific language this - * method accepts, please refer to the library's - * - * online documentation. + * Parses one or more operations split by new line characters or ; to add to the + * execution graph. All operations should assign a value to a new component name + * and comprise operators and functions. For a detailed description of the + * domain-specific language this method accepts, please refer to the library's + * online + * documentation. + * * @param desc The operation to parse. * @return The builder's instance. 
* @see #config(String, double) @@ -582,12 +607,12 @@ private static boolean isNumeric(String str) { */ public ModelBuilder operation(String desc) { desc = desc.replace("'", "\""); - //System.out.println(desc); - + // System.out.println(desc); + String[] lines = desc.split("\\;|\\\n"); - if(lines.length>1) { - for(String line : lines) - if(!line.trim().isEmpty()) + if (lines.length > 1) { + for (String line : lines) + if (!line.trim().isEmpty()) operation(line); return this; } @@ -601,437 +626,400 @@ public ModelBuilder operation(String desc) { desc = desc.replace("(", " ( "); desc = desc.replace(")", " ) "); desc = desc.replace(",", " , "); - if(!desc.contains("MINUS_ONE")) { - if(desc.contains("-") && !components.containsKey("MINUS_ONE")) + if (!desc.contains("MINUS_ONE")) { + if (desc.contains("-") && !components.containsKey("MINUS_ONE")) constant("MINUS_ONE", Tensor.fromDouble(-1)); desc = desc.replace("-", " + MINUS_ONE * "); } desc = desc.replaceAll("\\s\\=\\s+\\+\\s+MINUS\\_ONE", " = MINUS_ONE"); desc = desc.replaceAll("\\s+", " "); - if(desc.endsWith(";")) // hack to parse return correctly - desc = desc.substring(0, desc.length()-1); - + if (desc.endsWith(";")) // hack to parse return correctly + desc = desc.substring(0, desc.length() - 1); + boolean madeChanges = true; - while(madeChanges) { + while (madeChanges) { madeChanges = false; String newDesc = ""; ArrayList suboperation = new ArrayList(); suboperation.add(new StringBuilder()); int level = 0; - for(int i=0;i0) - suboperation.get(suboperation.size()-1).append(c); + } else if (level > 0) + suboperation.get(suboperation.size() - 1).append(c); else newDesc += c; } - if(level!=0) - throw new RuntimeException("Imbalanced parenthesis in operation: "+desc); + if (level != 0) + throw new RuntimeException("Imbalanced parenthesis in operation: " + desc); desc = newDesc; - String[] operators = {" + ", " * ", " @ ", " | ", "-", "/"}; + String[] operators = { " + ", " * ", " @ ", " | ", "-", "/" }; madeChanges = false; - for(String operator : operators) { - if(madeChanges) + for (String operator : operators) { + if (madeChanges) break; String[] splt = desc.split("\\s*=\\s*"); - if(splt.length!=2) - throw new IllegalArgumentException("Exactly one equality needed in each operation: "+desc); + if (splt.length != 2) + throw new IllegalArgumentException("Exactly one equality needed in each operation: " + desc); newDesc = ""; int lastArgPos = -1; - for(int i=0;i4) { + if (splt.length > 4) { String modeText = splt[4].trim(); - if(splt.length>5) - modeText = splt[4]+splt[5]; - if(modeText.endsWith(";")) - modeText = modeText.substring(0, modeText.length()-1); - if(!modeText.split("\\:")[0].trim().equals("dim")) - throw new RuntimeException("Second argument "+modeText+" to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); - modeText = modeText.substring(modeText.indexOf(":")+1).trim(); - if(modeText.equals("\"col\"")) + if (splt.length > 5) + modeText = splt[4] + splt[5]; + if (modeText.endsWith(";")) + modeText = modeText.substring(0, modeText.length() - 1); + if (!modeText.split("\\:")[0].trim().equals("dim")) + throw new RuntimeException("Second argument " + modeText + + " to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); + modeText = modeText.substring(modeText.indexOf(":") + 1).trim(); + if (modeText.equals("\"col\"")) mode = false; - else if(modeText.equals("\"row\"")) + else if (modeText.equals("\"row\"")) mode = true; else - throw new RuntimeException("Invalid dim argument "+modeText+" to softmax"); + 
throw new RuntimeException("Invalid dim argument " + modeText + " to softmax"); } component = new SoftMax(mode); arg0 = splt[3]; - } - else if(splt[2].equals("sum")) { + } else if (splt[2].equals("sum")) { boolean mode = false; - if(splt.length>4) { + if (splt.length > 4) { String modeText = splt[4].trim(); - if(splt.length>5) - modeText = splt[4]+splt[5]; - if(modeText.endsWith(";")) - modeText = modeText.substring(0, modeText.length()-1); - if(!modeText.split("\\:")[0].trim().equals("dim")) - throw new RuntimeException("Second argument "+modeText+" to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); - modeText = modeText.substring(modeText.indexOf(":")+1).trim(); - if(modeText.equals("\"col\"")) + if (splt.length > 5) + modeText = splt[4] + splt[5]; + if (modeText.endsWith(";")) + modeText = modeText.substring(0, modeText.length() - 1); + if (!modeText.split("\\:")[0].trim().equals("dim")) + throw new RuntimeException("Second argument " + modeText + + " to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); + modeText = modeText.substring(modeText.indexOf(":") + 1).trim(); + if (modeText.equals("\"col\"")) mode = false; - else if(modeText.equals("\"row\"")) + else if (modeText.equals("\"row\"")) mode = true; else - throw new RuntimeException("Invalid dim argument "+modeText+" to softmax"); + throw new RuntimeException("Invalid dim argument " + modeText + " to softmax"); } component = new Sum(mode); arg0 = splt[3]; - } - else if(splt[2].equals("mean")) { + } else if (splt[2].equals("mean")) { boolean mode = false; - if(splt.length>4) { + if (splt.length > 4) { String modeText = splt[4].trim(); - if(splt.length>5) - modeText = splt[4]+splt[5]; - if(modeText.endsWith(";")) - modeText = modeText.substring(0, modeText.length()-1); - if(!modeText.split("\\:")[0].trim().equals("dim")) - throw new RuntimeException("Second argument "+modeText+" to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); - modeText = modeText.substring(modeText.indexOf(":")+1).trim(); - if(modeText.equals("\"col\"")) + if (splt.length > 5) + modeText = splt[4] + splt[5]; + if (modeText.endsWith(";")) + modeText = modeText.substring(0, modeText.length() - 1); + if (!modeText.split("\\:")[0].trim().equals("dim")) + throw new RuntimeException("Second argument " + modeText + + " to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); + modeText = modeText.substring(modeText.indexOf(":") + 1).trim(); + if (modeText.equals("\"col\"")) mode = false; - else if(modeText.equals("\"row\"")) + else if (modeText.equals("\"row\"")) mode = true; else - throw new RuntimeException("Invalid dim argument "+modeText+" to softmax"); + throw new RuntimeException("Invalid dim argument " + modeText + " to softmax"); } component = new Mean(mode); arg0 = splt[3]; - } - else if(splt[2].equals("max")) { + } else if (splt[2].equals("max")) { boolean mode = false; - if(splt.length>4) { + if (splt.length > 4) { String modeText = splt[4].trim(); - if(splt.length>5) - modeText = splt[4]+splt[5]; - if(modeText.endsWith(";")) - modeText = modeText.substring(0, modeText.length()-1); - if(!modeText.split("\\:")[0].trim().equals("dim")) - throw new RuntimeException("Second argument "+modeText+" to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); - modeText = modeText.substring(modeText.indexOf(":")+1).trim(); - if(modeText.equals("\"col\"")) + if (splt.length > 5) + modeText = splt[4] + splt[5]; + if (modeText.endsWith(";")) + modeText = modeText.substring(0, modeText.length() - 1); + if 
(!modeText.split("\\:")[0].trim().equals("dim")) + throw new RuntimeException("Second argument " + modeText + + " to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); + modeText = modeText.substring(modeText.indexOf(":") + 1).trim(); + if (modeText.equals("\"col\"")) mode = false; - else if(modeText.equals("\"row\"")) + else if (modeText.equals("\"row\"")) mode = true; else - throw new RuntimeException("Invalid dim argument "+modeText+" to softmax"); + throw new RuntimeException("Invalid dim argument " + modeText + " to softmax"); } component = new Max(mode); arg0 = splt[3]; - } - else if(splt[2].equals("matrix") || splt[2].equals("mat")) { - param(name, splt.length>5?parseConfigValue(splt[5]):0., - denseMatrixConstructor.zeroCopy((long)parseConfigValue(splt[3]), (long)parseConfigValue(splt[4])) - .setDimensionName(isDouble(splt[3])?null:splt[3], isDouble(splt[4])?null:splt[4])); + } else if (splt[2].equals("matrix") || splt[2].equals("mat")) { + param(name, splt.length > 5 ? parseConfigValue(splt[5]) : 0., + denseMatrixConstructor.zeroCopy((long) parseConfigValue(splt[3]), (long) parseConfigValue(splt[4])) + .setDimensionName(isDouble(splt[3]) ? null : splt[3], isDouble(splt[4]) ? null : splt[4])); routing = prevRouting; return this; - } - else if(splt[2].equals("vector") || splt[2].equals("vec")) { - param(name, - splt.length>4?parseConfigValue(splt[4]):0., - denseTensorConstructor.zeroCopy((long)parseConfigValue(splt[3])) - .setDimensionName(isDouble(splt[3])?null:splt[3])); + } else if (splt[2].equals("vector") || splt[2].equals("vec")) { + param(name, splt.length > 4 ? parseConfigValue(splt[4]) : 0., denseTensorConstructor + .zeroCopy((long) parseConfigValue(splt[3])).setDimensionName(isDouble(splt[3]) ? null : splt[3])); routing = prevRouting; return this; - } - else if(splt[2].equals("sort")) { - component = new Sort((int)(splt.length>4?parseConfigValue(splt[4]):0)) - .setDimensionName(splt.length<=4 || isDouble(splt[4])?null:splt[4]); + } else if (splt[2].equals("sort")) { + component = new Sort((int) (splt.length > 4 ? parseConfigValue(splt[4]) : 0)) + .setDimensionName(splt.length <= 4 || isDouble(splt[4]) ? null : splt[4]); arg0 = splt[3]; - } - else if(splt[2].equals("reshape")) { - component = new Reshape((long)(splt.length>4?parseConfigValue(splt[4]):1), - (long)(splt.length>5?parseConfigValue(splt[5]):1)) - .setDimensionName(splt.length>4&&isDouble(splt[4])?null:splt[4], - splt.length<=5 || isDouble(splt[5])?null:splt[5] - ); + } else if (splt[2].equals("reshape")) { + component = new Reshape((long) (splt.length > 4 ? parseConfigValue(splt[4]) : 1), + (long) (splt.length > 5 ? parseConfigValue(splt[5]) : 1)).setDimensionName( + splt.length > 4 && isDouble(splt[4]) ? null : splt[4], + splt.length <= 5 || isDouble(splt[5]) ? 
null : splt[5]); arg0 = splt[3]; - } - else if(splt[2].equals("relu")) { + } else if (splt[2].equals("relu")) { component = new Relu(); arg0 = splt[3]; - } - else if(splt[2].equals("lrelu")) { + } else if (splt[2].equals("lrelu")) { component = new LRelu(); arg0 = splt[3]; arg1 = splt[4]; - } - else if(splt[2].equals("prelu")) { + } else if (splt[2].equals("prelu")) { component = new PRelu(); arg0 = splt[3]; arg1 = splt[4]; - } - else if(splt[2].equals("tanh")) { + } else if (splt[2].equals("tanh")) { component = new Tanh(); arg0 = splt[3]; - } - else if(splt[2].equals("log")) { + } else if (splt[2].equals("log")) { component = new Log(); arg0 = splt[3]; - } - else if(splt[2].equals("monitor")) { + } else if (splt[2].equals("monitor")) { component = this.get(splt[3]); component.debugging = true; - } - else if(splt[2].equals("transpose")) { + } else if (splt[2].equals("transpose")) { component = new Transpose(); arg0 = splt[3]; - } - else if(splt[2].equals("sigmoid")) { + } else if (splt[2].equals("sigmoid")) { component = new Sigmoid(); arg0 = splt[3]; - } - else if(splt[2].equals("L1")) { + } else if (splt[2].equals("L1")) { boolean mode = false; - if(splt.length>4) { + if (splt.length > 4) { String modeText = splt[4].trim(); - if(splt.length>5) - modeText = splt[4]+splt[5]; - if(modeText.endsWith(";")) - modeText = modeText.substring(0, modeText.length()-1); - if(!modeText.split("\\:")[0].trim().equals("dim")) - throw new RuntimeException("Second argument "+modeText+" to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); - modeText = modeText.substring(modeText.indexOf(":")+1).trim(); - if(modeText.equals("\"col\"")) + if (splt.length > 5) + modeText = splt[4] + splt[5]; + if (modeText.endsWith(";")) + modeText = modeText.substring(0, modeText.length() - 1); + if (!modeText.split("\\:")[0].trim().equals("dim")) + throw new RuntimeException("Second argument " + modeText + + " to softmax should be a dim config (dim: \"row\" or dim: \"col\")"); + modeText = modeText.substring(modeText.indexOf(":") + 1).trim(); + if (modeText.equals("\"col\"")) mode = false; - else if(modeText.equals("\"row\"")) + else if (modeText.equals("\"row\"")) mode = true; else - throw new RuntimeException("Invalid dim argument "+modeText+" to softmax"); + throw new RuntimeException("Invalid dim argument " + modeText + " to softmax"); } component = new L1(mode); arg0 = splt[3]; - } - else if(splt[2].equals("exp")) { + } else if (splt[2].equals("exp")) { component = new Exp(); arg0 = splt[3]; - } - else if(splt[2].equals("nexp")) { + } else if (splt[2].equals("nexp")) { component = new NExp(); arg0 = splt[3]; - } - else if(splt[2].equals("complement")) { + } else if (splt[2].equals("complement")) { component = new Complement(); arg0 = splt[3]; - } - else if(splt[2].equals("repeat")) { + } else if (splt[2].equals("repeat")) { component = new Repeat(); arg0 = splt[3]; arg1 = splt[4]; - } - else if(splt[2].equals("dropout") || splt[2].equals("drop")) { + } else if (splt[2].equals("dropout") || splt[2].equals("drop")) { component = new Dropout(); arg0 = splt[3]; arg1 = splt[4]; - } - else if(splt[2].equals("attention") || splt[2].equals("att")) { + } else if (splt[2].equals("attention") || splt[2].equals("att")) { component = new Attention(); arg0 = splt[3]; arg1 = splt[4]; - } - else if(splt[2].equals("transpose")) { + } else if (splt[2].equals("transpose")) { component = new Transpose(); arg0 = splt[3]; - } - else if(splt[2].equals("from")) { + } else if (splt[2].equals("from")) { component = new From(); 
arg0 = splt[3]; - } - else if(splt[2].equals("to")) { + } else if (splt[2].equals("to")) { component = new To(); arg0 = splt[3]; - } - else if(splt[2].equals("reduce")) { + } else if (splt[2].equals("reduce")) { component = new Reduce(); arg0 = splt[3]; arg1 = splt[4]; - } - else if(splt[3].equals("|")) { + } else if (splt[3].equals("|")) { component = new Concat(); arg0 = splt[2]; arg1 = splt[4]; - } - else if(splt[3].equals("*")) { + } else if (splt[3].equals("*")) { component = new Multiply(); arg0 = splt[2]; arg1 = splt[4]; - } - else if(splt[3].equals("@")) { + } else if (splt[3].equals("@")) { component = new MatMul(); arg0 = splt[2]; arg1 = splt[4]; - } - else if(splt[3].equals("[")) { + } else if (splt[3].equals("[")) { component = new Gather(); arg0 = splt[4]; arg1 = splt[2]; - } - else if(splt[3].equals("x")) { + } else if (splt[3].equals("x")) { component = new Repeat(); arg0 = splt[2]; arg1 = splt[4]; - } - else if(functions.containsKey(splt[2])) { + } else if (functions.containsKey(splt[2])) { String[] args = functionSignatures.get(splt[2]).split("\\,"); - //if(args.length!=splt.length-3) - // throw new RuntimeException("Function "+splt[2]+" requires at most "+args.length+" arguments"); + // if(args.length!=splt.length-3) + // throw new RuntimeException("Function "+splt[2]+" requires at most + // "+args.length+" arguments"); int functionRepetition = functionUsages.get(splt[2]); - functionUsages.put(splt[2], functionRepetition+1); + functionUsages.put(splt[2], functionRepetition + 1); HashMap configStack = this.configurations; this.configurations = new HashMap(this.configurations); HashMap customNames = new HashMap(); - for(int i=0;i tokens = extractTokens(functions.get(splt[2])); HashSet keywords = new HashSet(); keywords.addAll(functions.keySet()); - keywords.addAll(Arrays.asList(".", "+", "-", "*", "/", "@", ",", "(", ")", ";", "=", "\"", - "max", "min", "vector", "matrix", "vec", "mat", "[", "]", "{", "}", "|", "#", "!", ":", - "extern", "softmax", - "from", "to", "reduce", "transpose", "attention", "att", "dropout", "drop", - "repeat", "exp", "nexp", "L1", "sigmoid", "transpose", "monitor", - "log", "tanh", "prelu", "lrelu", "relu", "reshape", "mean")); + keywords.addAll(Arrays.asList(".", "+", "-", "*", "/", "@", ",", "(", ")", ";", "=", "\"", "max", "min", + "vector", "matrix", "vec", "mat", "[", "]", "{", "}", "|", "#", "!", ":", "extern", "softmax", + "from", "to", "reduce", "transpose", "attention", "att", "dropout", "drop", "repeat", "exp", "nexp", + "L1", "sigmoid", "transpose", "monitor", "log", "tanh", "prelu", "lrelu", "relu", "reshape", + "mean")); keywords.addAll(this.components.keySet()); keywords.addAll(this.configurations.keySet()); - customNames.put("return", splt[0]+" = "); + customNames.put("return", splt[0] + " = "); String newExpr = ""; routing = prevRouting; // remove the function call from the routing boolean prevHash = false; boolean prevTemp = false; HashMap temp = new HashMap(); HashMap renameLater = new HashMap(); - for(int i=0;i0 && tokens.get(i-1).equals(":")) { - // token = token; // this is the case where we have configuration values - } - else if(customNames.containsKey(token)) + temp.put(token, id + 1); + renameLater.put(token, "_" + splt[2] + functionRepetition + "_stack" + id + "_" + token); + token = "_" + splt[2] + functionRepetition + "_stack" + id + "_" + token; + } else if (i < tokens.size() - 1 && tokens.get(i + 1).equals(":")) { + // token = token; // this is the case where we have configuration arguments + } else if (i > 0 && 
tokens.get(i - 1).equals(":")) { + // token = token; // this is the case where we have configuration values + } else if (customNames.containsKey(token)) token = customNames.get(token); - else if(!keywords.contains(token) && !isNumeric(token) && !isString(token) && !prevHash) - token = "_"+splt[2]+functionRepetition+"_"+token; + else if (!keywords.contains(token) && !isNumeric(token) && !isString(token) && !prevHash) + token = "_" + splt[2] + functionRepetition + "_" + token; prevHash = token.equals("#"); prevTemp = token.equals("!"); - if(token.equals(";") || token.equals("}")) { + if (token.equals(";") || token.equals("}")) { customNames.putAll(renameLater); renameLater.clear(); } - if(!prevHash && !prevTemp) + if (!prevHash && !prevTemp) newExpr += token; } this.operation(newExpr); this.configurations = configStack; return this; - } - else - throw new RuntimeException("Invalid operation: "+desc); - - if(arg0.contains(":")) { + } else + throw new RuntimeException("Invalid operation: " + desc); + + if (arg0.contains(":")) { String config = arg0.substring(0, arg0.indexOf(":")).trim(); - String value = arg0.substring(arg0.indexOf(":")+1).trim(); - if(value.equals("extern")) { - if(!this.configurations.containsKey(config)) - throw new RuntimeException("Required external config: "+config); + String value = arg0.substring(arg0.indexOf(":") + 1).trim(); + if (value.equals("extern")) { + if (!this.configurations.containsKey(config)) + throw new RuntimeException("Required external config: " + config); } this.config(config, parseConfigValue(value)); return this; } - if(arg0!=null) { + if (arg0 != null) { assertExists(arg0); component.addInput(components.get(arg0)); } - if(arg1!=null) { + if (arg1 != null) { assertExists(arg1); component.addInput(components.get(arg1)); } - + components.put(name, component); component.setDescription(name); - + return this; } + + /** + * Applies the {@link #createForwardValidity(List)} method for the given inputs + * to replace zero tensor dimensions (annotated with ? in symbolic definitions) + * with a valid dimension size and name, and then checks that all computation + * outcomes are valid with {@link #assertBackwardValidity()}. + * + * @param inputs Input tensors from which valid dimension sizes are inferred. + * @return The builder's instance. + * @see #autosize(List) + */ public ModelBuilder autosize(Tensor... inputs) { createForwardValidity(Arrays.asList(inputs)); assertBackwardValidity(); return this; } + /** + * Applies the {@link #createForwardValidity(List)} method for the given inputs + * to replace zero tensor dimensions (annotated with ? in symbolic definitions) + * with a valid dimension size and name, and then checks that all computation + * outcomes are valid with {@link #assertBackwardValidity()}. + * + * @param inputs Input tensors from which valid dimension sizes are inferred. + * @return The builder's instance. + * @see #autosize(Tensor...) + */ public ModelBuilder autosize(List<Tensor> inputs) { createForwardValidity(inputs); assertBackwardValidity(); @@ -1126,63 +1130,77 @@ public ModelBuilder autosize(List<Tensor> inputs) { } /** - * Asserts that all components parsed into a call graph with - * {@link #operation(String)} are eventually used by at least one {@link #out(String)} - * component. + * Asserts that a forward run of the architecture is valid given some input + * data. Prefer using input data from the {@link mklab.JGNN.core.empy} package + * to make this method run quickly, without performing any numerical operations. + * In addition to asserting validity, this operation simultaneously resizes any + * zero-dimension parameter tensors (created with the ?
configuration in parsed + * expressions) to automatically admit dimension names and sizes. + * + * @param inputs The same inputs you would pass to {@link Model#predict(List)}. * @return The builder's instance. - * @throws RuntimeException if not all execution graph branches lead to declared outputs. + * @throws RuntimeException If dimensions or dimension names mismatch, or if + * it is not possible to infer the actual size of + * automatically populated dimensions. + * @see #autosize(List) */ public ModelBuilder createForwardValidity(List<Tensor> inputs) { - if(inputs.size() != model.getInputs().size()) - throw new IllegalArgumentException("Incompatible input size: expected"+model.getInputs().size()+" inputs instead of "+inputs.size()); - for(NNOperation output : model.getOutputs()) + if (inputs.size() != model.getInputs().size()) + throw new IllegalArgumentException("Incompatible input size: expected " + model.getInputs().size() + + " inputs instead of " + inputs.size()); + for (NNOperation output : model.getOutputs()) output.clearPrediction(); - for(int i=0;i allFoundComponents = new HashSet<NNOperation>(); Stack<NNOperation> pending = new Stack<NNOperation>(); - for(NNOperation output : model.getOutputs()) { + for (NNOperation output : model.getOutputs()) { pending.add(output); allFoundComponents.add(output); } - while(!pending.isEmpty()) { + while (!pending.isEmpty()) { NNOperation component = pending.pop(); - for(NNOperation componentInput : component.getInputs()) - if(!allFoundComponents.contains(componentInput)) { + for (NNOperation componentInput : component.getInputs()) + if (!allFoundComponents.contains(componentInput)) { allFoundComponents.add(componentInput); pending.add(componentInput); } } HashSet<NNOperation> actualComponents = new HashSet<NNOperation>(components.values()); - for(NNOperation component : allFoundComponents) - if(!actualComponents.contains(component)) - System.err.println("The component "+component.describe()+" was not added by this builder to its model's pipeline"); - for(NNOperation component : actualComponents) - if(!allFoundComponents.contains(component)) { - throw new RuntimeException("The component "+component.describe()+" does not lead to an output"); - //System.err.println("The component "+component.describe()+" does not lead to an output and will be removed from the outputs of other components"); - //for(NNOperation other : actualComponents) - // other.getOutputs().remove(component); + for (NNOperation component : allFoundComponents) + if (!actualComponents.contains(component)) + System.err.println("The component " + component.describe() + + " was not added by this builder to its model's pipeline"); + for (NNOperation component : actualComponents) + if (!allFoundComponents.contains(component)) { + throw new RuntimeException("The component " + component.describe() + " does not lead to an output"); + // System.err.println("The component "+component.describe()+" does not lead to + // an output and will be removed from the outputs of other components"); + // for(NNOperation other : actualComponents) + // other.getOutputs().remove(component); } return this; } + /** * Creates a description of the built model's internal execution graph. + * * @return A String. * @see #print() */ @@ -1190,81 +1208,73 @@ public String describe() { getModel(); return routing; } + /** * Exports the built model's execution graph into a .dot format * representation. + * * @return A String to be pasted into GraphViz for visualization.
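As a usage sketch of the inspection utilities documented here (the symbolic expression syntax and all dimensions are illustrative; describe() and getExecutionGraphDot() are the methods defined in this hunk):

    ModelBuilder builder = new ModelBuilder()
            .var("x")                                                // architecture input
            .operation("h = relu(x@matrix(16, 8) + vector(8))")      // dense layer in symbolic form
            .operation("yhat = sigmoid(h@matrix(8, 1) + vector(1))")
            .out("yhat");
    System.out.println(builder.describe());               // compact textual routing
    System.out.println(builder.getExecutionGraphDot());   // paste the output into GraphvizOnline

Dimensions declared with ? instead of fixed sizes would additionally require an autosize call, as documented above.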
*/ public String getExecutionGraphDot() { getModel(); String ret = "//Can visualize at: https://dreampuf.github.io/GraphvizOnline"; - ret+="\ndigraph operations {"; - for(NNOperation component : components.values()) { - for(NNOperation input : component.getInputs()) - ret+="\n "+input.getDescription()+" -> "+component.getDescription(); + ret += "\ndigraph operations {"; + for (NNOperation component : components.values()) { + for (NNOperation input : component.getInputs()) + ret += "\n " + input.getDescription() + " -> " + component.getDescription(); } - for(NNOperation component : components.values()) - if(model.getOutputs().contains(component)) { - ret+="\n "+component.getDescription() - +"[label=\""+component.getDescription()+" = "+component.getSimpleDescription()+"\", shape=doubleoctagon]"; - } - else if(component instanceof Variable) - ret+="\n "+component.getDescription()+"[color=red,shape=octagon]"; - else if(component instanceof Constant) { - if(((Constant)component).get().size()==1) { - if(component.getDescription().startsWith("_")) - ret+="\n "+component.getDescription() - +"[shape=rectangle,color=red,label=\"" - +((Parameter)component).get().toDouble()+"\"]"; + for (NNOperation component : components.values()) + if (model.getOutputs().contains(component)) { + ret += "\n " + component.getDescription() + "[label=\"" + component.getDescription() + " = " + + component.getSimpleDescription() + "\", shape=doubleoctagon]"; + } else if (component instanceof Variable) + ret += "\n " + component.getDescription() + "[color=red,shape=octagon]"; + else if (component instanceof Constant) { + if (((Constant) component).get().size() == 1) { + if (component.getDescription().startsWith("_")) + ret += "\n " + component.getDescription() + "[shape=rectangle,color=red,label=\"" + + ((Parameter) component).get().toDouble() + "\"]"; else - ret+="\n "+component.getDescription() - +"[shape=rectangle,color=red,label=\"" - +component.getDescription()+" = "+((Parameter)component).get().toDouble()+"\"]"; - } - else if(component.getDescription().startsWith("_")){ - ret+="\n "+component.getDescription() - +"[shape=rectangle,color=red,label=\"" - +((Parameter)component).get().describe()+"\"]"; - } - else - ret+="\n "+component.getDescription() - +"[shape=rectangle,color=red,label=\"" - +component.getDescription()+" = "+((Parameter)component).get().describe()+"\"]"; - - } - else if(component instanceof Parameter) { - //ret+="\n "+component.getDescription()+"[color=green]"; - if(component.getDescription().startsWith("_")){ - ret+="\n "+component.getDescription() - +"[shape=rectangle,color=green,label=\"" - +((Parameter)component).get().describe()+"\"]"; - } - else - ret+="\n "+component.getDescription() - +"[shape=rectangle,color=green,label=\"" - +component.getDescription()+" = "+((Parameter)component).get().describe()+"\"]"; - - } - else if(component.getDescription().startsWith("_")){ - ret+="\n "+component.getDescription()+"[label=\""+component.getSimpleDescription()+"\"]"; - } - else - ret+="\n "+component.getDescription()+"[label=\""+component.getDescription()+" = "+component.getSimpleDescription()+"\"]"; - + ret += "\n " + component.getDescription() + "[shape=rectangle,color=red,label=\"" + + component.getDescription() + " = " + ((Parameter) component).get().toDouble() + "\"]"; + } else if (component.getDescription().startsWith("_")) { + ret += "\n " + component.getDescription() + "[shape=rectangle,color=red,label=\"" + + ((Parameter) component).get().describe() + "\"]"; + } else + ret += "\n " + 
component.getDescription() + "[shape=rectangle,color=red,label=\"" + + component.getDescription() + " = " + ((Parameter) component).get().describe() + "\"]"; + + } else if (component instanceof Parameter) { + // ret+="\n "+component.getDescription()+"[color=green]"; + if (component.getDescription().startsWith("_")) { + ret += "\n " + component.getDescription() + "[shape=rectangle,color=green,label=\"" + + ((Parameter) component).get().describe() + "\"]"; + } else + ret += "\n " + component.getDescription() + "[shape=rectangle,color=green,label=\"" + + component.getDescription() + " = " + ((Parameter) component).get().describe() + "\"]"; + + } else if (component.getDescription().startsWith("_")) { + ret += "\n " + component.getDescription() + "[label=\"" + component.getSimpleDescription() + "\"]"; + } else + ret += "\n " + component.getDescription() + "[label=\"" + component.getDescription() + " = " + + component.getSimpleDescription() + "\"]"; + ret += "\n}"; return ret; } + public ModelBuilder print() { getModel(); - for(NNOperation component : components.values()) - if(component instanceof Parameter) + for (NNOperation component : components.values()) + if (component instanceof Parameter) System.out.println(component.describe()); System.out.println(describe()); return this; } + public ModelBuilder printState() { getModel(); - for(NNOperation component : components.values()) + for (NNOperation component : components.values()) System.out.println(component.view()); return this; } diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/ModelTraining.java b/JGNN/src/main/java/mklab/JGNN/adhoc/ModelTraining.java new file mode 100644 index 00000000..092e986c --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/ModelTraining.java @@ -0,0 +1,172 @@ +package mklab.JGNN.adhoc; + +import mklab.JGNN.core.Matrix; +import mklab.JGNN.core.Slice; +import mklab.JGNN.core.ThreadPool; +import mklab.JGNN.nn.Loss; +import mklab.JGNN.nn.Model; +import mklab.JGNN.nn.Optimizer; +import mklab.JGNN.nn.optimizers.Adam; +import mklab.JGNN.nn.optimizers.BatchOptimizer; + +/** + * This is a helper class that automates the definition of training processes of + * {@link Model} instances by defining the number of epochs, loss functions, + * number of batches and the ability to use {@link ThreadPool} for parallelized + * batch computations. + * + * @author Emmanouil Krasanakis + */ +public class ModelTraining { + protected BatchOptimizer optimizer; + protected int numBatches = 1; + protected int epochs = 300; + protected int patience = Integer.MAX_VALUE; + protected boolean paralellization = false; + protected boolean stochasticGradientDescent = false; + protected Loss loss, validationLoss; + protected boolean verbose = false; + + public ModelTraining() { + } + + /** + * @param verbose Whether an error message will be printed. + * @deprecated This method was available in earlier JGNN versions but will be + * gradually phased out. Instead, wrap the validation loss within + * {@link mklab.JGNN.nn.loss.report.VerboseLoss} to replicate the + * same behavior. + */ + public ModelTraining setVerbose(boolean verbose) { + System.err.println("WARNING: The setVerbose method was available in earlier JGNN versions" + + "\n but will be gradually phased out. Instead, wrap the validation" + + "\n loss within a VerboseLoss instance to replicate the same" + + "\n behavior. 
Look at the losses of the mklab.JGNN.nn.loss.report" + "\n package for more types of training feedback."); + this.verbose = verbose; + return this; + } + + /** + * Sets the loss function with which training is scored on each batch. + * + * @param loss The loss function. + * @return this model training instance. + */ + public ModelTraining setLoss(Loss loss) { + this.loss = loss; + return this; + } + + /** + * Sets the loss function with which validation samples are scored, overriding + * the training loss for that purpose. + * + * @param loss The validation loss function. + * @return this model training instance. + */ + public ModelTraining setValidationLoss(Loss loss) { + this.validationLoss = loss; + return this; + } + + /** + * Sets an {@link Optimizer} instance to control parameter updates during + * training. If the provided optimizer is not an instance of + * {@link BatchOptimizer}, it is forcefully wrapped by the latter. Training + * calls the batch optimizer's update method after every batch. + * + * @param optimizer The desired optimizer. + * @return this model training instance. + * @see #train(Model, Matrix, Matrix, Slice, Slice) + */ + public ModelTraining setOptimizer(Optimizer optimizer) { + if (optimizer instanceof BatchOptimizer) + this.optimizer = (BatchOptimizer) optimizer; + else + this.optimizer = new BatchOptimizer(optimizer); + return this; + } + + /** + * Sets the number of batches training data slices should be split into. + * + * @param numBatches The desired number of batches. Default is 1. + * @return this model training instance. + * @see #setParallelizedStochasticGradientDescent(boolean) + */ + public ModelTraining setNumBatches(int numBatches) { + this.numBatches = numBatches; + return this; + } + + /** + * Sets whether the training strategy should reflect stochastic gradient descent + * by randomly sampling from the training dataset to obtain data samples. If + * true, this feature is enabled together with thread-based + * parallelization. Parallelization makes use of JGNN's + * {@link ThreadPool}. + * + * @param paralellization A boolean value indicating whether this feature is + * enabled. + * @return this model training instance. + * @see #setNumBatches(int) + * @see #train(Model, Matrix, Matrix, Slice, Slice) + */ + public ModelTraining setParallelizedStochasticGradientDescent(boolean paralellization) { + this.paralellization = paralellization; + this.stochasticGradientDescent = paralellization; + return this; + } + + /** + * Sets the maximum number of epochs for which training runs. If no patience has + * been set, training runs for exactly this number of epochs. + * + * @param epochs The maximum number of epochs. + * @return this model training instance. + * @see #setPatience(int) + */ + public ModelTraining setEpochs(int epochs) { + this.epochs = epochs; + return this; + } + + /** + * Sets the patience of the training strategy that performs early stopping. If + * training does not encounter a smaller validation loss for this number of + * epochs, it stops. + * + * @param patience The number of patience epochs. Default is Integer.MAX_VALUE + * to effectively disable this feature and let training always + * reach the maximum number of set epochs. + * @return this model training instance. + * @see #setEpochs(int) + */ + public ModelTraining setPatience(int patience) { + this.patience = patience; + return this; + } + + /** + * This is a leftover method from an earlier version of JGNN's interface.
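A sketch of how the setters above chain into a complete trainer configuration; NodeClassification is the subclass referenced by the deprecation notes in this patch, and CategoricalCrossEntropy is an illustrative loss choice from mklab.JGNN.nn.loss:

    ModelTraining trainer = new NodeClassification()
            .setOptimizer(new Adam(0.01))            // non-batch optimizers are wrapped by BatchOptimizer
            .setLoss(new CategoricalCrossEntropy())
            .setValidationLoss(new CategoricalCrossEntropy())
            .setEpochs(300)                          // upper bound on training epochs
            .setPatience(100)                        // early stopping window on validation loss
            .setNumBatches(10)
            .setParallelizedStochasticGradientDescent(true);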
+ * + * @deprecated This method has been moved to + * {@link mklab.JGNN.adhoc.train.NodeClassification#train(Model, Matrix, Matrix, Slice, Slice)} + */ + public Model train(Model model, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples) { + throw new RuntimeException( + "The ModelTraining.train method has been moved to NodeClassification.train since version 1.3.28. " + + "\n For valid code, create a NodeClassification instance instead of a ModelTraining instance. " + + "\n This method may be made abstract or removed completely in future versions, and will probably be replaced" + + "\n with a uniform interface for any predictive task."); + } + + /** + * Retrieves the learning rate (lr), epochs, batches, and patience parameters + * from the configurations of a {@link ModelBuilder}. + * + * @param modelBuilder The model builder whose configurations are read. + * @return this model training instance. + */ + public ModelTraining configFrom(ModelBuilder modelBuilder) { + setOptimizer(new Adam(modelBuilder.getConfigOrDefault("lr", 0.01))); + setEpochs((int) modelBuilder.getConfigOrDefault("epochs", epochs)); + numBatches = (int) modelBuilder.getConfigOrDefault("batches", numBatches); + setPatience((int) modelBuilder.getConfigOrDefault("patience", patience)); + return this; + } +} diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Citeseer.java b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Citeseer.java index 31a92371..e842c388 100644 --- a/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Citeseer.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Citeseer.java @@ -4,13 +4,14 @@ /** * Downloads and constructs the Citeseer node classification {@link Dataset}. + * * @author Emmanouil Krasanakis */ public class Citeseer extends Dataset { public Citeseer() { - downloadIfNotExists("downloads/citeseer/citeseer.feats", + downloadIfNotExists("downloads/citeseer/citeseer.feats", "https://github.com/maniospas/graph-data/raw/main/citeseer/citeseer.feats"); - downloadIfNotExists("downloads/citeseer/citeseer.graph", + downloadIfNotExists("downloads/citeseer/citeseer.graph", "https://github.com/maniospas/graph-data/raw/main/citeseer/citeseer.graph"); loadFeatures("downloads/citeseer/citeseer.feats"); loadGraph("downloads/citeseer/citeseer.graph"); diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Cora.java b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Cora.java index 7292cce9..e071afef 100644 --- a/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Cora.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Cora.java @@ -4,13 +4,14 @@ /** * Downloads and constructs the Cora node classification {@link Dataset}. + * * @author Emmanouil Krasanakis */ public class Cora extends Dataset { public Cora() { - downloadIfNotExists("downloads/cora/cora.feats", + downloadIfNotExists("downloads/cora/cora.feats", "https://github.com/maniospas/graph-data/raw/main/cora/cora.feats"); - downloadIfNotExists("downloads/cora/cora.graph", + downloadIfNotExists("downloads/cora/cora.graph", "https://github.com/maniospas/graph-data/raw/main/cora/cora.graph"); loadFeatures("downloads/cora/cora.feats"); loadGraph("downloads/cora/cora.graph"); diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Pubmed.java b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Pubmed.java index 144dbefe..8b81a583 100644 --- a/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Pubmed.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/Pubmed.java @@ -4,13 +4,14 @@ /** * Downloads and constructs the Pubmed node classification {@link Dataset}.
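The dataset classes in this part of the patch are consumed in a couple of lines; a sketch assuming the Dataset base class exposes features(), labels(), and graph() accessors (the base class is not part of this diff):

    Dataset dataset = new Pubmed();          // downloads into downloads/pubmed/ on first use
    Matrix features = dataset.features();    // node feature matrix
    Matrix labels = dataset.labels();        // one-hot node labels
    Matrix adjacency = dataset.graph();      // adjacency matrix, normalized separately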
+ * * @author Emmanouil Krasanakis */ public class Pubmed extends Dataset { public Pubmed() { - downloadIfNotExists("downloads/pubmed/pubmed.feats", + downloadIfNotExists("downloads/pubmed/pubmed.feats", "https://github.com/maniospas/graph-data/raw/main/pubmed/pubmed.feats"); - downloadIfNotExists("downloads/pubmed/pubmed.graph", + downloadIfNotExists("downloads/pubmed/pubmed.graph", "https://github.com/maniospas/graph-data/raw/main/pubmed/pubmed.graph"); loadFeatures("downloads/pubmed/pubmed.feats"); loadGraph("downloads/pubmed/pubmed.graph"); diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/package-info.java b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/package-info.java new file mode 100644 index 00000000..d759d345 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/datasets/package-info.java @@ -0,0 +1,8 @@ +/** + * This package contains datasets for out-of-the-box experimentation. When run + * for the first time, the datasets also download their data in a + * downloads/ directory in the running path. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.adhoc.datasets; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/package-info.java b/JGNN/src/main/java/mklab/JGNN/adhoc/package-info.java new file mode 100644 index 00000000..bb1ab420 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/package-info.java @@ -0,0 +1,7 @@ +/** + * Contains classes that simplify data loading, model building, and training. + * Top-level base classes are extended in sub-packages. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.adhoc; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/FastBuilder.java b/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/FastBuilder.java index 3cd7b41f..91195241 100--- a/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/FastBuilder.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/FastBuilder.java @@ -8,10 +8,10 @@ import mklab.JGNN.adhoc.ModelBuilder; /** - * Extends the capabilities of {@link LayeredBuilder} to use - * for node classification. It accepts the adjacency graph in the constructor, - * to be used with the symbol A in operations or layer definitions, - * and node features. + * Extends the capabilities of {@link LayeredBuilder} for use in node + * classification. It accepts the adjacency graph in the constructor, to be used + * with the symbol A in operations or layer definitions, and node + * features. * * @author Emmanouil Krasanakis * @see #classify() @@ -19,16 +19,19 @@ public class FastBuilder extends ModelBuilder { private int layer = 0; private HashMap<String, Integer> rememberAs = new HashMap<String, Integer>(); + /** * @deprecated This constructor should only be used by loading. */ public FastBuilder() { } + /** - * Creates a graph neural network builder from an - * normalized adjacency matrix and a node feature matrix. + * Creates a graph neural network builder from a normalized adjacency matrix + * and a node feature matrix. + * * @param adjacency The pre-normalized adjacency matrix. - * @param features The node feature matrix. + * @param features The node feature matrix.
*/ public FastBuilder(Matrix adjacency, Matrix features) { long numFeatures = features.getCols(); @@ -36,29 +39,33 @@ public FastBuilder(Matrix adjacency, Matrix features) { constant("A", adjacency); constant("h0", features); } + protected String saveCommands() { String ret = super.saveCommands(); - for(String rememberKey : rememberAs.keySet()) - ret += "remember "+rememberKey+" as "+rememberAs.get(rememberKey)+"\n"; - ret += "layer "+layer+"\n"; + for (String rememberKey : rememberAs.keySet()) + ret += "remember " + rememberKey + " as " + rememberAs.get(rememberKey) + "\n"; + ret += "layer " + layer + "\n"; return ret; } + protected boolean loadCommand(String command, String data) { - if(command.equals("layer")) { + if (command.equals("layer")) { layer = Integer.parseInt(data); return true; } - if(command.equals("remember")) { + if (command.equals("remember")) { int pos = data.lastIndexOf(" as "); - rememberAs.put(data.substring(0, pos), Integer.parseInt(data.substring(pos+4))); + rememberAs.put(data.substring(0, pos), Integer.parseInt(data.substring(pos + 4))); return true; } return super.loadCommand(command, data); } + /** * Remembers the last layer's output per a given identifier so that {layerId} * within future {@link #layer(String)} definitions is made to refer to the * current layer. + * * @param layerId An identifier to remember the last layer's output as. * @return The model builder. */ @@ -66,139 +73,159 @@ public FastBuilder rememberAs(String layerId) { rememberAs.put(layerId, layer); return this; } + /** - * Applies an {@link #operation(String)} and increases the layer identifier count. + * Applies an {@link #operation(String)} and increases the layer identifier + * count. + * * @param expression A parsable expression. * @return this builder. * @see #layerRepeat(String, int) */ public FastBuilder layer(String expression) { - expression = expression - .replace("{l+1}", ""+(layer+1)) - .replace("{l}", ""+layer); - for(String layerId : rememberAs.keySet()) - expression = expression.replace("{"+layerId+"}", ""+rememberAs.get(layerId)); + expression = expression.replace("{l+1}", "" + (layer + 1)).replace("{l}", "" + layer); + for (String layerId : rememberAs.keySet()) + expression = expression.replace("{" + layerId + "}", "" + rememberAs.get(layerId)); layer += 1; super.operation(expression); return this; } + /** - * Adds a classification layer that gather the number of inputs nodes - * and applies softmax on all of them. + * Adds a classification layer that gathers the outputs of input nodes and + * applies softmax on all of them. + * * @return this builder. */ public FastBuilder classify() { var("nodes"); layer("h{l+1}=h{l}[nodes]"); layer("h{l+1}=softmax(h{l}, dim: \"row\")"); - out("h"+layer); + out("h" + layer); return this; } + /** - * Repeats a {@link #layer(String)} definition a number of times. - * Ideal for building deep architectures. + * Repeats a {@link #layer(String)} definition a number of times. Ideal for + * building deep architectures. + * * @param expression The expression to repeat for each layer. - * @param times The number of times to repeat the expression. + * @param times The number of times to repeat the expression. * @return this builder. * * @see #futureConfigs(String, Function, int) * @see #futureConstants(String, Function, int) */ public FastBuilder layerRepeat(String expression, int times) { - for(int i=0;i<times;i++) + for (int i = 0; i < times; i++) layer(expression); return this; } + /** - * Defines a number of {@link #config(String, double)} symbols involving a {l} - * notation, for example so that they can be used during {@link #layerRepeat(String, int)}.
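A sketch of the FastBuilder flow just documented (layer sizes and the class count are illustrative; A and h0 are the constants registered by the constructor, and ? dimensions are resolved by autosize as documented earlier in this patch):

    FastBuilder builder = new FastBuilder(adjacency, features)
            .layer("h{l+1} = relu(A@h{l}@matrix(?, 64) + vector(64))")
            .layerRepeat("h{l+1} = relu(A@h{l}@matrix(64, 64) + vector(64))", 2)
            .layer("h{l+1} = A@h{l}@matrix(64, 7) + vector(7)")   // 7 = illustrative class count
            .classify();                                          // gathers nodes, applies row softmax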
- * @param config The configuration symbols (these should involve {l}). - * @param func A lambda Java function to calculate the configuration's value. This takes - * as input an integer (starting from 0 for the current layer) and adds one for each - * subsequently declared symbol. - * @param depth The number of future layers expected to use the symbols. + * Defines a number of {@link #config(String, double)} symbols involving a + * {l} notation, for example so that they can be used during + * {@link #layerRepeat(String, int)}. + * + * @param config The configuration symbols (these should involve + * {l}). + * @param func A lambda Java function to calculate the configuration's value. + * This takes as input an integer (starting from 0 for the current + * layer) and adds one for each subsequently declared symbol. + * @param depth The number of future layers expected to use the symbols. * @return this builder. * * @see #futureConstants(String, Function, int) */ public FastBuilder futureConfigs(String config, Function func, int depth) { - for(int layer=this.layer;layer{l} + * Defines a number of {@link #constant(String, double)} symbols involving a + * {l} * notation, for example so that they can be used during {@link #layerRepeat(String, int)}. - * @param constantName The configuration symbols (these should involve {l}). - * @param func A lambda Java function to calculate the constant's value. This takes - * as input an integer (starting from 0 for the current layer) and adds one for each - * subsequently declared symbol. - * @param depth The number of future layers expected to use the constant. + * @param constantName The configuration symbols (these should involve {l}). + * @param func A lambda Java function to calculate the constant's value. + * This takes as input an integer (starting from 0 for the + * current layer) and adds one for each subsequently + * declared symbol. + * @param depth The number of future layers expected to use the constant. * @return this builder. * * @see #futureConstants(String, Function, int) */ public FastBuilder futureConstants(String constantName, Function func, int depth) { - for(int layer=this.layer;layerconcat within normal operations.) + * Concatenates horizontally the output of a number of given layers, starting + * from the last one and going backwards. (For concatenation of specific layers + * just use concat within normal operations.) + * * @param depth The number of given layers to concatenate. * @return this builder. */ public FastBuilder concat(int depth) { String expression = ""; - for(int i=layer;i>layer-depth;i--) { - if(!expression.isEmpty()) + for (int i = layer; i > layer - depth; i--) { + if (!expression.isEmpty()) expression += " | "; - expression += "h"+i; + expression += "h" + i; } - layer("h{l+1} = "+expression); + layer("h{l+1} = " + expression); return this; } } diff --git a/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/LayeredBuilder.java b/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/LayeredBuilder.java index 0cf78404..289bf7b2 100644 --- a/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/LayeredBuilder.java +++ b/JGNN/src/main/java/mklab/JGNN/adhoc/parsers/LayeredBuilder.java @@ -7,10 +7,10 @@ import mklab.JGNN.adhoc.ModelBuilder; /** - * Extends the capabilities of the {@link ModelBuilder} - * with the ability to define multilayer (e.g. deep) neural architectures. 
- * The symbols {l} and {l+1} are replaced in all expressions - * with appropriate layer identifiers (these increase by one each time a new + * Extends the capabilities of the {@link ModelBuilder} with the ability to + * define multilayer (e.g. deep) neural architectures. The symbols + * {l} and {l+1} are replaced in all expressions with + * appropriate layer identifiers (these increase by one each time a new + * {@link #layer(String)} is defined). * * @see #layer(String) @@ -22,8 +22,9 @@ public class LayeredBuilder extends ModelBuilder { private int layer = 0; private HashMap<String, Integer> rememberAs = new HashMap<String, Integer>(); + /** - * Instantiates a layered builder with input name h0. This can be + * Instantiates a layered builder with input name h0. This can be * used by future expressions involving h{l}. You can add more * architecture inputs normally with {@link #var(String)}. * @@ -32,26 +33,30 @@ public class LayeredBuilder extends ModelBuilder { public LayeredBuilder() { this("h0"); } + /** - * Instantiates a layered builder with the given symbol as an input name. - * If you plan to immediately use a {@link #layer(String)} expression - * that involves X{l}, where X is some symbol, - * set X0 as the architecture's input. You can add more - * architecture inputs normally with {@link #var(String)}. + * Instantiates a layered builder with the given symbol as an input name. If you + * plan to immediately use a {@link #layer(String)} expression that involves + * X{l}, where X is some symbol, set X0 + * as the architecture's input. You can add more architecture inputs normally + * with {@link #var(String)}. * * @param inputName The symbol to use as the built architecture's input. */ public LayeredBuilder(String inputName) { var(inputName); } + public LayeredBuilder var(String inputName) { super.var(inputName); return this; } + /** * Sets the current layer identifier to a specific symbol layerId - * so that future usage of {layerId} is automatically replaced with + * so that future usage of {layerId} is automatically replaced with * the identifier. + * * @param layerId The symbol to set to the current layer identifier. * @return this layer builder. */ @@ -59,132 +64,145 @@ public LayeredBuilder rememberAs(String layerId) { rememberAs.put(layerId, layer); return this; } + /** - * Applies an {@link #operation(String)} and increases the layer identifier count. + * Applies an {@link #operation(String)} and increases the layer identifier + * count. + * * @param expression A parsable expression. * @return this layer builder. * @see #layerRepeat(String, int) */ public LayeredBuilder layer(String expression) { - expression = expression - .replace("{l+1}", ""+(layer+1)) - .replace("{l}", ""+layer); - for(String layerId : rememberAs.keySet()) - expression = expression.replace("{"+layerId+"}", ""+rememberAs.get(layerId)); + expression = expression.replace("{l+1}", "" + (layer + 1)).replace("{l}", "" + layer); + for (String layerId : rememberAs.keySet()) + expression = expression.replace("{" + layerId + "}", "" + rememberAs.get(layerId)); layer += 1; return operation(expression); } - /*public LayeredBuilder classify() { - layer("h{l+1}=h{l}[nodes]"); - layer("h{l+1}=softmax(h{l}, row)"); - out("h"+layer); - return this; - }*/ + + /* + * public LayeredBuilder classify() { layer("h{l+1}=h{l}[nodes]"); + * layer("h{l+1}=softmax(h{l}, row)"); out("h"+layer); return this; } + */ /** - * Repeats a {@link #layer(String)} definition a number of times. - * Ideal for building deep architectures.
+ * Repeats a {@link #layer(String)} definition a number of times. Ideal for + * building deep architectures. + * * @param expression The expression to repeat for each layer. - * @param times The number of times to repeat the expression. + * @param times The number of times to repeat the expression. * @return this layer builder. * * @see #futureConfigs(String, Function, int) * @see #futureConstants(String, Function, int) */ public LayeredBuilder layerRepeat(String expression, int times) { - for(int i=0;iconcat within normal operations.) + * Concatenates horizontally the output of a number of given layers, starting + * from the last one and going backwards. (For concatenation of specific layers + * just use concat within normal operations.) + * * @param depth The number of given layers to concatenate. * @return this layer builder. */ public LayeredBuilder concat(int depth) { String expression = ""; - for(int i=layer;i>layer-depth;i--) { - if(!expression.isEmpty()) + for (int i = layer; i > layer - depth; i--) { + if (!expression.isEmpty()) expression += " | "; - expression += "h"+i; + expression += "h" + i; } - layer("h{l+1} = "+expression); + layer("h{l+1} = " + expression); return this; } + /** - * Defines a number of {@link #config(String, double)} symbols involving a {l} - * notation, for example so that they can be used during {@link #layerRepeat(String, int)}. - * @param config The configuration symbols (these should involve {l}). - * @param func A lambda Java function to calculate the configuration's value. This takes - * as input an integer (starting from 0 for the current layer) and adds one for each - * subsequently declared symbol. - * @param depth The number of future layers expected to use the symbols. + * Defines a number of {@link #config(String, double)} symbols involving a + * {l} notation, for example so that they can be used during + * {@link #layerRepeat(String, int)}. + * + * @param config The configuration symbols (these should involve + * {l}). + * @param func A lambda Java function to calculate the configuration's value. + * This takes as input an integer (starting from 0 for the current + * layer) and adds one for each subsequently declared symbol. + * @param depth The number of future layers expected to use the symbols. * @return this layer builder. * * @see #futureConstants(String, Function, int) */ public LayeredBuilder futureConfigs(String config, Function func, int depth) { - for(int layer=this.layer;layer{l} + * Defines a number of {@link #constant(String, double)} symbols involving a + * {l} * notation, for example so that they can be used during {@link #layerRepeat(String, int)}. - * @param constantName The configuration symbols (these should involve {l}). - * @param func A lambda Java function to calculate the constant's value. This takes - * as input an integer (starting from 0 for the current layer) and adds one for each - * subsequently declared symbol. - * @param depth The number of future layers expected to use the constant. + * @param constantName The configuration symbols (these should involve {l}). + * @param func A lambda Java function to calculate the constant's value. + * This takes as input an integer (starting from 0 for the + * current layer) and adds one for each subsequently + * declared symbol. + * @param depth The number of future layers expected to use the constant. * @return this layer builder. 
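For comparison, the same utilities on a plain LayeredBuilder (sizes illustrative; note that concat(2) advances the layer counter, so the final output below is h5):

    LayeredBuilder builder = new LayeredBuilder()          // input symbol h0
            .layer("h{l+1} = relu(h{l}@matrix(784, 64) + vector(64))")
            .layerRepeat("h{l+1} = relu(h{l}@matrix(64, 64) + vector(64))", 2)
            .concat(2)                                     // h4 = h3 | h2
            .layer("h{l+1} = softmax(h{l}@matrix(128, 10) + vector(10), dim: \"row\")");
    builder.out("h5");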
* * @see #futureConstants(String, Function, int) */ public LayeredBuilder futureConstants(String constantName, Function func, int depth) { - for(int layer=this.layer;layerPaths.get("models.nn") - * from disk with {@link Files#readAllLines(Path)}, and parses - * the loaded String. + * Parses a Neuralang source code file. Reads a file like + * Paths.get("models.nn") from disk with + * {@link Files#readAllLines(Path)}, and parses the loaded String. + * * @param path The source code file. * @return The Neuralang builder's instance. * @see #parse(String) @@ -38,30 +40,30 @@ public Neuralang parse(Path path) { } return this; } - + /** * Parses Neuralang source code by handling function declarations in addition to * other expressions. + * * @param text The source code to parse. * @return The Neuralang builder's instance. */ public Neuralang parse(String text) { int depth = 0; String progress = ""; - for(int i=0;imodel (the same instance as the first + * argument). + */ + public Model train(Model model, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples) { + // ACTUΑL TRAINING + double minLoss = Double.POSITIVE_INFINITY; + HashMap minLossParameters = new HashMap(); + int currentPatience = patience; + for (int epoch = 0; epoch < epochs; epoch++) { + // long tic = System.currentTimeMillis(); + if (!stochasticGradientDescent) + trainingSamples.shuffle(epoch); + double[] batchLosses = new double[numBatches]; + for (int batch = 0; batch < numBatches; batch++) { + if (stochasticGradientDescent) + trainingSamples.shuffle(epoch); + int start = (trainingSamples.size() / numBatches) * batch; + int end = Math.min(trainingSamples.size(), start + (trainingSamples.size() / numBatches)); + int batchId = batch; + Matrix trainFeatures = new WrapRows(features.accessRows(trainingSamples.range(start, end))) + .setDimensionName(features.getRowName(), features.getColName()); + Matrix trainLabels = new WrapRows(labels.accessRows(trainingSamples.range(start, end))); + // .setDimensionName(labels.getRowName(), labels.getColName()); + // System.out.println(System.currentTimeMillis()-tic); + Runnable batchCode = new Runnable() { + @Override + public void run() { + List outputs; + outputs = model.train(loss, optimizer, Arrays.asList(trainFeatures), + Arrays.asList(trainLabels)); + if (stochasticGradientDescent) + optimizer.updateAll(); + if (validationSamples == null) + batchLosses[batchId] = loss.evaluate(outputs.get(0), trainLabels); + } + }; + if (paralellization) + ThreadPool.getInstance().submit(batchCode); + else + batchCode.run(); + // System.out.println(System.currentTimeMillis()-tic); + } + if (paralellization) + ThreadPool.getInstance().waitForConclusion(); + if (!stochasticGradientDescent) + optimizer.updateAll(); + double totalLoss = 0; + if (validationSamples == null) + for (double batchLoss : batchLosses) + totalLoss += batchLoss / numBatches; + else { + Memory.scope().enter(); + Matrix validationFeatures = new WrapRows(features.accessRows(validationSamples)); + Matrix validationLabels = new WrapRows(labels.accessRows(validationSamples)); + List outputs = model.predict(Arrays.asList(validationFeatures)); + totalLoss = (validationLoss != null ? validationLoss : loss).evaluate(outputs.get(0), validationLabels);// outputs.get(0).multiply(-1).cast(Matrix.class).selfAdd(validationLabels).selfAbs().norm(); + Memory.scope().exit(); + // for(long i=0;ithis Distribution. */ public Distribution setSeed(long seed); + /** * Retrieves a new sample from the distribution. 
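The Distribution contract introduced here reads as a small fluent API; a sketch assuming Normal is one of its implementations (e.g., in mklab.JGNN.core.distribution, which this diff does not touch):

    Distribution dist = new Normal()     // assumed implementation of Distribution
            .setMean(0)
            .setDeviation(1)
            .setSeed(42);                // fixed seed for reproducible draws
    double draw = dist.sample();         // one sample from N(0, 1)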
+ * * @return A double value. */ public double sample(); + /** * Sets the mean of the distribution. + * * @param mean The new mean. * @return this Distribution. */ public Distribution setMean(double mean); + /** * Sets the standard deviation of the distribution. + * * @param std The new standard deviation. * @return this Distribution. */ public Distribution setDeviation(double std); + /** * Retrieves the distribution's mean. + * * @return The mean value. */ public double getMean(); + /** * Retrieves the distribution's standard deviation. + * * @return The standard deviation. */ public double getDeviation(); diff --git a/JGNN/src/main/java/mklab/JGNN/core/Matrix.java b/JGNN/src/main/java/mklab/JGNN/core/Matrix.java index 326ef000..fbd09e37 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/Matrix.java +++ b/JGNN/src/main/java/mklab/JGNN/core/Matrix.java @@ -18,10 +18,10 @@ /** * This class provides an abstract implementation of Matrix functionalities. - * Matrices inherit {@link Tensor} operations, such as addition, - * element-by-element multiplication, randomizing them and producing zero copies. - * Additionally, matrix multiplication, transposition and access operations are - * provided. + * Matrices inherit {@link Tensor} operations, such as addition, + * element-by-element multiplication, randomizing them and producing zero + * copies. Additionally, matrix multiplication, transposition and access + * operations are provided. * * @author Emmanouil Krasanakis */ @@ -30,27 +30,28 @@ public abstract class Matrix extends Tensor { private long cols; private String rowName; private String colName; - - + protected Matrix(long rows, long cols) { - init(rows*cols); + init(rows * cols); this.rows = rows; this.cols = cols; } - + public String getRowName() { return rowName; } - + public String getColName() { return colName; } - + /** - * Sets a name for the matrix's row and column dimensions. If set, names are checked for - * compatibility during matrix operations. + * Sets a name for the matrix's row and column dimensions. If set, names are + * checked for compatibility during matrix operations. + * * @param rowName The new row name or null to remove current name. - * @param colName The new column name or null to remove current name. + * @param colName The new column name or null to remove current + * name. * @return this Matrix instance. * @see #getRowName() * @see #getColName() @@ -64,10 +65,11 @@ public Matrix setDimensionName(String rowName, String colName) { setColName(colName); return this; } - + /** * Sets a name for the matrix's row dimension. If set, names are checked for * compatibility during matrix operations. + * * @param rowName The new row name or null to remove current name. * @return this Matrix instance. * @see #getRowName() @@ -78,11 +80,13 @@ public Matrix setRowName(String rowName) { this.rowName = rowName; return this; } - + /** * Sets a name for the matrix's column dimension. If set, names are checked for * compatibility during matrix operations. - * @param colName The new column name or null to remove current name. + * + * @param colName The new column name or null to remove current + * name. * @return this Matrix instance. * @see #getColName() * @see #setDimensionName(String, String) @@ -92,26 +96,29 @@ public Matrix setColName(String colName) { this.colName = colName; return this; } - + /** - * Retrieves an iterable that traverses (row, col) entry pairs - * of non zero entries. + * Retrieves an iterable that traverses (row, col) entry pairs of non zero + * entries. 
+ * * @return An Entry iterable. * @see #getNonZeroElements() */ public abstract Iterable<Entry<Long, Long>> getNonZeroEntries(); + public Matrix setDimensionName(Tensor other) { super.setDimensionName(other); - if(rowName==null) + if (rowName == null) rowName = other.cast(Matrix.class).getRowName(); - if(colName==null) + if (colName == null) colName = other.cast(Matrix.class).getColName(); return this; } - + /** - * Creates a Matrix with the same class and dimensions and all element set to zero. + * Creates a Matrix with the same class and dimensions and all elements set to + * zero. + * * @return A Matrix with the same class and dimensions. * @see #zeroCopy(long, long) */ @@ -119,9 +126,12 @@ public Matrix setDimensionName(Tensor other) { public Matrix zeroCopy() { return zeroCopy(rows, cols).setDimensionName(this).cast(Matrix.class); } + /** - * Creates a Matrix with the same class and dimensions and all element set to zero. This - * checks that the copy has a total number of elements equal to the given size. + * Creates a Matrix with the same class and dimensions and all elements set to + * zero. This checks that the copy has a total number of elements equal to the + * given size. + * * @param size The desired size of the matrix. * @return A Matrix with the same class and dimensions. * @throws RuntimeException If the resulting tensor is not of the same size. @@ -129,32 +139,36 @@ public Matrix zeroCopy() { */ @Override public Tensor zeroCopy(long size) { - if(size!=size()) - throw new RuntimeException("To avoid ambiguity, desired matrix zeroCopy size "+size+" can only be equal to rows "+rows+" * "+cols); + if (size != size()) + throw new RuntimeException("To avoid ambiguity, desired matrix zeroCopy size " + size + + " can only be equal to rows " + rows + " * " + cols); return zeroCopy(rows, cols); } + /** - * Creates a tensor of the same class and all elements set to zero, - * but size and dimension names are obtained from a prototype tensor. + * Creates a tensor of the same class and all elements set to zero, but size and + * dimension names are obtained from a prototype tensor. */ public Tensor zeroCopy(Tensor prototype) { Matrix prototypeMatrix = prototype.cast(Matrix.class); - return zeroCopy(prototypeMatrix.getRows(), prototypeMatrix.getCols()) - .setRowName(prototypeMatrix.getRowName()) - .setColName(prototypeMatrix.getColName()) - .setDimensionName(prototype.getDimensionName()); + return zeroCopy(prototypeMatrix.getRows(), prototypeMatrix.getCols()).setRowName(prototypeMatrix.getRowName()) + .setColName(prototypeMatrix.getColName()).setDimensionName(prototype.getDimensionName()); } + /** - * Creates a matrix of the same class and all element set to zero, but with - * a given number of rows and columns. + * Creates a matrix of the same class and all elements set to zero, but with a + * given number of rows and columns. + * * @param rows The number of rows of the matrix. * @param cols The number of columns of the matrix. * @return A Matrix of the same class. * @see #zeroCopy() */ public abstract Matrix zeroCopy(long rows, long cols); + /** * Retrieves the number of rows of a matrix. + * * @return The number of rows. * @see #getCols() * @see #getDimensionSize(String) @@ -162,8 +176,10 @@ public Tensor zeroCopy(Tensor prototype) { public long getRows() { return rows; } + /** * Retrieves the number of columns of a matrix. + * * @return The number of columns.
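Dimension names, as documented around here, turn silent shape bugs into descriptive exceptions and allow lookups by name; a minimal sketch using the DenseMatrix implementation referenced elsewhere in this file:

    Matrix features = new DenseMatrix(100, 16).setDimensionName("nodes", "feats");
    long numNodes = features.getDimensionSize("nodes");    // 100
    long numFeats = features.getDimensionSize("feats");    // 16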
* @see #getRows() * @see #getDimensionSize(String) @@ -171,155 +187,168 @@ public long getRows() { public long getCols() { return cols; } + /** * Retrieves the value of the dimension with the given name. + * * @param name The given name. - * @return Either the number of rows or the number of cols, depending on which dimension - * the given name matches, - * @throws RuntimeException if both matrix dimensions have the same name or if the given - * name is not a matrix dimension. - *@see #getRows() - *@see #getCols() - *@see #setDimensionName(String, String) - *@see #setColName(String) - *@see #setRowName(String) + * @return Either the number of rows or the number of cols, depending on which + * dimension the given name matches. + * @throws RuntimeException if both matrix dimensions have the same name or if + * the given name is not a matrix dimension. + * @see #getRows() + * @see #getCols() + * @see #setDimensionName(String, String) + * @see #setColName(String) + * @see #setRowName(String) */ public long getDimensionSize(String name) { - if(rowName!=null && colName!=null && rowName.equals(colName)) - throw new RuntimeException("Cannot call getDimension for the same row and col " - + "dimention names in "+describe()); - if(rowName!=null && name.equals(rowName)) + if (rowName != null && colName != null && rowName.equals(colName)) + throw new RuntimeException( + "Cannot call getDimension for the same row and col " + "dimension names in " + describe()); + if (rowName != null && name.equals(rowName)) return getRows(); - if(colName!=null && name.equals(colName)) + if (colName != null && name.equals(colName)) return getCols(); - throw new RuntimeException("No named dimensions in "+name); + throw new RuntimeException("No dimension named " + name); } + /** * Retrieves the value stored at a matrix element. + * * @param row The element's row. * @param col The element's column. * @return The value corresponding to the element (row, col). */ public double get(long row, long col) { - if(row<0 || col<0 || row>=rows || col>=cols) - throw new IllegalArgumentException("Element ("+row+","+col+") out of range for "+describe()); - return get(row+col*rows); + if (row < 0 || col < 0 || row >= rows || col >= cols) + throw new IllegalArgumentException("Element (" + row + "," + col + ") out of range for " + describe()); + return get(row + col * rows); } /** * Stores values at matrix elements. - * @param row The element's row. - * @param col The element's column. + * + * @param row The element's row. + * @param col The element's column. + * @param value The value to store. * @return this Matrix instance. */ public Matrix put(long row, long col, double value) { - if(row<0 || col<0 || row>=rows || col>=cols) - throw new IllegalArgumentException("Element ("+row+","+col+") out of range for "+describe()); - put(row+col*rows, value); + if (row < 0 || col < 0 || row >= rows || col >= cols) + throw new IllegalArgumentException("Element (" + row + "," + col + ") out of range for " + describe()); + put(row + col * rows, value); return this; } + /** - * Creates a transposed copy of the matrix. - * Note: Contrary to typical tensor operations, in-place transposition is not supported. - * However, related methods can help avoid explicit transposition without allocating more - * memory. + * Creates a transposed copy of the matrix. Note: Contrary to typical tensor + * operations, in-place transposition is not supported. However, related methods + * can help avoid explicit transposition without allocating more memory.
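The transposition note above in practice (values illustrative):

    Matrix m = new DenseMatrix(2, 3);
    m.put(0, 2, 5.0);                    // (row, col) element access
    Matrix copy = m.transposed();        // allocates a new 3x2 matrix
    Matrix view = m.asTransposed();      // no allocation; shares storage with m
    double v = view.get(2, 0);           // 5.0, and edits through view also affect m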
+ * * @return A transposed copy of the matrix. * @see #matmul(Matrix, boolean, boolean) * @see #asTransposed() */ public Matrix transposed() { Matrix ret = zeroCopy(getCols(), getRows()); - for(Entry element : getNonZeroEntries()) + for (Entry element : getNonZeroEntries()) ret.put(element.getValue(), element.getKey(), get(element.getKey(), element.getValue())); return ret; } + /** - * Creates a transposed version of the matrix that accesses the same elements (thus, editing one - * edits the other) without allocating additional memory. + * Creates a transposed version of the matrix that accesses the same elements + * (thus, editing one edits the other) without allocating additional memory. + * * @return A {@link TransposedMatrix}. */ public Matrix asTransposed() { return new TransposedMatrix(this); } + /** - * Performs the linear algebra transformation A*x where A is this matrix and x a vector + * Performs the linear algebra transformation A*x where A is this matrix and x a + * vector + * * @param x The one-dimensional tensor which is the vector being transformed. * @return The one-dimensional outcome of the transformation. */ public Tensor transform(Tensor x) { x.assertSize(cols); DenseTensor ret = new DenseTensor(rows); - for(Entry element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); - ret.putAdd(row, get(row, col)*x.get(col)); + ret.putAdd(row, get(row, col) * x.get(col)); } return ret; } + private int parallelizedMultiplication = 0; + /** - * Performs the matrix multiplication of this*with and the recipient. + * Performs the matrix multiplication of this*with and the + * recipient. * * @param with The matrix to multiply with. * @return A matrix that stores the outcome of the multiplication. * @see #matmul(Matrix, boolean, boolean) */ public Matrix matmul(Matrix with) { - if(cols!=with.getRows()) - throw new IllegalArgumentException("Mismatched matrix sizes between "+describe()+" and "+with.describe()); - if(colName!=null && with.getRowName()!=null && !colName.equals(with.getRowName())) - throw new IllegalArgumentException("Mismatched matrix dimension names between "+describe()+" and "+with.describe()); + if (cols != with.getRows()) + throw new IllegalArgumentException( + "Mismatched matrix sizes between " + describe() + " and " + with.describe()); + if (colName != null && with.getRowName() != null && !colName.equals(with.getRowName())) + throw new IllegalArgumentException( + "Mismatched matrix dimension names between " + describe() + " and " + with.describe()); Matrix ret = determineZeroCopy(with, getRows(), with.getCols(), getCols()); - if(parallelizedMultiplication>1) { + if (parallelizedMultiplication > 1) { ArrayList> entries = new ArrayList>(); - for(Entry element : getNonZeroEntries()) + for (Entry element : getNonZeroEntries()) entries.add(element); Thread[] threads = new Thread[parallelizedMultiplication]; - for(int i=0;i element = entries.get(entry); long row = element.getKey(); long col = element.getValue(); - for(long col2=0;col2 element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); - for(long col2=0;col2 element : with.getNonZeroEntries()) { + for (Entry element : with.getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); - for(long row1=0;row1this*with, - *
this.transposed()*with - *
this*with.transposed(), - *
this.transposed()*with.transposed() - *
while avoiding the overhead of calling - * {@link #transposed()}. In this first of those cases, this operation - * becomes equivalent to {@link #matmul(Matrix)}. + * Can be used to perform fast computation of the matrix multiplications
+ * this*with,
+ * this.transposed()*with,
+ * this*with.transposed(),
+ * this.transposed()*with.transposed()
+ * while avoiding the overhead of calling {@link #transposed()}. In this first + * of those cases, this operation becomes equivalent to {@link #matmul(Matrix)}. * - * @param with The matrix to multiply with. - * @param transposeSelf Whether this matrix should be transposed before multiplication. - * @param transposeWith Whether the multiplied with matrix should be transposed before multiplication. + * @param with The matrix to multiply with. + * @param transposeSelf Whether this matrix should be transposed + * before multiplication. + * @param transposeWith Whether the multiplied with matrix should + * be transposed before multiplication. * @return A matrix that stores the outcome of the multiplication. * @see #matmul(Matrix) * @see #transposed() */ public Matrix matmul(Matrix with, boolean transposeSelf, boolean transposeWith) { - if((transposeSelf?rows:cols)!=(transposeWith?with.getCols():with.getRows())) + if ((transposeSelf ? rows : cols) != (transposeWith ? with.getCols() : with.getRows())) throw new IllegalArgumentException("Mismatched matrix sizes"); - if((transposeSelf?rowName:colName)!=null && - (transposeWith?with.getColName():with.getRowName())!=null && - !(transposeSelf?rowName:colName).equals(transposeWith?with.getColName():with.getRowName())) + if ((transposeSelf ? rowName : colName) != null + && (transposeWith ? with.getColName() : with.getRowName()) != null + && !(transposeSelf ? rowName : colName).equals(transposeWith ? with.getColName() : with.getRowName())) throw new IllegalArgumentException("Mismatched matrix dimension names"); - Matrix ret = determineZeroCopy(with, transposeSelf?cols:rows, transposeWith?with.getRows():with.getCols(), transposeWith?with.getCols():with.getRows()); - - if(parallelizedMultiplication>1) { + Matrix ret = determineZeroCopy(with, transposeSelf ? cols : rows, + transposeWith ? with.getRows() : with.getCols(), transposeWith ? with.getCols() : with.getRows()); + + if (parallelizedMultiplication > 1) { ArrayList> entries = new ArrayList>(); - for(Entry element : getNonZeroEntries()) + for (Entry element : getNonZeroEntries()) entries.add(element); Thread[] threads = new Thread[parallelizedMultiplication]; - for(int i=0;i element = entries.get(entry); - long row = transposeSelf?element.getValue():element.getKey(); - long col = transposeSelf?element.getKey():element.getValue(); - for(long col2=0;col2<(transposeWith?with.getRows():with.getCols());col2++) { - double val = get(element.getKey(),element.getValue())*with.get(transposeWith?col2:col, transposeWith?col:col2); + long row = transposeSelf ? element.getValue() : element.getKey(); + long col = transposeSelf ? element.getKey() : element.getValue(); + for (long col2 = 0; col2 < (transposeWith ? with.getRows() : with.getCols()); col2++) { + double val = get(element.getKey(), element.getValue()) + * with.get(transposeWith ? col2 : col, transposeWith ? 
col : col2); ret.put(row, col2, val + ret.get(row, col2)); - + } } } }; } - for(Thread thread : threads) + for (Thread thread : threads) thread.start(); - for(Thread thread : threads) + for (Thread thread : threads) try { thread.join(); - } - catch (InterruptedException e) { + } catch (InterruptedException e) { e.printStackTrace(); } - } - else { - if(estimateNumNonZeroElements()/(transposeSelf?getCols():getRows()) element : getNonZeroEntries()) { - long row = transposeSelf?element.getValue():element.getKey(); - long col = transposeSelf?element.getKey():element.getValue(); - for(long col2=0;col2<(transposeWith?with.getRows():with.getCols());col2++) - ret.put(row, col2, ret.get(row, col2) + get(element.getKey(),element.getValue())*with.get(transposeWith?col2:col, transposeWith?col:col2)); + } else { + if (estimateNumNonZeroElements() / (transposeSelf ? getCols() : getRows()) < with + .estimateNumNonZeroElements() / (transposeWith ? with.getRows() : with.getCols())) { + for (Entry element : getNonZeroEntries()) { + long row = transposeSelf ? element.getValue() : element.getKey(); + long col = transposeSelf ? element.getKey() : element.getValue(); + for (long col2 = 0; col2 < (transposeWith ? with.getRows() : with.getCols()); col2++) + ret.put(row, col2, ret.get(row, col2) + get(element.getKey(), element.getValue()) + * with.get(transposeWith ? col2 : col, transposeWith ? col : col2)); } - } - else { - for(Entry element : with.getNonZeroEntries()) { - long row = transposeWith?element.getValue():element.getKey(); - long col = transposeWith?element.getKey():element.getValue(); - for(long row1=0;row1<(transposeSelf?getCols():getRows());row1++) - ret.put(row1, col, ret.get(row1, col) + get(transposeSelf?row:row1, transposeSelf?row1:row)*with.get(element.getKey(), element.getValue())); + } else { + for (Entry element : with.getNonZeroEntries()) { + long row = transposeWith ? element.getValue() : element.getKey(); + long col = transposeWith ? element.getKey() : element.getValue(); + for (long row1 = 0; row1 < (transposeSelf ? getCols() : getRows()); row1++) + ret.put(row1, col, + ret.get(row1, col) + get(transposeSelf ? row : row1, transposeSelf ? row1 : row) + * with.get(element.getKey(), element.getValue())); } } } - return ret.setRowName(transposeSelf?getColName():getRowName()).setColName(transposeWith?with.getRowName():with.getColName()); + return ret.setRowName(transposeSelf ? getColName() : getRowName()) + .setColName(transposeWith ? with.getRowName() : with.getColName()); } - + /** - * Produces the external product of two tensors. - * This is equivalent but faster to calling matmul(horizontal.asColum(), vertical.asRow()). + * Produces the external product of two tensors. This is equivalent but faster + * to calling matmul(horizontal.asColum(), vertical.asRow()). + * * @param horizontal The first tensor. - * @param vertical The second tensor. + * @param vertical The second tensor. * @return A dense matrix holding the external product. 
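For reference, a minimal usage sketch of the external (outer) product documented above; the class name is hypothetical and only methods visible in this patch are assumed:

import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;

public class OuterProductSketch {
    public static void main(String[] args) {
        Tensor horizontal = new DenseTensor(2).setToOnes();
        Tensor vertical = new DenseTensor(3).setToOnes().selfMultiply(2);
        // The outcome holds result[i][j] = horizontal.get(i) * vertical.get(j).
        Matrix outer = Matrix.external(horizontal, vertical);
        System.out.println(outer.describe()); // DenseMatrix (2,3)
    }
}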
*/ public static Matrix external(Tensor horizontal, Tensor vertical) { Matrix ret = new DenseMatrix(horizontal.size(), vertical.size()); - for(long row=0;row element : getNonZeroEntries()) { - long row = element.getKey(); - long col = element.getValue(); - put(row, col, get(row, col)*other.get(col)); - } - return this; - } - else - return super.selfMultiply(other); - }*/ - + + /* + * @Override public Tensor selfMultiply(Tensor other) { + * if(other.size()==getCols()) { for(Entry element : + * getNonZeroEntries()) { long row = element.getKey(); long col = + * element.getValue(); put(row, col, get(row, col)*other.get(col)); } return + * this; } else return super.selfMultiply(other); } + */ + @Override protected boolean isMatching(Tensor other) { - if(!(other instanceof Matrix)) { - if(rows!=1 && cols!=1) + if (!(other instanceof Matrix)) { + if (rows != 1 && cols != 1) return false; else return super.isMatching(other); } Matrix otherMatrix = other.cast(Matrix.class); - if(rows!=otherMatrix.rows || cols!=otherMatrix.cols) + if (rows != otherMatrix.rows || cols != otherMatrix.cols) return false; - if(rowName!=null && otherMatrix.rowName!=null && !rowName.equals(otherMatrix.rowName)) + if (rowName != null && otherMatrix.rowName != null && !rowName.equals(otherMatrix.rowName)) return false; - if(colName!=null && otherMatrix.colName!=null && !colName.equals(otherMatrix.colName)) + if (colName != null && otherMatrix.colName != null && !colName.equals(otherMatrix.colName)) return false; return true; } - + @Override public String describe() { - return getClass().getSimpleName()+" ("+(rowName==null?"":(rowName+" "))+rows+","+(colName==null?"":(" "+colName+" "))+cols+")"; + return getClass().getSimpleName() + " (" + (rowName == null ? "" : (rowName + " ")) + rows + "," + + (colName == null ? "" : (" " + colName + " ")) + cols + ")"; } - + /** - * Produces a mask that indicates the non-zero elements of the matrix. - * Element's correspond to the matrix's whose non-zero ones are set to 1. + * Produces a mask that indicates the non-zero elements of the matrix. Element's + * correspond to the matrix's whose non-zero ones are set to 1. + * * @return A matrix of the same dimensions. */ public Matrix onesMask() { Matrix ones = zeroCopy(getRows(), getCols()); - for(Entry element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); - if(get(row, col)!=0) + if (get(row, col) != 0) ones.put(row, col, 1.); } return ones; } - + /** * Creates a copy of the Matrix that holds its symmetrically normalized version. + * * @return A new Matrix of the same dimensions. * @see #setToSymmetricNormalization() */ public Matrix symmetricNormalization() { - return ((Matrix)copy()).setToSymmetricNormalization(); + return ((Matrix) copy()).setToSymmetricNormalization(); } /** * Sets the matrix's specified main diagonal elements to a given value value. + * * @param value The value to set to the diagonal's elements. * @return this Matrix instance. * @see #setDiagonal(long, double) @@ -494,104 +527,106 @@ public Matrix symmetricNormalization() { public Matrix setMainDiagonal(double value) { return setDiagonal(0, value); } - + /** * Sets the matrix's specified diagonal elements to a given value. + * * @param diagonal Which diagonal to set. 0 is the main diagonal - * @param value The value to set to the diagonal's elements. + * @param value The value to set to the diagonal's elements. * @return this Matrix instance. 
* @see #setMainDiagonal(double) */ public Matrix setDiagonal(long diagonal, double value) { - if(cols>rows) { - for(long col=Math.max(0, diagonal);col rows) { + for (long col = Math.max(0, diagonal); col < Math.min(cols, cols + diagonal); col++) + if (col < rows) put(col, col, value); - } - else { - for(long row=Math.max(0, diagonal);rowthis Matrix instance. * @see #symmetricNormalization() */ public Matrix setToSymmetricNormalization() { HashMap outDegrees = new HashMap(); HashMap inDegrees = new HashMap(); - for(Entry element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); double value = get(row, col); - outDegrees.put(row, outDegrees.getOrDefault(row, 0.)+value); - inDegrees.put(col, inDegrees.getOrDefault(col, 0.)+value); + outDegrees.put(row, outDegrees.getOrDefault(row, 0.) + value); + inDegrees.put(col, inDegrees.getOrDefault(col, 0.) + value); } - for(Entry element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); - double div = Math.sqrt(outDegrees.get(row)*inDegrees.get(col)); - if(div!=0) - put(row, col, get(row, col)/div); + double div = Math.sqrt(outDegrees.get(row) * inDegrees.get(col)); + if (div != 0) + put(row, col, get(row, col) / div); } return this; } - /** - * Sets the Matrix to its asymmetrically normalized transformation - * by appropriately adjusting its element values. + * Sets the Matrix to its asymmetrically normalized transformation by + * appropriately adjusting its element values. + * * @return this Matrix instance. * @see #symmetricNormalization() */ public Matrix setToASymmetricNormalization() { HashMap outDegrees = new HashMap(); HashMap inDegrees = new HashMap(); - for(Entry element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); double value = get(row, col); - outDegrees.put(row, outDegrees.getOrDefault(row, 0.)+value); - inDegrees.put(col, inDegrees.getOrDefault(col, 0.)+value); + outDegrees.put(row, outDegrees.getOrDefault(row, 0.) + value); + inDegrees.put(col, inDegrees.getOrDefault(col, 0.) + value); } - for(Entry element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); double div = inDegrees.get(col); - if(div!=0) - put(row, col, get(row, col)/div); + if (div != 0) + put(row, col, get(row, col) / div); } return this; } - + /** * Retrieves either the given row or column as a trensor. + * * @param index The dimension index to access. - * @param name The dimension's name. + * @param name The dimension's name. * @return Either a {@link AccessRow} or a {@link AccessCol} at the given index. 
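To make the diagonal and normalization setters above concrete, a minimal sketch on a hypothetical 3-node path graph (class name hypothetical; assumes SparseMatrix lives in mklab.JGNN.core.matrix as elsewhere in this patch):

import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.matrix.SparseMatrix;

public class NormalizationSketch {
    public static void main(String[] args) {
        Matrix adjacency = new SparseMatrix(3, 3);
        adjacency.put(0, 1, 1);
        adjacency.put(1, 0, 1);
        adjacency.put(1, 2, 1);
        adjacency.put(2, 1, 1);
        // Add self-loops, then divide each entry by sqrt(outDegree*inDegree) in place.
        adjacency.setMainDiagonal(1).setToSymmetricNormalization();
        System.out.println(adjacency.get(0, 1)); // 1/sqrt(2*3) ~= 0.408
    }
}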
* @see #accessRow(long) * @see #accessCol(long) */ public Tensor accessDim(long index, String name) { - if(rowName!=null && colName!=null && rowName.equals(colName)) - throw new RuntimeException("Cannot call accessDim for the same row and col " - + "dimension names in "+describe()); - if(rowName!=null && name.equals(rowName)) + if (rowName != null && colName != null && rowName.equals(colName)) + throw new RuntimeException( + "Cannot call accessDim for the same row and col " + "dimension names in " + describe()); + if (rowName != null && name.equals(rowName)) return accessRow(index); - if(colName!=null && name.equals(colName)) + if (colName != null && name.equals(colName)) return accessCol(index); - throw new RuntimeException("No named dimensions in "+name); + throw new RuntimeException("No named dimensions in " + name); } - + /** - * Retrieves the given row as a tensor. Editing the result - * also edits the original matrix. - * No new memory is allocated for matrix values. + * Retrieves the given row as a tensor. Editing the result also edits the + * original matrix. No new memory is allocated for matrix values. + * * @param row The given row. * @return An {@link AccessRow} instance of the corresponding row. * @see #accessDim(long, String) @@ -605,9 +640,9 @@ public Tensor accessRow(long row) { } /** - * Retrieves the given column as a tensor. Editing the result - * also edits the original matrix. - * No new memory is allocated for matrix values. + * Retrieves the given column as a tensor. Editing the result also edits the + * original matrix. No new memory is allocated for matrix values. + * * @param col The given column. * @return An {@link AccessCol} of the corresponding column. * @see #accessDim(long, String) @@ -620,56 +655,47 @@ public Tensor accessCol(long col) { return new AccessCol(this, col); } /* - @Override - public String toString() { - StringBuilder res = new StringBuilder(); - for(long row=0;row0) + for (long row = 0; row < rows; row++) { + if (cols > 0) ret += get(row, 0); - for(long col=1;col element : getNonZeroEntries()) { + for (Entry element : getNonZeroEntries()) { double value = get(element.getKey(), element.getValue()); - if(value!=0) - ret += element.getKey()+", "+element.getValue()+": "+value+"\n"; + if (value != 0) + ret += element.getKey() + ", " + element.getValue() + ": " + value + "\n"; } return ret; } - - /*public static Matrix fromColumns(List tensors) { - Matrix ret = new SparseMatrix(tensors.get(0).size(), tensors.size()); - for(int col=0;col tensors) { Matrix ret = new + * SparseMatrix(tensors.get(0).size(), tensors.size()); for(int + * col=0;col accessRows() { List ret = new ArrayList(); - for(long row=0;row accessRows() { */ public List accessColumns() { List ret = new ArrayList(); - for(long col=0;col accessColumns() { * @see #accessRows(Iterable) * @see #accessColumns(long...) */ - public Matrix accessRows(long ... rows) { + public Matrix accessRows(long... rows) { List ret = new ArrayList(); - for(long row : rows) + for (long row : rows) ret.add(accessRow(row)); return new WrapRows(ret).setZeroCopyType(this); } + /** * Organizes specific matrix columns to a list of tensors that share entries. - * This operation does not allocate memory for matrix elements and editing + * This operation does not allocate memory for matrix elements and editing * tensor elements edits the original matrix's elements. + * * @param cols An array of columns to access. * @return A list of {@link AccessCol} instances. 
* @see #accessCol(long) @@ -731,17 +762,18 @@ public Matrix accessRows(long ... rows) { * @see #accessColumns(Iterable) * @see #accessRows(long...) */ - public Matrix accessColumns(long ... cols) { + public Matrix accessColumns(long... cols) { List ret = new ArrayList(); - for(long col=0;col ret = new ArrayList(); - for(long row=0;row ret = new ArrayList(); - for(long col=0;col accessRows(Iterable rowIds) { List ret = new ArrayList(); - for(long row : rowIds) + for (long row : rowIds) ret.add(accessRow(row)); return ret; } - + /** - * Organizes some matrix columns to a list of tensors that share entries. - * This operation does not allocate memory for matrix elements and editing - * tensor elements edits the original matrix's elements. + * Organizes some matrix columns to a list of tensors that share entries. This + * operation does not allocate memory for matrix elements and editing tensor + * elements edits the original matrix's elements. + * * @param colIds The columns to access. * @return A list of {@link AccessCol} instances. * @see #accessCol(long) @@ -808,24 +844,21 @@ public List accessRows(Iterable rowIds) { */ public List accessColumns(Iterable colIds) { List ret = new ArrayList(); - for(long col : colIds) + for (long col : colIds) ret.add(accessCol(col)); return ret; } - /*public List toSparseColumns() { - List ret = new ArrayList(); - for(long col=0;col entry : getNonZeroEntries()) { - long row = entry.getKey(); - long col = entry.getValue(); - ret.get((int)col).put(row, get(row, col)); - } - return ret; - }*/ - + /* + * public List toSparseColumns() { List ret = new + * ArrayList(); for(long col=0;col entry : getNonZeroEntries()) + * { long row = entry.getKey(); long col = entry.getValue(); + * ret.get((int)col).put(row, get(row, col)); } return ret; } + */ + /** * Converts a given value to a JGNN-compatible 1x1 matrix. + * * @param value A given value. 
* @return a Matrix holding the given value * @see Tensor#fromDouble(double) @@ -836,51 +869,52 @@ public static Matrix fromDouble(double value) { ret.put(0, 0, value); return ret; } - + protected Matrix determineZeroCopy(Matrix with, long rows, long cols, long intermediate) { - if(1-Math.pow(1-density()*with.density(), intermediate) < 0.3) + if (1 - Math.pow(1 - density() * with.density(), intermediate) < 0.3) return new SparseMatrix(rows, cols); - /*if(with instanceof SparseMatrix) - return ((Matrix)with).zeroCopy(rows, cols); - try { - return zeroCopy(rows, cols); - } - catch(UnsupportedOperationException e) { - } - try { - return ((Matrix)with).zeroCopy(rows, cols); - } - catch(UnsupportedOperationException e) { - }*/ - if(rows>100000/cols && Tensor.vectorization) - return new VectorizedMatrix(rows, cols); // TODO: run benchmarks on several machines for this limit + /* + * if(with instanceof SparseMatrix) return ((Matrix)with).zeroCopy(rows, cols); + * try { return zeroCopy(rows, cols); } catch(UnsupportedOperationException e) { + * } try { return ((Matrix)with).zeroCopy(rows, cols); } + * catch(UnsupportedOperationException e) { } + */ + if (rows > 100000 / cols && Tensor.vectorization) + return new VectorizedMatrix(rows, cols); // TODO: run benchmarks on several machines for this limit return new DenseMatrix(rows, cols); - //throw new UnsupportedOperationException("Neither "+describe()+" nor "+with.describe()+" support zeroCopy("+rows+", "+cols+")"); + // throw new UnsupportedOperationException("Neither "+describe()+" nor + // "+with.describe()+" support zeroCopy("+rows+", "+cols+")"); } + /** * Creates a copy of the matrix organized as a dense matrix. + * * @return A {@link DenseMatrix} instance. */ public Matrix toDense() { - if(getRows()>100000/getCols() && vectorization) - return (VectorizedMatrix)new VectorizedMatrix(getRows(), getCols()).selfAdd(this).setDimensionName(this); - return (DenseMatrix)new DenseMatrix(getRows(), getCols()).selfAdd(this).setDimensionName(this); + if (getRows() > 100000 / getCols() && vectorization) + return (VectorizedMatrix) new VectorizedMatrix(getRows(), getCols()).selfAdd(this).setDimensionName(this); + return (DenseMatrix) new DenseMatrix(getRows(), getCols()).selfAdd(this).setDimensionName(this); } + /** * Creates a copy of the matrix organized as a sparse matrix. + * * @return A {@link SparseMatrix} instance. */ public Matrix toSparse() { - return (SparseMatrix)new SparseMatrix(getRows(), getCols()).selfAdd(this).setDimensionName(this); + return (SparseMatrix) new SparseMatrix(getRows(), getCols()).selfAdd(this).setDimensionName(this); } + /** * Creates a sparse unit matrix. + * * @param dims The dimensions of the unit matrix. * @return A sparse matrix. 
*/ public static SparseMatrix eye(long dims) { SparseMatrix matrix = new SparseMatrix(dims, dims); - for(long pos=0;pos>> values = new Stack>>(); private ArrayList> topValues = null; + private Scope() { } + public void enter() { values.push(topValues = new ArrayList>()); } + public void exit() { - for(WeakReference ref : topValues) { + for (WeakReference ref : topValues) { double[] value = ref.get(); - if(value!=null) + if (value != null) release(value); } topValues.clear(); topValues = values.pop(); } + public void register(double[] value) { - if(topValues!=null) + if (topValues != null) topValues.add(new WeakReference(value)); } + public void unregister(double[] value) { - if(topValues!=null) { - for(WeakReference ref : topValues) - if(ref.get()==value) { + if (topValues != null) { + for (WeakReference ref : topValues) + if (ref.get() == value) { topValues.remove(ref); break; } } } } - + private static HashMap scopes = new HashMap(); - + public static Scope scope() { int threadId = ThreadPool.getCurrentThreadId(); Scope ret; - synchronized(scopes) { + synchronized (scopes) { ret = scopes.get(threadId); } - if(ret==null) { - synchronized(scopes) { + if (ret == null) { + synchronized (scopes) { scopes.put(threadId, ret = new Scope()); } } return ret; } - + private static class BoundAllocation { private SoftReference memory; public WeakReference boundObject; - + public BoundAllocation(int length, Object boundObject) { memory = new SoftReference(new double[length]); this.boundObject = new WeakReference(boundObject); } + public void changeBoundObject(Object boundObject) { this.boundObject = new WeakReference(boundObject); } + public boolean isReusable() { - return boundObject.get()==null && memory.get()!=null; + return boundObject.get() == null && memory.get() != null; } + public boolean isInvalid() { return memory.get() == null; } + public double[] value() { return memory.get(); } } - + private static HashMap> allocated = new HashMap>(); private static WeakHashMap bounded = new WeakHashMap(); - + public synchronized static double[] allocate(int length, Object boundTo) { ArrayList search = allocated.get(length); - if(search==null) + if (search == null) allocated.put(length, search = new ArrayList()); ArrayList toDelete = null; - for(BoundAllocation ref : search) { - if(ref.isReusable()) { + for (BoundAllocation ref : search) { + if (ref.isReusable()) { double[] ret = ref.value(); - for(int i=0;i(); toDelete.add(ref); } } - if(toDelete!=null) + if (toDelete != null) search.removeAll(toDelete); BoundAllocation ret = new BoundAllocation(length, boundTo); search.add(ret); @@ -115,10 +125,10 @@ public synchronized static double[] allocate(int length, Object boundTo) { scope().register(ret.value()); return ret.value(); } - + public static synchronized void release(double[] value) { BoundAllocation ref = bounded.get(value); - if(ref!=null) + if (ref != null) ref.changeBoundObject(null); } } diff --git a/JGNN/src/main/java/mklab/JGNN/core/Slice.java b/JGNN/src/main/java/mklab/JGNN/core/Slice.java index 33c80b43..6b4bf824 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/Slice.java +++ b/JGNN/src/main/java/mklab/JGNN/core/Slice.java @@ -9,24 +9,28 @@ import mklab.JGNN.core.tensor.DenseTensor; /** - * This class provices an interface with which to define data slices, - * for instance to sample labels. + * This class provices an interface with which to define data slices, for + * instance to sample labels. 
* * @author Emmanouil Krasanakis */ public class Slice implements Iterable { private List ids; + /** * Instantiates the data slice from a collection of element identifiers. + * * @param collection An iterable of longs. */ public Slice(Iterable collection) { this.ids = new ArrayList(); - for(long id : collection) + for (long id : collection) this.ids.add(id); } + /** * Shuffles the slice. + * * @return this slice. * @see #shuffle(int) */ @@ -34,8 +38,10 @@ public Slice shuffle() { Collections.shuffle(ids); return this; } + /** * Shuffles the slice with a provided randomization seed. + * * @return this slice. * @param seed The seed to shuffle with. * @return this slice. @@ -45,66 +51,76 @@ public Slice shuffle(int seed) { Collections.shuffle(ids, new Random(seed)); return this; } + /** * Obtains the identifiers in a given range of the (shuffled) slice. + * * @param from The beginning of the identifiers' position in the slice. - * @param end The end (non-inclusive) of the identifiers' position in the slice. - * @return A new Slice instance holding the position identifiers in this one's given range. + * @param end The end (non-inclusive) of the identifiers' position in the + * slice. + * @return A new Slice instance holding the position identifiers in this one's + * given range. * * @see #range(double, double) */ public Slice range(int from, int end) { return new Slice(ids.subList(from, end)); } + /** - * Constructs a column matrix holding identifiers in - * the range 0,1,..{@link #size()}-1 so that the pattern + * Constructs a column matrix holding identifiers in the range + * 0,1,..{@link #size()}-1 so that the pattern * slice.samplesAsFeatures().accessRows(slice.range(from, end)) - * retrieves one-element tensors holding - * slice[from], slice[from+1], ... slice[end]. - * The constructed matrix is typically used as node identifier data. + * retrieves one-element tensors holding + * slice[from], slice[from+1], ... slice[end]. The constructed + * matrix is typically used as node identifier data. * * This is different than {@link #asTensor()}. * * @return A {@link Matrix}. */ - public Matrix samplesAsFeatures(){ + public Matrix samplesAsFeatures() { return Tensor.fromRange(0, size()).asColumn(); } + /** - * Performs the {@link #range(int, int)} operation - * while replacing values of from and end - * with (int)(from*size()) and (int)(end*size()) - * so that fractional ranges can be obtained. For example, - * you can call slice.shuffle().range(0.5, 1) to obtain a - * random subset of the slice's identifiers. + * Performs the {@link #range(int, int)} operation while replacing values of + * from and end with (int)(from*size()) + * and (int)(end*size()) so that fractional ranges can be obtained. + * For example, you can call slice.shuffle().range(0.5, 1) to + * obtain a random subset of the slice's identifiers. + * * @param from An integer at least 1 or a double in the range [0,1). - * @param end An integer greater than 1 or a double in the range [0,1]. - * @return A new Slice instance holding the position identifiers in this one's given range. + * @param end An integer greater than 1 or a double in the range [0,1]. + * @return A new Slice instance holding the position identifiers in this one's + * given range. 
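A minimal sketch of the fractional range described above, splitting ten hypothetical identifiers into train and test portions (class name hypothetical):

import java.util.ArrayList;
import java.util.List;
import mklab.JGNN.core.Slice;

public class SliceSplitSketch {
    public static void main(String[] args) {
        List<Long> ids = new ArrayList<Long>();
        for (long i = 0; i < 10; i++)
            ids.add(i);
        Slice slice = new Slice(ids).shuffle(42);
        Slice train = slice.range(0.0, 0.6); // first 60% after shuffling
        Slice test = slice.range(0.6, 1.0);  // remaining 40%
        System.out.println(train.size() + " train / " + test.size() + " test");
    }
}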
* @see #size() */ public Slice range(double from, double end) { - if(from<1) - from = (int)(from*size()); - if(end<=1) - end = (int)(end*size()); - return range((int)from, (int)end); + if (from < 1) + from = (int) (from * size()); + if (end <= 1) + end = (int) (end * size()); + return range((int) from, (int) end); } + /** * Retrieves the size of the slice. + * * @return An integer. */ public int size() { return ids.size(); } - + @Override public Iterator iterator() { return ids.iterator(); } - + /** * Creates a dense tensor holding the slice's identifiers. + * * @return A {@link DenseTensor}. */ public Tensor asTensor() { diff --git a/JGNN/src/main/java/mklab/JGNN/core/Tensor.java b/JGNN/src/main/java/mklab/JGNN/core/Tensor.java index 1af1b8df..8fe3c9a6 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/Tensor.java +++ b/JGNN/src/main/java/mklab/JGNN/core/Tensor.java @@ -17,21 +17,26 @@ public abstract class Tensor implements Iterable { public static boolean vectorization = ModuleLayer.boot().findModule("jdk.incubator.vector").isPresent(); private long size; private String dimensionName; - + /** * Construct that creates a tensor of zeros given its number of elements + * * @param size The number of tensor elements */ public Tensor(long size) { init(size); } - protected Tensor() {} - + + protected Tensor() { + } + /** * Sets a name for the tensor's one dimension. If set, names are checked for - * compatibility during operations, so that tensors laying across different dimensions - * do not match. Removed dimension names are matched to anything. - * @param dimensionName The new row name or null to remove current name. + * compatibility during operations, so that tensors laying across different + * dimensions do not match. Removed dimension names are matched to anything. + * + * @param dimensionName The new row name or null to remove current + * name. * @return this Tensor instance. * @see #getDimensionName() */ @@ -39,314 +44,372 @@ public Tensor setDimensionName(String dimensionName) { this.dimensionName = dimensionName; return this; } + public String getDimensionName() { return dimensionName; } + /** * Set tensor elements to random values from the uniform range [0,1] + * * @return this Tensor instance. */ public Tensor setToRandom() { - for(long i=0;ithis Tensor instance. */ public Tensor setToRandom(Distribution distribution) { - for(long i=0;isuper(size) by inheriting class - * constructors as needed after. Sets the tensor {@link #size()} to the given value - * and calls the {@link #allocate(long)} method. + * constructors as needed after. Sets the tensor {@link #size()} to the given + * value and calls the {@link #allocate(long)} method. + * * @param size The size of the tensor. */ protected void init(long size) { this.size = size; allocate(size); } + /** - * Asserts that the tensor holds only finite values. Helps catch errors - * early on and avoid misidentifying models as high quality by comparing - * desired outcomes with NaN when in reality they pass through infinity and hence - * don't converge. + * Asserts that the tensor holds only finite values. Helps catch errors early on + * and avoid misidentifying models as high quality by comparing desired outcomes + * with NaN when in reality they pass through infinity and hence don't converge. + * * @throws RuntimeException if one or more tensor elements are NaN or Inf. 
*/ public void assertFinite() { - for(long i : getNonZeroElements()) - if(!Double.isFinite(get(i))) + for (long i : getNonZeroElements()) + if (!Double.isFinite(get(i))) throw new RuntimeException("Did not find a finite value"); } + protected abstract void allocate(long size); + /** - * If the subclassed tensor allows it, release all memory it takes up - * so that the garbage collector will eventually clean it up. This - * memory will be released anyway by Java once there are no more - * references to the object. + * If the subclassed tensor allows it, release all memory it takes up so that + * the garbage collector will eventually clean it up. This memory will be + * released anyway by Java once there are no more references to the object. + * * @see #persist() - * @deprecated This method may not be present in future versions - * of the library, depending on whether memory reuse proves useful or nor. + * @deprecated This method may not be present in future versions of the library, + * depending on whether memory reuse proves useful or nor. */ public abstract void release(); + /** * If supported by the subclassed tensor, invalidates calls to - * {@link #release()} so that memory is a de-allocated only when - * object references expire. + * {@link #release()} so that memory is a de-allocated only when object + * references expire. + * * @see #release() - * @deprecated This method may not be present in future versions - * of the library, depending on whether memory reuse proves useful or nor. + * @deprecated This method may not be present in future versions of the library, + * depending on whether memory reuse proves useful or nor. */ public abstract void persist(); + /** - * Assign a value to a tensor element. All tensor operations use this function to wrap - * element assignments. - * @param pos The position of the tensor element + * Assign a value to a tensor element. All tensor operations use this function + * to wrap element assignments. + * + * @param pos The position of the tensor element * @param value The value to assign - * @throws RuntimeException If the value is NaN or the element position is less than 0 or greater than {@link #size()}-1. + * @throws RuntimeException If the value is NaN or the element position is less + * than 0 or greater than {@link #size()}-1. * @return this Tensor instance. */ public abstract Tensor put(long pos, double value); + /** - * Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap - * element retrieval. + * Retrieves the value of a tensor element at a given position. All tensor + * operations use this function to wrap element retrieval. + * * @param pos The position of the tensor element * @return The value of the tensor element - * @throws RuntimeException If the element position is less than 0 or greater than {@link #size()}-1. + * @throws RuntimeException If the element position is less than 0 or greater + * than {@link #size()}-1. */ public abstract double get(long pos); + /** * Add a value to a tensor element. - * @param pos The position of the tensor element + * + * @param pos The position of the tensor element * @param value The value to assign * @return this Tensor instance. * @see #put(long, double) */ public Tensor putAdd(long pos, double value) { - put(pos, get(pos)+value); + put(pos, get(pos) + value); return this; } + /** * @return The number of tensor elements */ public long size() { return size; } + /** * Asserts that the tensor's {@link #size()} matches the given size. 
+ * * @param size The size the tensor should match * @throws RuntimeException if the tensor does not match the given size */ public void assertSize(long size) { - if(size()!=size) - throw new RuntimeException("Different sizes: given "+size+" vs "+size()); + if (size() != size) + throw new RuntimeException("Different sizes: given " + size + " vs " + size()); } + /** - * Asserts that the tensor's dimensions match with another tensor. This check can be made - * more complex by derived classes, but for a base Tensor instance it is equivalent {@link #assertSize(long)}. - * This method calls {@link #isMatching(Tensor)} to compare the tensors and throws an exception - * if it returns false. + * Asserts that the tensor's dimensions match with another tensor. This check + * can be made more complex by derived classes, but for a base Tensor instance + * it is equivalent {@link #assertSize(long)}. This method calls + * {@link #isMatching(Tensor)} to compare the tensors and throws an exception if + * it returns false. + * * @param other The other tensor to compare with. * @return this Tensor instance. */ public Tensor assertMatching(Tensor other) { - if(!isMatching(other)) - throw new RuntimeException("Non-compliant: "+describe()+" vs "+other.describe()); + if (!isMatching(other)) + throw new RuntimeException("Non-compliant: " + describe() + " vs " + other.describe()); return this; } + /** - * Compares the tensor with another given tensor to see if they are of a same type. - * In the simplest (and weakest) case this checks whether the {@link #size()} is the same, - * but in more complex cases, this may check additional properties, such as a matching number - * of rows and columns in matrices. + * Compares the tensor with another given tensor to see if they are of a same + * type. In the simplest (and weakest) case this checks whether the + * {@link #size()} is the same, but in more complex cases, this may check + * additional properties, such as a matching number of rows and columns in + * matrices. + * * @param other The tensor to compare to. - * @return Whether binary operations can be performed between the given tensors given - * their characteristics (e.g. type, size, etc.). + * @return Whether binary operations can be performed between the given tensors + * given their characteristics (e.g. type, size, etc.). * @see #assertMatching(Tensor) */ protected boolean isMatching(Tensor other) { - return size==other.size() && (dimensionName==null || other.getDimensionName()==null || dimensionName.equals(other.getDimensionName())); + return size == other.size() && (dimensionName == null || other.getDimensionName() == null + || dimensionName.equals(other.getDimensionName())); } + /** - * Creates a tensor of the same class with the same size and all element set to zero. + * Creates a tensor of the same class with the same size and all element set to + * zero. + * * @return A tensor with the same size. * @see #zeroCopy(long) */ public Tensor zeroCopy() { return zeroCopy(size()).setDimensionName(this); } - + /** - * Creates a tensor of the same class and all elements set to zero, - * but size and dimension names are obtained from a prototype tensor. + * Creates a tensor of the same class and all elements set to zero, but size and + * dimension names are obtained from a prototype tensor. */ public Tensor zeroCopy(Tensor prototype) { return zeroCopy(prototype.size()).setDimensionName(prototype.getDimensionName()); } - + /** - * Fills in dimension names per an example {@link isMatching} tensor. 
This appropriately fills in dimension - * names of inherited classes too, such as matrices. Effectively, this method automatically infers - * dimension names during operations. + * Fills in dimension names per an example {@link isMatching} tensor. This + * appropriately fills in dimension names of inherited classes too, such as + * matrices. Effectively, this method automatically infers dimension names + * during operations. + * * @param other The tensor from which to retrieve dimension names. * @return this Tensor instance. */ public Tensor setDimensionName(Tensor other) { - assertMatching(other); - if(dimensionName==null) + assertMatching(other); + if (dimensionName == null) dimensionName = other.getDimensionName(); return this; } + /** - * Creates a tensor of the same class with a given size and all element set to zero. + * Creates a tensor of the same class with a given size and all element set to + * zero. + * * @param size The size of the new tensor. * @return A new tensor. * @see #zeroCopy(long) */ public abstract Tensor zeroCopy(long size); - + @Override public Iterator iterator() { return traverseNonZeroElements(); } + /** - * Retrieves an iterable that wraps {@link #traverseNonZeroElements()}. - * For the time being, this is returned by implementing Iterable, - * but this only serves the practical purpose of avoiding to instantiate - * a new object in case many tensors are used. + * Retrieves an iterable that wraps {@link #traverseNonZeroElements()}. For the + * time being, this is returned by implementing Iterable, but this + * only serves the practical purpose of avoiding to instantiate a new object in + * case many tensors are used. + * * @return An iterable of tensor positions. */ public Iterable getNonZeroElements() { return this; } + /** - * Provides an estimation for the non-zero number of elements stored in the tensor, - * where this number is equal to the size for dense tensors, but equal to the actual - * number of non-zero elements for sparse tensors. - * Basically, this quantity is proportional to the allocated memory. + * Provides an estimation for the non-zero number of elements stored in the + * tensor, where this number is equal to the size for dense tensors, but equal + * to the actual number of non-zero elements for sparse tensors. Basically, this + * quantity is proportional to the allocated memory. + * * @return A long number equal to or less to the tensor size. * @see #density() */ public long estimateNumNonZeroElements() { - /*long ret = 0; - for(long pos : getNonZeroElements()) - if(get(pos)!=0) - ret += 1; - return ret;*/ + /* + * long ret = 0; for(long pos : getNonZeroElements()) if(get(pos)!=0) ret += 1; + * return ret; + */ return size; } - + /** * Provides the memory allocation density of {@link #getNonZeroElements()} - * compare to the size of the tensor. 1 indicates fully dense tensors, - * and lower values sparser data. + * compare to the size of the tensor. 1 indicates fully dense tensors, and lower + * values sparser data. + * * @return A double in the range [0,1]. */ public double density() { - return estimateNumNonZeroElements() / (double)size; + return estimateNumNonZeroElements() / (double) size; } - + /** - * Retrieves positions within the tensor that may hold non-zero elements. - * This guarantees that all non-zero elements positions are traversed - * but some of the returned positions could hold zero elements. 
- * For example, {@link mklab.JGNN.core.tensor.DenseTensor} traverses all - * of its elements this way, whereas {@link mklab.JGNN.core.tensor.SparseTensor} - * indeed traverses only non-zero elements. + * Retrieves positions within the tensor that may hold non-zero elements. This + * guarantees that all non-zero elements positions are traversed but + * some of the returned positions could hold zero elements. For example, + * {@link mklab.JGNN.core.tensor.DenseTensor} traverses all of its elements this + * way, whereas {@link mklab.JGNN.core.tensor.SparseTensor} indeed traverses + * only non-zero elements. * * @return An iterator that traverses positions within the tensor. */ public abstract Iterator traverseNonZeroElements(); + /** - * Creates a {@link #zeroCopy()} and transfers to it all potentially non-zero element values. + * Creates a {@link #zeroCopy()} and transfers to it all potentially non-zero + * element values. + * * @return a copy of the Tensor with the same size and contents * @see #zeroCopy() * @see #getNonZeroElements() */ public Tensor copy() { Tensor res = zeroCopy(); - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, get(i)); return res; } + /** * Performs a sparse assignment. + * * @param tensor The tensor whose elements to copy (it's not affected). * @return this Tensor instance. * @see #assign(Tensor) */ public Tensor assign(Tensor tensor) { assertMatching(tensor); - for(long i : tensor.getNonZeroElements()) + for (long i : tensor.getNonZeroElements()) put(i, tensor.get(i)); return this; } + /** * @param tensor The tensor to add with * @return a new Tensor that stores the outcome of addition */ public Tensor add(Tensor tensor) { assertMatching(tensor); - if(density()this Tensor instance. */ public Tensor selfAdd(Tensor tensor) { assertMatching(tensor); Tensor res = this; - for(long i : tensor.getNonZeroElements()) - res.put(i, get(i)+tensor.get(i)); + for (long i : tensor.getNonZeroElements()) + res.put(i, get(i) + tensor.get(i)); return res; } + /** - * Performs in-memory weighted addition to the Tensor, storing the result in itself. + * Performs in-memory weighted addition to the Tensor, storing the result in + * itself. + * * @param tensor The tensor to add (it's not affected). - * @param weight The weight to multiply the added tensor's elements with during addition. + * @param weight The weight to multiply the added tensor's elements with during + * addition. * @return this Tensor instance. */ public Tensor selfAdd(Tensor tensor, double weight) { assertMatching(tensor); Tensor res = this; - for(long i : tensor.getNonZeroElements()) - res.put(i, get(i)+weight*tensor.get(i)); + for (long i : tensor.getNonZeroElements()) + res.put(i, get(i) + weight * tensor.get(i)); return res; } + /** * Performs in-memory addition to the Tensor, storing the result in itself. + * * @param value The value to add to each tensor element. * @return this Tensor instance. */ public Tensor selfAdd(double value) { Tensor res = this; - for(long i=0;ithis Tensor instance. */ public Tensor selfSubtract(Tensor tensor) { assertMatching(tensor); Tensor res = this; - for(long i : tensor.getNonZeroElements()) - res.put(i, get(i)-tensor.get(i)); + for (long i : tensor.getNonZeroElements()) + res.put(i, get(i) - tensor.get(i)); return res; } + /** * @param tensor The tensor to perform element-wise multiplication with. * @return A new Tensor that stores the outcome of the multiplication. 
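The distinction between copying and in-place arithmetic above is worth a minimal sketch (class name hypothetical); the weighted selfAdd performs a gradient-descent-style update without allocating a new tensor:

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;

public class InPlaceArithmeticSketch {
    public static void main(String[] args) {
        Tensor param = new DenseTensor(3).setToOnes();
        Tensor grad = new DenseTensor(3).setToOnes(); // stand-in gradient
        Tensor shifted = param.add(grad);   // copying: param is unaffected
        param.selfAdd(grad, -0.1);          // in-place: param += -0.1 * grad
        System.out.println(param.get(0));   // 0.9
        System.out.println(shifted.get(0)); // 2.0
    }
}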
@@ -377,187 +443,228 @@ public Tensor selfSubtract(Tensor tensor) { public Tensor multiply(Tensor tensor) { assertMatching(tensor); Tensor res = determineZeroCopy(tensor); - for(long i : getNonZeroElements()) - res.put(i, get(i)*tensor.get(i)); + for (long i : getNonZeroElements()) + res.put(i, get(i) * tensor.get(i)); return res; } + /** - * Performs in-memory multiplication on the Tensor, storing the result in itself . - * @param tensor The tensor to perform element-wise multiplication with (it's not affected). + * Performs in-memory multiplication on the Tensor, storing the result in itself + * . + * + * @param tensor The tensor to perform element-wise multiplication with (it's + * not affected). * @return this Tensor instance. */ public Tensor selfMultiply(Tensor tensor) { assertMatching(tensor); Tensor res = this; - if(density()this Tensor instance. */ public Tensor selfMultiply(double value) { Tensor res = this; - for(long i : getNonZeroElements()) - res.put(i, get(i)*value); + for (long i : getNonZeroElements()) + res.put(i, get(i) * value); return res; } + /** * Computes the square root of tensor elements. - * @return A new Tensor that stores the outcome of finding the absolute square root of each element. + * + * @return A new Tensor that stores the outcome of finding the absolute square + * root of each element. */ public Tensor sqrt() { Tensor res = zeroCopy(); - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.sqrt(Math.abs(get(i)))); return res; } + /** - * Performs in-memory set of each element to the square root of its absolute value. + * Performs in-memory set of each element to the square root of its absolute + * value. + * * @return this Tensor instance. */ public Tensor selfSqrt() { Tensor res = this; - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.sqrt(Math.abs(get(i)))); return res; } + /** * Computes the exponential minus 1 of tensor elements. - * @return A new Tensor that stores the outcome of finding the operation on each element. + * + * @return A new Tensor that stores the outcome of finding the operation on each + * element. */ public Tensor expMinusOne() { Tensor res = zeroCopy(); - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.exp(get(i))); return res; } + /** * Sets the exponential minus 1 of tensor elements. - * @return this Tensor instance. + * + * @return this Tensor instance. */ public Tensor selfExpMinusOne() { Tensor res = this; - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.exp(get(i))); return res; } + /** * Computes the logarithm of tensor elements. - * @return A new Tensor that stores the outcome of finding the logarithm of the absolute of each element. + * + * @return A new Tensor that stores the outcome of finding the logarithm of the + * absolute of each element. */ public Tensor log() { Tensor res = zeroCopy(); - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.log(Math.abs(get(i)))); return res; } + /** - * Performs in-memory set of each element to the logarithm of its absolute value. + * Performs in-memory set of each element to the logarithm of its absolute + * value. + * * @return this Tensor instance. */ public Tensor selfLog() { Tensor res = this; - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.log(Math.abs(get(i)))); return res; } /** * Computes the negative of tensor elements. 
- * @return A new Tensor that stores the outcome of finding the negative of each element. + * + * @return A new Tensor that stores the outcome of finding the negative of each + * element. */ public Tensor negative() { Tensor res = zeroCopy(); - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, -get(i)); return res; } + /** * Performs in-memory set of each element to the negative of itself. + * * @return this Tensor instance. */ public Tensor selfNegative() { Tensor res = this; - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, -get(i)); return res; } + /** * Computes the absolute value of tensor elements. - * @return A new Tensor that stores the outcome of finding the absolute value of each element. + * + * @return A new Tensor that stores the outcome of finding the absolute value of + * each element. */ public Tensor abs() { Tensor res = zeroCopy(); - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.abs(get(i))); return res; } + /** * Performs in-memory set of each element to its absolute value. + * * @return this Tensor instance. */ public Tensor selfAbs() { Tensor res = this; - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res.put(i, Math.abs(get(i))); return res; } + /** * @return A new Tensor with inversed each non-zero element. */ public Tensor inverse() { Tensor res = zeroCopy(); - for(long i : getNonZeroElements()) - if(get(i)!=0) - res.put(i, 1./get(i)); + for (long i : getNonZeroElements()) + if (get(i) != 0) + res.put(i, 1. / get(i)); return res; } + /** * Performs in-memory the inverse of each non-zero element. + * * @return this Tensor instance. */ public Tensor selfInverse() { Tensor res = this; - for(long i : getNonZeroElements()) - if(get(i)!=0) - res.put(i, 1./get(i)); + for (long i : getNonZeroElements()) + if (get(i) != 0) + res.put(i, 1. / get(i)); return res; } + /** * Performs the dot product between this and another tensor. + * * @param tensor The tensor with which to find the product. * @return The dot product between the tensors. */ public double dot(Tensor tensor) { assertMatching(tensor); double res = 0; - if(density() < tensor.density()) - for(long i : getNonZeroElements()) - res += get(i)*tensor.get(i); + if (density() < tensor.density()) + for (long i : getNonZeroElements()) + res += get(i) * tensor.get(i); else - for(long i : tensor.getNonZeroElements()) - res += get(i)*tensor.get(i); + for (long i : tensor.getNonZeroElements()) + res += get(i) * tensor.get(i); return res; } + /** * Performs the triple dot product between this and two other tensors. + * * @param tensor1 The firth other tensor with which to find the product. * @param tensor2 The second other tensor with which to find the product. * @return The triple dot product between the tensors. 
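Putting the dot product and norm together, a minimal cosine-similarity sketch (class name hypothetical):

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;

public class CosineSketch {
    public static void main(String[] args) {
        Tensor x = new DenseTensor(3).setToRandom(); // uniform [0,1] values
        Tensor y = new DenseTensor(3).setToRandom();
        // norm() is the L2 norm, so this is the standard cosine similarity.
        double cosine = x.dot(y) / (x.norm() * y.norm());
        System.out.println("cosine similarity: " + cosine);
    }
}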
@@ -566,34 +673,37 @@ public double dot(Tensor tensor1, Tensor tensor2) { assertMatching(tensor1); assertMatching(tensor2); double res = 0; - for(long i : getNonZeroElements()) - res += get(i)*tensor1.get(i)*tensor2.get(i); + for (long i : getNonZeroElements()) + res += get(i) * tensor1.get(i) * tensor2.get(i); return res; } + /** * @return The L2 norm of the tensor */ public double norm() { double res = 0; - for(long i : getNonZeroElements()) - res += get(i)*get(i); + for (long i : getNonZeroElements()) + res += get(i) * get(i); return Math.sqrt(res); } + /** * @return The sum of tensor elements */ public double sum() { double res = 0; - for(long i : getNonZeroElements()) + for (long i : getNonZeroElements()) res += get(i); return res; } + /** - * Wraps a range of elements within a tensor - * without allocating memory anew. Editing the returned - * tensor also affects the original one and conversely. - * The elements are accessed so that the starting position - * is accessed at position 0 of the starting tensor. + * Wraps a range of elements within a tensor without allocating memory anew. + * Editing the returned tensor also affects the original one and conversely. The + * elements are accessed so that the starting position is accessed at position 0 + * of the starting tensor. + * * @param from The starting position of the subtensor till its end. * @return An {@link AccessSubtensor}. * @see #accessSubtensor(long) @@ -601,42 +711,46 @@ public double sum() { public Tensor accessSubtensor(long from) { return new AccessSubtensor(this, from); } + /** - * Wraps a range of elements within a tensor - * without allocating memory anew. Editing the returned - * tensor also affects the original one and conversely. - * The elements are accessed so that the starting position - * is accessed at position 0 of the starting tensor. Accessing - * stops up to but not including the end poisition, - * so that accessSubtensor(0, size()) is - * a see-through copy of the original tensor. + * Wraps a range of elements within a tensor without allocating memory anew. + * Editing the returned tensor also affects the original one and conversely. The + * elements are accessed so that the starting position is accessed at position 0 + * of the starting tensor. Accessing stops up to but not including the end + * poisition, so that accessSubtensor(0, size()) is a see-through + * copy of the original tensor. + * * @param from The starting position of the subtensor. - * @param to The end position of the subtensor that is not included. + * @param to The end position of the subtensor that is not included. * @return An {@link AccessSubtensor}. * @see #accessSubtensor(long) */ public Tensor accessSubtensor(long from, long to) { return new AccessSubtensor(this, from, to); } + /** - * Computes the maximum tensor element. If the tensor has zero {@link #size()}, + * Computes the maximum tensor element. If the tensor has zero {@link #size()}, * this returns Double.NEGATIVE_INFINITY. + * * @return The maximum tensor element * @see #argmax() * @see #min() */ public double max() { double res = Double.NEGATIVE_INFINITY; - for(long i=0;ires) + if (value > res) res = value; } return res; } + /** - * Computes the position of the maximum tensor element. If the tensor has zero {@link #size()}, - * this returns -1. + * Computes the position of the maximum tensor element. If the tensor has zero + * {@link #size()}, this returns -1. 
+ * * @return The position of the maximum tensor element * @see #max() * @see #argmin() @@ -644,34 +758,38 @@ public double max() { public long argmax() { double res = Double.NEGATIVE_INFINITY; long pos = -1; - for(long i=0;ires) { + if (value > res) { res = value; pos = i; } } return pos; } + /** - * Computes the minimum tensor element. If the tensor has zero {@link #size()}, + * Computes the minimum tensor element. If the tensor has zero {@link #size()}, * this returns Double.POSITIVE_INFINITY. + * * @return The minimum tensor element * @see #argmin() * @see #max() */ public double min() { double res = Double.POSITIVE_INFINITY; - for(long i=0;i-1. + * Computes the position of the minimum tensor element. If the tensor has zero + * {@link #size()}, this returns -1. + * * @return The position of the minimum tensor element * @see #min() * @see #argmax() @@ -679,28 +797,32 @@ public double min() { public long argmin() { double res = Double.POSITIVE_INFINITY; long pos = -1; - for(long i=0;ithis Tensor instance. * @see #normalized() */ public Tensor setToNormalized() { double norm = norm(); - if(norm!=0) - for(long i : getNonZeroElements()) - put(i, get(i)/norm); + if (norm != 0) + for (long i : getNonZeroElements()) + put(i, get(i) / norm); return this; } + /** - * Divides the tensor's elements with their sum. Does nothing if the {@link #sum()} is zero. + * Divides the tensor's elements with their sum. Does nothing if the + * {@link #sum()} is zero. + * * @return this Tensor instance. * @see #toProbability() */ public Tensor setToProbability() { double norm = sum(); - if(norm!=0) - for(long i : getNonZeroElements()) - put(i, get(i)/norm); + if (norm != 0) + for (long i : getNonZeroElements()) + put(i, get(i) / norm); return this; } + /** * Set all tensor element values to 1/{@link #size()} + * * @return this Tensor instance. */ public Tensor setToUniform() { - for(long i=0;ithis Tensor instance. */ public Tensor setToOnes() { - for(long i=0;ithis Tensor instance. */ public Tensor setToZero() { - for(long i=0;i The automatically inferred type of the class. - * @param type The class to cast to. + * @param type The class to cast to. * @return this Tensor instance typecast to the given type. */ @SuppressWarnings("unchecked") public Type cast(Class type) { - return (Type)this; + return (Type) this; } + /** - * Automatically determines which between the tensor and a competitor is chosen create zero copies for two-argument operations. + * Automatically determines which between the tensor and a competitor is chosen + * create zero copies for two-argument operations. * * @param with The competitor. * @return A zero copy of either the tensor or the competitor. 
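A minimal sketch of the normalization setters above (class name hypothetical), turning hypothetical counts into a probability vector in place:

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;

public class ProbabilitySketch {
    public static void main(String[] args) {
        Tensor counts = new DenseTensor(4);
        counts.put(0, 2).put(1, 6).put(2, 2).put(3, 0);
        counts.setToProbability(); // divides every element by sum() = 10
        System.out.println(counts.get(1)); // 0.6
        System.out.println(counts.sum());  // ~1.0
    }
}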
@@ -877,14 +1034,13 @@ public Type cast(Class type) { protected Tensor determineZeroCopy(Tensor with) { try { return zeroCopy(size()); - } - catch(UnsupportedOperationException e) { + } catch (UnsupportedOperationException e) { } try { return with.zeroCopy(size()); + } catch (UnsupportedOperationException e) { } - catch(UnsupportedOperationException e) { - } - throw new UnsupportedOperationException("Neither "+describe()+" nor "+with.describe()+" support zeroCopy(rows, cols)"); + throw new UnsupportedOperationException( + "Neither " + describe() + " nor " + with.describe() + " support zeroCopy(rows, cols)"); } } diff --git a/JGNN/src/main/java/mklab/JGNN/core/ThreadPool.java b/JGNN/src/main/java/mklab/JGNN/core/ThreadPool.java index eb284569..1c985632 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/ThreadPool.java +++ b/JGNN/src/main/java/mklab/JGNN/core/ThreadPool.java @@ -7,9 +7,10 @@ import java.util.concurrent.TimeUnit; /** - * This class provides thread execution pool utilities while keeping track of thread - * identifiers for use by thread-specific {@link mklab.JGNN.nn.NNOperation}. - * Threads scheduling relies on Java's {@link ThreadPoolExecutor}. + * This class provides thread execution pool utilities while keeping track of + * thread identifiers for use by thread-specific + * {@link mklab.JGNN.nn.NNOperation}. Threads scheduling relies on Java's + * {@link ThreadPoolExecutor}. * * @author Emmanouil Krasanakis */ @@ -20,27 +21,33 @@ public class ThreadPool { private int maxThreads; private static ThreadPool instance = new ThreadPool(Runtime.getRuntime().availableProcessors()); + /** * Retrieves the singleton {@link ThreadPool} instance used by JGNN. + * * @return A {@link ThreadPool}. */ public static ThreadPool getInstance() { - return instance; + return instance; } - + protected ThreadPool(int maxThreads) { this.maxThreads = maxThreads; executor = null; } + protected int getUnusedId() { - for(int i=0;iThreadPool.getInstance().submit(new Runnable(){public void run(){...}});. + * Submits a runnable to be executed at some future point by a thread, for + * example via + * ThreadPool.getInstance().submit(new Runnable(){public void run(){...}});. + * * @param runnable A Java {@link Runnable}. * @see #waitForConclusion() */ @@ -48,36 +55,39 @@ public synchronized void submit(Runnable runnable) { Thread thread = new Thread() { @Override public void run() { - synchronized(threadIds) { + synchronized (threadIds) { int threadId = getUnusedId(); - if(threadId==-1) + if (threadId == -1) throw new RuntimeException("Tried to instantiate thread without an available id"); threadIds.put(Thread.currentThread(), threadId); usedIds.add(threadId); } runnable.run(); - synchronized(threadIds) { + synchronized (threadIds) { int threadId = getCurrentThreadId(); threadIds.remove(this); usedIds.remove(threadId); } } }; - if(executor==null) + if (executor == null) executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads); executor.submit(thread); } + /** * Retrieves a unique integer indicating the currently running thread. + * * @return An integer id. */ public static Integer getCurrentThreadId() { Integer ret = getInstance().threadIds.get(Thread.currentThread()); - return ret==null?-1:(int)ret; + return ret == null ? -1 : (int) ret; } + /** - * Waits until all threads in the pool have finished. This concludes only - * if all submitted runnable conclude. + * Waits until all threads in the pool have finished. This concludes only if all + * submitted runnable conclude. 
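A minimal sketch of the submit/waitForConclusion pattern documented here (class name hypothetical):

import mklab.JGNN.core.ThreadPool;

public class ThreadPoolSketch {
    public static void main(String[] args) {
        for (int i = 0; i < 4; i++) {
            final int task = i;
            ThreadPool.getInstance().submit(new Runnable() {
                @Override
                public void run() {
                    // Each running thread gets a stable integer id for thread-local state.
                    System.out.println("task " + task + " on thread " + ThreadPool.getCurrentThreadId());
                }
            });
        }
        ThreadPool.getInstance().waitForConclusion(); // blocks until all runnables finish
    }
}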
* * @see #submit(Runnable) */ @@ -85,11 +95,9 @@ public void waitForConclusion() { executor.shutdown(); try { executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES); - } - catch (InterruptedException e) { + } catch (InterruptedException e) { e.printStackTrace(); - } - finally { + } finally { executor = null; } } diff --git a/JGNN/src/main/java/mklab/JGNN/core/distribution/Normal.java b/JGNN/src/main/java/mklab/JGNN/core/distribution/Normal.java index f8f48c93..af029a2f 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/distribution/Normal.java +++ b/JGNN/src/main/java/mklab/JGNN/core/distribution/Normal.java @@ -5,65 +5,69 @@ import mklab.JGNN.core.Distribution; /** - * Implements a Normal {@link Distribution} of given mean and standard deviation. + * Implements a Normal {@link Distribution} of given mean and standard + * deviation. + * * @author Emmanouil Krasanakis */ public class Normal implements Distribution { private double mean; private double std; private Random randomGenerator; - + /** - * Instantiates a normal distribution with zero mean and standard deviation equal to 1. + * Instantiates a normal distribution with zero mean and standard deviation + * equal to 1. */ public Normal() { this(0, 1); } - + /** * Instantiates a normal distribution with a given mean and standard deviation. + * * @param mean The distribution's mean. - * @param std The distribution's standard deviation. + * @param std The distribution's standard deviation. */ public Normal(double mean, double std) { this.mean = mean; this.std = std; randomGenerator = null; } - + @Override public Normal setSeed(long seed) { randomGenerator = new Random(seed); return this; } - + @Override public Normal setMean(double mean) { this.mean = mean; return this; } - + @Override public Normal setDeviation(double std) { this.std = std; return this; } - + @Override public double getMean() { return mean; } - + @Override public double getDeviation() { return std; } - + @Override public double sample() { - if(randomGenerator==null) + if (randomGenerator == null) randomGenerator = new Random(); - return randomGenerator.nextGaussian()*std + mean; + return randomGenerator.nextGaussian() * std + mean; } } diff --git a/JGNN/src/main/java/mklab/JGNN/core/distribution/Uniform.java b/JGNN/src/main/java/mklab/JGNN/core/distribution/Uniform.java index a21a6fec..da818d23 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/distribution/Uniform.java +++ b/JGNN/src/main/java/mklab/JGNN/core/distribution/Uniform.java @@ -6,6 +6,7 @@ /** * Implements a Uniform {@link Distribution} of given bounds. + * * @author Emmanouil Krasanakis */ public class Uniform implements Distribution { @@ -13,32 +14,35 @@ public class Uniform implements Distribution { private double to; private Random randomGenerator; private static double sqrt12 = Math.sqrt(12); - + /** * Instantiates a uniform distribution that samples values from the range [0,1]. */ public Uniform() { this(0, 1); } - + /** - * Instantiates a uniform distribution that samples values from the given range [from, to]. + * Instantiates a uniform distribution that samples values from the given range + * [from, to]. + * * @param from The minimum value of the distribution. - * @param to The maximum value of the distribution. + * @param to The maximum value of the distribution. */ public Uniform(double from, double to) { setRange(from, to); randomGenerator = null; } - + /** * Sets the range of the uniform distribution. + * @param from The range's start. - * @param to The range's end.
+ * @param to The range's end. * @return this Distribution. */ public Uniform setRange(double from, double to) { - if(from>to) + if (from > to) throw new IllegalArgumentException("Invalid distribution range"); this.from = from; this.to = to; @@ -50,38 +54,38 @@ public Uniform setSeed(long seed) { randomGenerator = new Random(seed); return this; } - + @Override public Uniform setMean(double mean) { - double currentMean = (to+from)/2; - from += mean-currentMean; - to += mean-currentMean; + double currentMean = (to + from) / 2; + from += mean - currentMean; + to += mean - currentMean; return this; } - + @Override public Uniform setDeviation(double std) { - double currentMean = (to+from)/2; - double nextRange = std*sqrt12; + double currentMean = (to + from) / 2; + double nextRange = std * sqrt12; from = currentMean - nextRange; to = currentMean + nextRange; return this; } - + @Override public double getMean() { - return (to+from)/2; + return (to + from) / 2; } - + @Override public double getDeviation() { - return (from-to)/sqrt12; + return (from - to) / sqrt12; } - + @Override public double sample() { - if(randomGenerator==null) + if (randomGenerator == null) randomGenerator = new Random(); - return from+randomGenerator.nextFloat()*(to-from); + return from + randomGenerator.nextFloat() * (to - from); } } diff --git a/JGNN/src/main/java/mklab/JGNN/core/distribution/package-info.java b/JGNN/src/main/java/mklab/JGNN/core/distribution/package-info.java new file mode 100644 index 00000000..ff0710bf --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/core/distribution/package-info.java @@ -0,0 +1,8 @@ +/** + * Contains data distributions that produce one numerical value and can be used + * for tensor value initialization. The {@link mklab.JGNN.nn.initializers} + * package uses these distributions to initialize trainable parameters. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.core.distribution; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/core/empy/package-info.java b/JGNN/src/main/java/mklab/JGNN/core/empy/package-info.java new file mode 100644 index 00000000..67062968 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/core/empy/package-info.java @@ -0,0 +1,11 @@ +/** + * Contains empty extensions of datatypes that hold only dimension names and + * sizes but no data. These types are pervasive in that any operation they are + * involved in has an empty outcome too. The main usage of empty data types is to + * verify the integrity of created models in terms of operations without actually + * performing any computations. For example, empty inputs are preferred for + * {@link mklab.JGNN.adhoc.ModelBuilder#autosize(mklab.JGNN.core.Tensor...)}. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.core.empy; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessCol.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessCol.java index a714c6f0..edf70398 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessCol.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessCol.java @@ -8,9 +8,9 @@ /** * Accesses a column of a {@link Matrix} as if it were a dense {@link Tensor}. - * Prefer using {@link mklab.JGNN.core.Matrix#accessCol(long)}, which wraps usage - * of this class. Instances of this class share elements with the matrix which - * they access and do not allocate new memory. + * Prefer using {@link mklab.JGNN.core.Matrix#accessCol(long)}, which wraps + * usage of this class.
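// Editor's note: an illustrative sketch, not part of the patch, sampling from
// the Normal and Uniform distributions defined above; both are reseedable and
// reconfigurable in place through their fluent setters.
import mklab.JGNN.core.distribution.Normal;
import mklab.JGNN.core.distribution.Uniform;

class DistributionExample {
    public static void main(String[] args) {
        Normal normal = new Normal(0, 1).setSeed(42);
        Uniform uniform = new Uniform().setRange(-1, 1).setSeed(42);
        System.out.println(normal.sample());
        // setDeviation recenters the range around the current mean so that the
        // distribution attains the requested standard deviation
        System.out.println(uniform.setDeviation(0.5).sample());
    }
}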
Instances of this class share elements with the matrix + * which they access and do not allocate new memory. * * @author Emmanouil Krasanakis * @see AccessRow @@ -19,27 +19,28 @@ public class AccessCol extends Tensor { private Matrix matrix; private long col; private long estimateNonZeroes; - + /** * Instantiates a see-through access of a matrix column. + * * @param matrix The base matrix. - * @param col Which column to access. + * @param col Which column to access. */ public AccessCol(Matrix matrix, long col) { super(matrix.getRows()); this.matrix = matrix; this.col = col; - this.estimateNonZeroes = matrix.estimateNumNonZeroElements()/matrix.getCols(); + this.estimateNonZeroes = matrix.estimateNumNonZeroElements() / matrix.getCols(); this.setDimensionName(matrix.getColName()); - if(col<0 || col>=matrix.getCols()) - throw new IllegalArgumentException("Column "+col+" does not exist in "+matrix.describe()); + if (col < 0 || col >= matrix.getCols()) + throw new IllegalArgumentException("Column " + col + " does not exist in " + matrix.describe()); } - + @Override public long estimateNumNonZeroElements() { return estimateNonZeroes; } - + @Override protected void allocate(long size) { } diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessRow.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessRow.java index cae1a8e0..c50d76b1 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessRow.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/AccessRow.java @@ -8,9 +8,9 @@ /** * Accesses a row of a {@link Matrix} as if it were a dense {@link Tensor}. - * Prefer using {@link mklab.JGNN.core.Matrix#accessRow(long)}, which wraps usage - * of this class. Instances of this class share elements with the matrix which - * they access and do not allocate new memory. + * Prefer using {@link mklab.JGNN.core.Matrix#accessRow(long)}, which wraps + * usage of this class. Instances of this class share elements with the matrix + * which they access and do not allocate new memory. * * @author Emmanouil Krasanakis * @see AccessCol @@ -19,26 +19,28 @@ public class AccessRow extends Tensor { private Matrix matrix; private long row; private long estimateNonZeroes; + /** * Instantiates a see-through access of a matrix row. + * * @param matrix The base matrix. - * @param row Which row to access. + * @param row Which row to access. 
*/ public AccessRow(Matrix matrix, long row) { super(matrix.getCols()); this.matrix = matrix; this.row = row; this.setDimensionName(matrix.getRowName()); - this.estimateNonZeroes = matrix.estimateNumNonZeroElements()/matrix.getRows(); - if(row<0 || row>=matrix.getRows()) - throw new IllegalArgumentException("Row "+row+" does not exist in "+matrix.describe()); + this.estimateNonZeroes = matrix.estimateNumNonZeroElements() / matrix.getRows(); + if (row < 0 || row >= matrix.getRows()) + throw new IllegalArgumentException("Row " + row + " does not exist in " + matrix.describe()); } - + @Override public long estimateNumNonZeroElements() { return estimateNonZeroes; } - + @Override protected void allocate(long size) { } diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/ColumnRepetition.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/ColumnRepetition.java index c65ceefc..dae36b6c 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/ColumnRepetition.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/ColumnRepetition.java @@ -1,6 +1,5 @@ package mklab.JGNN.core.matrix; -import java.util.AbstractMap; import java.util.Iterator; import java.util.Map.Entry; @@ -9,9 +8,9 @@ import mklab.JGNN.core.util.FastEntry; /** - * Defines a matrix whose columns are all a copy of a {@link Tensor}. - * To avoid potential confusion, setting element values (and all supported operations) throws - * an exception. + * Defines a matrix whose columns are all a copy of a {@link Tensor}. To avoid + * potential confusion, setting element values (and all supported operations) + * throws an exception. * * @author Emmanouil Krasanakis * @see RowRepetition @@ -20,23 +19,27 @@ public class ColumnRepetition extends Matrix { protected class Repeat1DIterator implements Iterator, Iterable { private Iterator iterator; private long current; + public Repeat1DIterator() { this.iterator = column.iterator(); current = 0; } + @Override public boolean hasNext() { - return current iterator() { return this; @@ -46,18 +49,21 @@ public Iterator iterator() { protected class Repeat2DIterator implements Iterator>, Iterable> { private Iterator iterator; private long current; - private final FastEntry ret = new FastEntry(); + private final FastEntry ret = new FastEntry(); + public Repeat2DIterator() { this.iterator = column.iterator(); current = 0; } + @Override public boolean hasNext() { - return current next() { - if(!iterator.hasNext()) { + if (!iterator.hasNext()) { current += 1; iterator = column.iterator(); } @@ -66,18 +72,22 @@ public Entry next() { ret.setKey(pos); ret.setValue(current); return ret; - //return new AbstractMap.SimpleEntry(Long.valueOf(pos), Long.valueOf(current)); + // return new AbstractMap.SimpleEntry(Long.valueOf(pos), + // Long.valueOf(current)); } + @Override public Iterator> iterator() { return this; } } - + protected Tensor column; + /** * Instantiates a matrix repeating a tensor to be treated as a column. - * @param times The number of times the column should be repeated. + * + * @param times The number of times the column should be repeated. * @param column The column {@link Tensor}. */ public ColumnRepetition(long times, Tensor column) { @@ -85,43 +95,53 @@ public ColumnRepetition(long times, Tensor column) { this.column = column; this.setDimensionName(null, column.getDimensionName()); } + /** * Retrieves the wrapped column tensor. + * * @return The wrapped {@link Tensor}. 
*/ public Tensor getColumn() { return column; } + @Override public Matrix zeroCopy(long rows, long cols) { return new DenseMatrix(getRows(), getCols()); } + @Override protected void allocate(long size) { } + @Override public Tensor put(long pos, double value) { - throw new RuntimeException("ColumnRepetion does not support changing base column values. Consider using getColumn().put(...)"); + throw new RuntimeException( + "ColumnRepetition does not support changing base column values. Consider using getColumn().put(...)"); } + @Override public double get(long pos) { - return column.get(pos/getRows()); + return column.get(pos / getRows()); } @Override public Iterator<Long> traverseNonZeroElements() { return new Repeat1DIterator(); } + @Override public Iterable<Entry<Long, Long>> getNonZeroEntries() { return new Repeat2DIterator(); } + @Override public void release() { } + @Override public void persist() { column.persist(); } - } diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java index 2ff03c81..7c7b9c47 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java @@ -10,130 +10,138 @@ /** * Implements a dense {@link Matrix} where all elements are stored in memory. - * For matrices with more than MAXINT number of elements or many zeros use the {@link SparseMatrix} - * structure. + * For matrices with more than MAXINT number of elements or many zeros use the + * {@link SparseMatrix} structure. * * @author Emmanouil Krasanakis */ public class DenseMatrix extends Matrix { DenseTensor tensor; + /** * Generates a dense matrix with the designated number of rows and columns. + * * @param rows The number of rows. * @param cols The number of columns.
*/ public DenseMatrix(long rows, long cols) { super(rows, cols); } + @Override public Matrix zeroCopy(long rows, long cols) { - if(rows>100000/cols && vectorization) + if (rows > 100000 / cols && vectorization) return new VectorizedMatrix(rows, cols).setDimensionName(getRowName(), getColName()); return new DenseMatrix(rows, cols).setDimensionName(getRowName(), getColName()); } + @Override protected void allocate(long size) { tensor = new DenseTensor(size); } + @Override public Tensor put(long pos, double value) { tensor.put(pos, value); return this; } + @Override public double get(long pos) { return tensor.get(pos); } + @Override public Iterator traverseNonZeroElements() { return tensor.traverseNonZeroElements(); } + @Override public Iterable> getNonZeroEntries() { return new Range2D(0, getRows(), 0, getCols()); } + @Override public void release() { tensor.release(); } + @Override public void persist() { tensor.persist(); } - @Override public Matrix matmul(Matrix with) { - if(with instanceof SparseMatrix) + if (with instanceof SparseMatrix) return super.matmul(with); - if(getCols()!=with.getRows()) - throw new IllegalArgumentException("Mismatched matrix sizes between "+describe()+" and "+with.describe()); - if(getColName()!=null && with.getRowName()!=null && !getColName().equals(with.getRowName())) - throw new IllegalArgumentException("Mismatched matrix dimension names between "+describe()+" and "+with.describe()); + if (getCols() != with.getRows()) + throw new IllegalArgumentException( + "Mismatched matrix sizes between " + describe() + " and " + with.describe()); + if (getColName() != null && with.getRowName() != null && !getColName().equals(with.getRowName())) + throw new IllegalArgumentException( + "Mismatched matrix dimension names between " + describe() + " and " + with.describe()); DenseMatrix ret = new DenseMatrix(getRows(), with.getCols()); - double[] with_tensor_values = (with instanceof VectorizedMatrix) - ?((VectorizedMatrix) with).tensor.values - :((DenseMatrix) with).tensor.values; - + double[] with_tensor_values = (with instanceof VectorizedMatrix) ? 
((VectorizedMatrix) with).tensor.values + : ((DenseMatrix) with).tensor.values; + int rows = (int) getRows(); int cols = (int) getCols(); int withRows = (int) with.getRows(); int withCols = (int) with.getCols(); - for(int col2=0;col2, Iterable { private Iterator iterator; + public Diagonal1DIterator(Iterator iterator) { this.iterator = iterator; } + @Override public boolean hasNext() { return iterator.hasNext(); } + @Override public Long next() { long pos = iterator.next(); - return pos+pos*getRows(); + return pos + pos * getRows(); } + @Override public Iterator iterator() { return this; @@ -38,33 +43,38 @@ public Iterator iterator() { protected class Diagonal2DIterator implements Iterator>, Iterable> { private Iterator iterator; - private final FastEntry ret = new FastEntry(); + private final FastEntry ret = new FastEntry(); + public Diagonal2DIterator(Iterator iterator) { this.iterator = iterator; } + @Override public boolean hasNext() { return iterator.hasNext(); } + @Override public Entry next() { long pos = iterator.next(); ret.setKey(pos); ret.setValue(pos); return ret; - //return new AbstractMap.SimpleEntry(Long.valueOf(pos), Long.valueOf(pos)); + // return new AbstractMap.SimpleEntry(Long.valueOf(pos), + // Long.valueOf(pos)); } + @Override public Iterator> iterator() { return this; } } - + @Override public long estimateNumNonZeroElements() { return this.diagonal.estimateNumNonZeroElements(); } - + protected Diagonal(Tensor diagonal) { super(diagonal.size(), diagonal.size()); this.diagonal = diagonal; @@ -77,7 +87,7 @@ public Iterable> getNonZeroEntries() { @Override public Matrix zeroCopy(long rows, long cols) { - if(rows!=cols) + if (rows != cols) throw new UnsupportedOperationException("Zero copies of diagonal matrices should be square matrices"); return new Diagonal(diagonal.zeroCopy(rows)); } @@ -90,7 +100,7 @@ protected void allocate(long size) { public Tensor put(long pos, double value) { long row = pos % getRows(); long col = pos / getRows(); - if(row!=col) + if (row != col) throw new UnsupportedOperationException("Cannot put values in off-diagonal elements of diagonal matrices"); diagonal.put(row, value); return this; @@ -100,7 +110,7 @@ public Tensor put(long pos, double value) { public double get(long pos) { long row = pos % getRows(); long col = pos / getRows(); - if(row==col) + if (row == col) return diagonal.get(row); return 0; } diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/RepeatMatrix.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/RepeatMatrix.java index b7f96049..cae87b27 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/RepeatMatrix.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/RepeatMatrix.java @@ -15,42 +15,52 @@ */ public class RepeatMatrix extends Matrix { private Tensor tensor; + /** * Generates a dense matrix with the designated number of rows and columns. + * * @param rows The number of rows. * @param cols The number of columns. 
*/ public RepeatMatrix(double value, long rows, long cols) { super(rows, cols); - tensor = new RepeatTensor(value, rows*cols); + tensor = new RepeatTensor(value, rows * cols); } + @Override public Matrix zeroCopy(long rows, long cols) { return new DenseMatrix(getRows(), getCols()); } + @Override protected void allocate(long size) { } + @Override public Tensor put(long pos, double value) { throw new UnsupportedOperationException(); } + @Override public double get(long pos) { return tensor.get(pos); } + @Override public Iterator traverseNonZeroElements() { return tensor.traverseNonZeroElements(); } + @Override public Iterable> getNonZeroEntries() { return new Range2D(0, getRows(), 0, getCols()); } + @Override public void release() { tensor.release(); } + @Override public void persist() { tensor.persist(); diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/RowRepetition.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/RowRepetition.java index 1952970c..73ac3f9d 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/RowRepetition.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/RowRepetition.java @@ -1,19 +1,16 @@ package mklab.JGNN.core.matrix; -import java.util.AbstractMap; -import java.util.ArrayList; import java.util.Iterator; import java.util.Map.Entry; import mklab.JGNN.core.Matrix; import mklab.JGNN.core.Tensor; import mklab.JGNN.core.util.FastEntry; -import mklab.JGNN.core.util.Range2D; /** - * Defines a matrix whose rows are all a copy of a {@link Tensor}. - * To avoid potential confusion, setting element values (and all supported operations) throws - * an exception. + * Defines a matrix whose rows are all a copy of a {@link Tensor}. To avoid + * potential confusion, setting element values (and all supported operations) + * throws an exception. * * @author Emmanouil Krasanakis * @see ColumnRepetition @@ -22,23 +19,27 @@ public class RowRepetition extends Matrix { protected class Repeat1DIterator implements Iterator, Iterable { private Iterator iterator; private long current; + public Repeat1DIterator() { this.iterator = row.iterator(); current = 0; } + @Override public boolean hasNext() { - return current iterator() { return this; @@ -48,18 +49,21 @@ public Iterator iterator() { protected class Repeat2DIterator implements Iterator>, Iterable> { private Iterator iterator; private long current; - private final FastEntry ret = new FastEntry(); + private final FastEntry ret = new FastEntry(); + public Repeat2DIterator() { this.iterator = row.iterator(); current = 0; } + @Override public boolean hasNext() { - return current next() { - if(!iterator.hasNext()) { + if (!iterator.hasNext()) { current += 1; iterator = row.iterator(); } @@ -67,36 +71,44 @@ public Entry next() { ret.setKey(current); ret.setValue(pos); return ret; - //return new AbstractMap.SimpleEntry(Long.valueOf(current), Long.valueOf(pos)); + // return new AbstractMap.SimpleEntry(Long.valueOf(current), + // Long.valueOf(pos)); } + @Override public Iterator> iterator() { return this; } } - + protected Tensor row; + /** * Instantiates a matrix repeating a tensor to be treated as a row. + * * @param column The row {@link Tensor}. - * @param times The number of times the row should be repeated. + * @param times The number of times the row should be repeated. 
*/ public RowRepetition(Tensor row, long times) { super(row.size(), times); this.row = row; this.setDimensionName(row.getDimensionName(), null); } + @Override public Matrix zeroCopy(long rows, long cols) { return new DenseMatrix(rows, cols); } + @Override protected void allocate(long size) { } + @Override public Tensor put(long pos, double value) { throw new RuntimeException("ColumnRepetion does not support method puts"); } + @Override public double get(long pos) { return row.get(pos % getRows()); @@ -106,16 +118,19 @@ public double get(long pos) { public Iterator traverseNonZeroElements() { return new Repeat1DIterator(); } + @Override public Iterable> getNonZeroEntries() { return new Repeat2DIterator(); } + @Override public void release() { } + @Override public void persist() { row.persist(); } - + } diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseMatrix.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseMatrix.java index ac5b64c5..321c21c0 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseMatrix.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseMatrix.java @@ -9,87 +9,105 @@ import mklab.JGNN.core.util.FastEntry; /** - * A sparse {@link Matrix} that allocates memory only for non-zero elements. Operations - * that involve all matrix elements are slower compared to a {@link DenseMatrix}. + * A sparse {@link Matrix} that allocates memory only for non-zero elements. + * Operations that involve all matrix elements are slower compared to a + * {@link DenseMatrix}. * * @author Emmanouil Krasanakis */ public class SparseMatrix extends Matrix { private SparseTensor tensor; + /** * Generates a sparse matrix with the designated number of rows and columns. + * * @param rows The number of rows. * @param cols The number of columns. 
*/ public SparseMatrix(long rows, long cols) { super(rows, cols); } + @Override public Matrix zeroCopy(long rows, long cols) { return new SparseMatrix(rows, cols); } + @Override protected void allocate(long size) { tensor = new SparseTensor(size); } + @Override public Tensor put(long pos, double value) { tensor.put(pos, value); return this; } + @Override public double get(long pos) { return tensor.get(pos); } + @Override public Iterator<Long> traverseNonZeroElements() { return tensor.traverseNonZeroElements(); } + @Override public String describe() { - return super.describe()+" "+estimateNumNonZeroElements()+"/"+(getRows()*getCols())+" entries"; + return super.describe() + " " + estimateNumNonZeroElements() + "/" + (getRows() * getCols()) + " entries"; } + @Override public long estimateNumNonZeroElements() { return tensor.estimateNumNonZeroElements(); } + @Override public Iterable<Entry<Long, Long>> getNonZeroEntries() { - /*ArrayList<Entry<Long,Long>> ret = new ArrayList<Entry<Long,Long>>(); - for(long i : getNonZeroElements()) - ret.add(new AbstractMap.SimpleEntry<Long,Long>(i % getRows(), i/getRows())); - return ret;*/ + /* + * ArrayList<Entry<Long, Long>> ret = new ArrayList<Entry<Long, Long>>(); + * for(long i : getNonZeroElements()) ret.add(new AbstractMap.SimpleEntry<Long, Long>(i % getRows(), i/getRows())); return ret; + */ return new Sparse2DIterator(traverseNonZeroElements()); } + @Override public void release() { tensor.release(); } + @Override public void persist() { tensor.persist(); } - protected class Sparse2DIterator implements Iterator<Entry<Long, Long>>, Iterable<Entry<Long, Long>> { private Iterator<Long> iterator; private long rows; - private final FastEntry<Long,Long> ret = new FastEntry<Long,Long>(); + private final FastEntry<Long, Long> ret = new FastEntry<Long, Long>(); + public Sparse2DIterator(Iterator<Long> iterator) { this.iterator = iterator; rows = getRows(); } + @Override public boolean hasNext() { return iterator.hasNext(); } + @Override public Entry<Long, Long> next() { long pos = iterator.next(); ret.setKey(pos % rows); - ret.setValue(pos/rows); + ret.setValue(pos / rows); return ret; - //return new AbstractMap.SimpleEntry<Long, Long>(pos % rows, pos/rows); + // return new AbstractMap.SimpleEntry<Long, Long>(pos % rows, pos/rows); } + @Override public Iterator<Entry<Long, Long>> iterator() { return this; diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseSymmetric.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseSymmetric.java index b075ffb3..5c251f0d 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseSymmetric.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/SparseSymmetric.java @@ -10,62 +10,73 @@ import mklab.JGNN.core.tensor.SparseTensor; /** - * Defines a {@link SparseMatrix} that is constrained to be symmetric - * in that it returns the sum of values put on elements (i,j) and (j ,i). + * Defines a {@link SparseMatrix} that is constrained to be symmetric in that it + * returns the sum of values put on elements (i, j) and (j, i). * * @author Emmanouil Krasanakis * @deprecated Under development. */ public class SparseSymmetric extends Matrix { private Tensor tensor; + /** * Generates a symmetric matrix with the designated number of rows and columns. + * * @param rows The number of rows. * @param cols The number of columns.
*/ public SparseSymmetric(long rows, long cols) { super(rows, cols); } + @Override public Matrix zeroCopy(long rows, long cols) { return new SparseSymmetric(rows, cols); } + @Override protected void allocate(long size) { tensor = new SparseTensor(size); } + @Override public Tensor put(long pos, double value) { tensor.put(pos, value); return this; } + @Override public double get(long pos) { long rows = getRows(); - return tensor.get(pos) + tensor.get((pos % rows)*rows + pos/rows); + return tensor.get(pos) + tensor.get((pos % rows) * rows + pos / rows); } + @Override public Iterator traverseNonZeroElements() { return tensor.traverseNonZeroElements(); } + @Override public String describe() { - return super.describe()+" "+estimateNumNonZeroElements()+"/"+(getRows()*getCols())+" entries"; + return super.describe() + " " + estimateNumNonZeroElements() + "/" + (getRows() * getCols()) + " entries"; } + @Override public Iterable> getNonZeroEntries() { ArrayList> ret = new ArrayList>(); long rows = getRows(); - for(long i : getNonZeroElements()) - ret.add(new AbstractMap.SimpleEntry(i % rows, i/rows)); - for(long i : getNonZeroElements()) - ret.add(new AbstractMap.SimpleEntry(i / rows, i%rows)); + for (long i : getNonZeroElements()) + ret.add(new AbstractMap.SimpleEntry(i % rows, i / rows)); + for (long i : getNonZeroElements()) + ret.add(new AbstractMap.SimpleEntry(i / rows, i % rows)); return ret; } + @Override public void release() { tensor.release(); } + @Override public void persist() { tensor.persist(); diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/TransposedMatrix.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/TransposedMatrix.java index edf5e7e5..adbecd5b 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/TransposedMatrix.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/TransposedMatrix.java @@ -8,56 +8,68 @@ import mklab.JGNN.core.Tensor; /** - * Generates a transposed version of a base matrix, with which it shares elements. - * This avoids excessive memory allocation and can be used to quickly perform - * operations with a transposed version of a matrix. Prefer using - * {@link mklab.JGNN.core.Matrix#asTransposed()}, which wraps usage of this class. + * Generates a transposed version of a base matrix, with which it shares + * elements. This avoids excessive memory allocation and can be used to quickly + * perform operations with a transposed version of a matrix. Prefer using + * {@link mklab.JGNN.core.Matrix#asTransposed()}, which wraps usage of this + * class. 
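// Editor's note: an illustrative sketch, not part of the patch, of the
// shared-element semantics described above; asTransposed() copies no data.
import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.matrix.DenseMatrix;

class TransposedViewExample {
    public static void main(String[] args) {
        Matrix m = new DenseMatrix(2, 3);
        Matrix t = m.asTransposed();     // 3x2 view over the same elements
        m.put(0, 1, 5);                  // writing through the base matrix ...
        System.out.println(t.get(1, 0)); // ... is visible through the view: 5.0
    }
}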
* * @author Emmanouil Krasanakis */ public class TransposedMatrix extends Matrix { private Matrix matrix; + protected class Transposed1DIterator implements Iterator, Iterable { private Iterator iterator; + public Transposed1DIterator(Iterator iterator) { this.iterator = iterator; } + @Override public boolean hasNext() { return iterator.hasNext(); } + @Override public Long next() { long pos = iterator.next(); long row = pos % getRows(); long col = pos / getRows(); - return col+row*getRows(); //transposed of Matrix.put convention + return col + row * getRows(); // transposed of Matrix.put convention } + @Override public Iterator iterator() { return this; } } + protected class Transposed2DIterator implements Iterator>, Iterable> { private Iterator> iterator; + public Transposed2DIterator(Iterator> iterator) { this.iterator = iterator; } + @Override public boolean hasNext() { return iterator.hasNext(); } + @Override public Entry next() { Entry origin = iterator.next(); - return new AbstractMap.SimpleEntry(Long.valueOf(origin.getValue()), Long.valueOf(origin.getKey())); + return new AbstractMap.SimpleEntry(Long.valueOf(origin.getValue()), + Long.valueOf(origin.getKey())); } + @Override public Iterator> iterator() { return this; } } - + public TransposedMatrix(Matrix matrix) { super(matrix.getCols(), matrix.getRows()); this.matrix = matrix; @@ -70,7 +82,7 @@ public TransposedMatrix(Matrix matrix) { public long estimateNumNonZeroElements() { return matrix.estimateNumNonZeroElements(); } - + @Override public Iterable> getNonZeroEntries() { return new Transposed2DIterator(matrix.getNonZeroEntries().iterator()); @@ -104,7 +116,7 @@ public double get(long pos) { public Iterator traverseNonZeroElements() { return new Transposed1DIterator(matrix.traverseNonZeroElements()); } - + @Override public Matrix asTransposed() { return matrix; @@ -112,7 +124,7 @@ public Matrix asTransposed() { @Override public String describe() { - return matrix.getClass().getSimpleName()+" ("+getRows()+","+getCols()+")"; + return matrix.getClass().getSimpleName() + " (" + getRows() + "," + getCols() + ")"; } @Override @@ -123,5 +135,5 @@ public void release() { public void persist() { matrix.persist(); } - + } diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java index 41215644..8d6edf55 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java @@ -1,6 +1,5 @@ package mklab.JGNN.core.matrix; -import java.util.ArrayList; import java.util.Iterator; import java.util.Map.Entry; @@ -11,131 +10,138 @@ /** * Implements a dense {@link Matrix} where all elements are stored in memory. - * For matrices with more than MAXINT number of elements or many zeros use the {@link SparseMatrix} - * structure. + * For matrices with more than MAXINT number of elements or many zeros use the + * {@link SparseMatrix} structure. * * @author Emmanouil Krasanakis */ public class VectorizedMatrix extends Matrix { public VectorizedTensor tensor; + /** * Generates a dense matrix with the designated number of rows and columns. + * * @param rows The number of rows. * @param cols The number of columns. 
*/ public VectorizedMatrix(long rows, long cols) { super(rows, cols); } + @Override public Matrix zeroCopy(long rows, long cols) { - if(rows<=100000/cols) + if (rows <= 100000 / cols) return new DenseMatrix(rows, cols).setDimensionName(getRowName(), getColName()); return new VectorizedMatrix(rows, cols).setDimensionName(getRowName(), getColName()); } + @Override protected void allocate(long size) { tensor = new VectorizedTensor(size); } + @Override public Tensor put(long pos, double value) { tensor.put(pos, value); return this; } + @Override public double get(long pos) { return tensor.get(pos); } + @Override public Iterator traverseNonZeroElements() { return tensor.traverseNonZeroElements(); } + @Override public Iterable> getNonZeroEntries() { return new Range2D(0, getRows(), 0, getCols()); } + @Override public void release() { tensor.release(); } + @Override public void persist() { tensor.persist(); } - @Override public Matrix matmul(Matrix with) { - if (with instanceof SparseMatrix) - return super.matmul(with); - if(getCols()!=with.getRows()) - throw new IllegalArgumentException("Mismatched matrix sizes between "+describe()+" and "+with.describe()); - if(getColName()!=null && with.getRowName()!=null && !getColName().equals(with.getRowName())) - throw new IllegalArgumentException("Mismatched matrix dimension names between "+describe()+" and "+with.describe()); + if (with instanceof SparseMatrix) + return super.matmul(with); + if (getCols() != with.getRows()) + throw new IllegalArgumentException( + "Mismatched matrix sizes between " + describe() + " and " + with.describe()); + if (getColName() != null && with.getRowName() != null && !getColName().equals(with.getRowName())) + throw new IllegalArgumentException( + "Mismatched matrix dimension names between " + describe() + " and " + with.describe()); VectorizedMatrix ret = new VectorizedMatrix(getRows(), with.getCols()); - double[] with_tensor_values = (with instanceof VectorizedMatrix) - ?((VectorizedMatrix) with).tensor.values - :((DenseMatrix) with).tensor.values; - + double[] with_tensor_values = (with instanceof VectorizedMatrix) ? ((VectorizedMatrix) with).tensor.values + : ((DenseMatrix) with).tensor.values; + int rows = (int) getRows(); int cols = (int) getCols(); int withRows = (int) with.getRows(); int withCols = (int) with.getCols(); - for(int col2=0;col2 + * Wraps a list of tensors into a matrix with the tensors as columns. Does not + * allocate additional elements. Editing the matrix edits the original tensors + * and conversely.
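// Editor's note: an illustrative sketch, not part of the patch, of the
// see-through wrapping described above; edits flow through to the wrapped
// tensors because no elements are copied.
import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.matrix.WrapCols;
import mklab.JGNN.core.tensor.DenseTensor;

class WrapColsExample {
    public static void main(String[] args) {
        Tensor col0 = new DenseTensor(3);
        Tensor col1 = new DenseTensor(3);
        Matrix m = new WrapCols(col0, col1); // 3x2 matrix over the two tensors
        m.put(1, 1, 7);                      // edits col1 in place
        System.out.println(col1.get(1));     // 7.0
    }
}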
+ * * @author Emmanouil Krasanakis */ public class WrapCols extends Matrix { @@ -23,23 +22,27 @@ public class WrapCols extends Matrix { protected class Wrap1DIterator implements Iterator, Iterable { private Iterator iterator; private long current; + public Wrap1DIterator() { this.iterator = cols.get(0).iterator(); current = 0; } + @Override public boolean hasNext() { - while(!iterator.hasNext() && current iterator() { return this; @@ -49,77 +52,91 @@ public Iterator iterator() { protected class Wrap2DIterator implements Iterator>, Iterable> { private Iterator iterator; private long current; - private final FastEntry ret = new FastEntry(); + private final FastEntry ret = new FastEntry(); + public Wrap2DIterator() { this.iterator = cols.get(0).iterator(); current = 0; } + @Override public boolean hasNext() { - while(!iterator.hasNext() && current next() { long pos = iterator.next(); ret.setKey(pos); ret.setValue(current); return ret; - //return new AbstractMap.SimpleEntry(Long.valueOf(pos), Long.valueOf(current)); + // return new AbstractMap.SimpleEntry(Long.valueOf(pos), + // Long.valueOf(current)); } + @Override public Iterator> iterator() { return this; } } - + private List cols; private Matrix zeroCopyType; private long estimateNonZeroes; + public WrapCols(Tensor... cols) { this(Arrays.asList(cols)); } + public WrapCols(List cols) { super(cols.get(0).size(), cols.size()); this.cols = cols; estimateNonZeroes = 0; - for(Tensor col : cols) { + for (Tensor col : cols) { col.assertMatching(cols.get(0)); estimateNonZeroes += col.estimateNumNonZeroElements(); } setColName(cols.get(0).getDimensionName()); } + @Override public long estimateNumNonZeroElements() { return estimateNonZeroes; } + /** * Sets a prototype matrix from which to borrow copying operations. - * @param zeroCopyType A {@link Matrix} instance from which to borrow {@link #zeroCopy(long, long)}. + * + * @param zeroCopyType A {@link Matrix} instance from which to borrow + * {@link #zeroCopy(long, long)}. * @return this object */ public WrapCols setZeroCopyType(Matrix zeroCopyType) { this.zeroCopyType = zeroCopyType; return this; } + @Override public Matrix zeroCopy(long rows, long cols) { - if(zeroCopyType!=null) + if (zeroCopyType != null) return zeroCopyType.zeroCopy(rows, cols); - if(cols!=getCols() && rows!=getCols()) + if (cols != getCols() && rows != getCols()) throw new UnsupportedOperationException(); - long rowSize = cols==getCols()?rows:cols; + long rowSize = cols == getCols() ? rows : cols; ArrayList newCols = new ArrayList(); - for(Tensor col : this.cols) + for (Tensor col : this.cols) newCols.add(col.zeroCopy(rowSize)); - return cols==getCols()?new WrapCols(newCols):new WrapRows(newCols); + return cols == getCols() ? 
new WrapCols(newCols) : new WrapRows(newCols); } + @Override protected void allocate(long size) { } + @Override public Tensor put(long pos, double value) { long row = pos % getRows(); @@ -127,31 +144,36 @@ public Tensor put(long pos, double value) { cols.get((int) col).put(row, value); return this; } + @Override public double get(long pos) { long row = pos % getRows(); long col = pos / getRows(); return cols.get((int) col).get(row); } + @Override public Iterator<Long> traverseNonZeroElements() { return new Wrap1DIterator(); } + @Override public Iterable<Entry<Long, Long>> getNonZeroEntries() { return new Wrap2DIterator(); } - @Override public Tensor accessCol(long col) { return cols.get((int) col); } + @Override public void release() { } + @Override public void persist() { - for(Tensor col : cols) + for (Tensor col : cols) col.persist(); } } diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/WrapRows.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/WrapRows.java index 288b7ce0..1f46f347 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/matrix/WrapRows.java +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/WrapRows.java @@ -1,6 +1,5 @@ package mklab.JGNN.core.matrix; -import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/package-info.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/package-info.java new file mode 100644 index 00000000..5b6acd0e --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/package-info.java @@ -0,0 +1,8 @@ +/** + * Contains implementations of matrix classes, of transparent access to parts of + * these classes, and of column/row repetitions that broadcast vectors into + * matrices. Matrices rely on tensors for density/sparsity. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.core.matrix; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/core/package-info.java b/JGNN/src/main/java/mklab/JGNN/core/package-info.java new file mode 100644 index 00000000..7c0a3f51 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/core/package-info.java @@ -0,0 +1,12 @@ +/** + * Contains base numerical data classes, as well as supporting abstract classes. + * Provided classes are endowed with binary, unary, and in-place editing + * operations. Sub-packages are responsible for implementing variations, for + * example organizing data in sparse, dense, or SIMD dense tensors, or providing + * views to segments of larger structures. This package and its sub-packages can + * be used as a standalone product for vector/tensor and matrix operations using + * only native Java. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.core; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/core/tensor/package-info.java b/JGNN/src/main/java/mklab/JGNN/core/tensor/package-info.java new file mode 100644 index 00000000..ef2be4c7 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/core/tensor/package-info.java @@ -0,0 +1,10 @@ +/** + * Contains implementations of tensor classes, as well as transparent access to + * parts of these classes. Depending on the type of tensor, internal data can be + * sparse or dense, with dense tensors being further subdivided into traditional + * Java implementations and implementations leveraging SIMD (Single Instruction/Multiple + * Data) optimizations.
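// Editor's note: an illustrative sketch, not part of the patch, contrasting the
// tensor variants this package documents.
import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;
import mklab.JGNN.core.tensor.SparseTensor;

class TensorVariantsExample {
    public static void main(String[] args) {
        Tensor dense = new DenseTensor(1000);   // allocates storage for every element
        Tensor sparse = new SparseTensor(1000); // allocates memory only for non-zero elements
        sparse.put(42, 1.0);
        // both variants expose the same Tensor API, e.g. non-zero traversal:
        for (long pos : sparse.getNonZeroElements())
            System.out.println(pos + " -> " + sparse.get(pos));
    }
}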
+ * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.core.tensor; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/core/util/Range2D.java b/JGNN/src/main/java/mklab/JGNN/core/util/Range2D.java index eb7c4778..37f6ca8b 100644 --- a/JGNN/src/main/java/mklab/JGNN/core/util/Range2D.java +++ b/JGNN/src/main/java/mklab/JGNN/core/util/Range2D.java @@ -1,6 +1,5 @@ package mklab.JGNN.core.util; -import java.util.AbstractMap; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Map.Entry; diff --git a/JGNN/src/main/java/mklab/JGNN/core/util/package-info.java b/JGNN/src/main/java/mklab/JGNN/core/util/package-info.java new file mode 100644 index 00000000..9ed69e56 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/core/util/package-info.java @@ -0,0 +1,7 @@ +/** + * Contains utility functions that are employed internally, mainly optimized 1D + * and 2D iterators. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.core.util; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/Initializer.java b/JGNN/src/main/java/mklab/JGNN/nn/Initializer.java index 43078438..b75f3b21 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/Initializer.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/Initializer.java @@ -8,9 +8,10 @@ public abstract class Initializer { /** * Applies the initializer to a given model's parameters. + * * @param model The given model. * @return The given model after parameters are initialized. */ public abstract Model apply(Model model); - + } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/Loss.java b/JGNN/src/main/java/mklab/JGNN/nn/Loss.java index edcb01f1..4e34e26c 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/Loss.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/Loss.java @@ -1,29 +1,32 @@ package mklab.JGNN.nn; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.core.Tensor; /** - * This class provides an abstract implementation of loss functions - * to be used during {@link Model} training. Preferred use is by - * passing loss instances to {@link ModelTraining}s. + * This class provides an abstract implementation of loss functions to be used + * during {@link Model} training. Preferred use is by passing loss instances to + * {@link ModelTraining}s. * * @author Emmanouil Krasanakis */ public abstract class Loss { /** - * Provides a numerical evaluation of a loss function, so that - * lower values correspond to better predictions. - * @param output A model's estimation of true outputs. + * Provides a numerical evaluation of a loss function, so that lower values + * correspond to better predictions. + * + * @param output A model's estimation of true outputs. * @param desired The expected outputs. - * @return A double value (is negative if smaller - * values are better). + * @return A double value (is negative if smaller values are + * better). * @see #derivative(Tensor, Tensor) */ public abstract double evaluate(Tensor output, Tensor desired); - + /** - * Provides the derivative of a loss function at its evaluation point. - * @param output A model's estimation of true outputs. + * Provides the derivative of a loss function at its evaluation point. + * + * @param output A model's estimation of true outputs. * @param desired The expected outputs. * @return A Tensor compliant to the model's estimation. 
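// Editor's note: an illustrative sketch, not part of the patch, of a custom
// loss extending the abstract class above: a mean squared error. It assumes
// the element-wise Tensor operations subtract, multiply, and sum provided by
// the core package.
import mklab.JGNN.core.Tensor;
import mklab.JGNN.nn.Loss;

class MeanSquaredError extends Loss {
    @Override
    public double evaluate(Tensor output, Tensor desired) {
        Tensor diff = output.subtract(desired);
        return diff.multiply(diff).sum() / output.size(); // mean of squared differences
    }

    @Override
    public Tensor derivative(Tensor output, Tensor desired) {
        return output.subtract(desired).multiply(2. / output.size()); // gradient of the above
    }
}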
* @see #evaluate(Tensor, Tensor) diff --git a/JGNN/src/main/java/mklab/JGNN/nn/Model.java b/JGNN/src/main/java/mklab/JGNN/nn/Model.java index c212aec4..374c6dc2 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/Model.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/Model.java @@ -6,6 +6,7 @@ import java.util.HashSet; import java.util.List; +import mklab.JGNN.adhoc.ModelTraining; import mklab.JGNN.core.Matrix; import mklab.JGNN.core.Memory; import mklab.JGNN.core.Slice; diff --git a/JGNN/src/main/java/mklab/JGNN/nn/ModelTraining.java b/JGNN/src/main/java/mklab/JGNN/nn/ModelTraining.java deleted file mode 100644 index 36cb60df..00000000 --- a/JGNN/src/main/java/mklab/JGNN/nn/ModelTraining.java +++ /dev/null @@ -1,232 +0,0 @@ -package mklab.JGNN.nn; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; - -import mklab.JGNN.adhoc.ModelBuilder; -import mklab.JGNN.core.Matrix; -import mklab.JGNN.core.Memory; -import mklab.JGNN.core.Slice; -import mklab.JGNN.core.Tensor; -import mklab.JGNN.core.ThreadPool; -import mklab.JGNN.core.matrix.WrapRows; -import mklab.JGNN.nn.inputs.Parameter; -import mklab.JGNN.nn.optimizers.Adam; -import mklab.JGNN.nn.optimizers.BatchOptimizer; - -/** - * This is a helper class that automates the definition of training processes of {@link Model} instances - * by defining the number of epochs, loss functions, number of batches and the ability to use {@link ThreadPool} - * for parallelized batch computations. - * - * @author Emmanouil Krasanakis - */ -public class ModelTraining { - private BatchOptimizer optimizer; - private int numBatches = 1; - private int epochs = 300; - private int patience = Integer.MAX_VALUE; - private boolean paralellization = false; - private boolean stochasticGradientDescent = false; - private Loss loss, validationLoss; - private boolean verbose = false; - - public ModelTraining() { - } - - /** - * @param verbose Whether an error message will be printed. - * @deprecated This method was available in earlier JGNN versions but will be gradually phased out. - * Instead, wrap the validation loss within {@link mklab.JGNN.nn.loss.report.VerboseLoss} to replicate - * the same behavior. - */ - public ModelTraining setVerbose(boolean verbose) { - System.err.println("WARNING: The setVerbose method was available in earlier JGNN versions" - + "\n but will be gradually phased out. Instead, wrap the validation" - + "\n loss within a VerboseLoss instance to replicate the same" - + "\n behavior. Look for more losses of the mklab.JGNN.nn.loss.report" - + "\n package for more types of training feedback."); - this.verbose = verbose; - return this; - } - - /** - * Set - * @param loss - * @return - */ - public ModelTraining setLoss(Loss loss) { - this.loss = loss; - return this; - } - public ModelTraining setValidationLoss(Loss loss) { - this.validationLoss = loss; - return this; - } - - /** - * Sets an {@link Optimizer} instance to controls parameter updates during training. - * If the provided optimizer is not an instance of {@link BatchOptimizer}, - * it is forcefully wrapped by the latter. Training calls the batch optimizer's - * update method after every batch. - * @param optimizer The desired optimizer. - * @return this model training instance. 
- * @see #train(Model, Matrix, Matrix, Slice, Slice) - */ - public ModelTraining setOptimizer(Optimizer optimizer) { - if(optimizer instanceof BatchOptimizer) - this.optimizer = (BatchOptimizer) optimizer; - else - this.optimizer = new BatchOptimizer(optimizer); - return this; - } - - /** - * Sets the number of batches training data slices should be split into. - * @param numBatches The desired number of batches. Default is 1. - * @return this model training instance. - * @see #setParallelizedStochasticGradientDescent(boolean) - */ - public ModelTraining setNumBatches(int numBatches) { - this.numBatches = numBatches; - return this; - } - - /** - * Sets whether the training strategy should reflect stochastic - * gradient descent by randomly sampling from the training dataset to obtain data samples. - * If true, both this feature and acceptable thread-based paralellization - * is enabled. Parallelization makes use of JGNN's {@link ThreadPool}. - * @param paralellization A boolean value indicating whether this feature is enabled. - * @return this model training instance. - * @see #setNumBatches(int) - * @see #train(Model, Matrix, Matrix, Slice, Slice) - */ - public ModelTraining setParallelizedStochasticGradientDescent(boolean paralellization) { - this.paralellization = paralellization; - this.stochasticGradientDescent = paralellization; - return this; - } - - /** - * Sets the maximum number of epochs for which training runs. - * If no patience has been set, training runs for exactly this - * number of epochs. - * @param epochs The maximum number of epochs. - * @return this model training instance. - * @see #setPatience(int) - */ - public ModelTraining setEpochs(int epochs) { - this.epochs = epochs; - return this; - } - - /** - * Sets the patience of the training strategy that performs early stopping. - * If training does not encounter a smaller validation loss for this number of - * epochs, it stops. - * @param patience The number of patience epochs. Default is Integer.MAX_VALUE to effectively disable this - * feature and let training always reach the maximum number of set epochs. - * @return this model training instance. - * @see #setEpochs(int) - */ - public ModelTraining setPatience(int patience) { - this.patience = patience; - return this; - } - - /** - * Trains a {@link Model} instance based on current settings. - * - * @param model The model instance to train. - * @param features A matrix whose columns correspond to sample features. - * @param labels A matrix whose columns correspond to sample (one hot) labels. - * @param trainingSamples Which columns to select for training. - * @return The trained model (the same instance as the first argument). 
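// Editor's note: not part of the patch. The fluent configuration API being
// deleted here survives in the relocated mklab.JGNN.adhoc.ModelTraining (see
// the rewritten imports elsewhere in this patch). Whether instantiated
// directly or through a task-specific subclass such as
// mklab.JGNN.adhoc.train.NodeClassification, the setters chain the same way:
//
//     ModelTraining trainer = new NodeClassification()
//             .setOptimizer(new Adam(0.01)) // non-batch optimizers are wrapped in a BatchOptimizer
//             .setEpochs(300)               // upper bound on training epochs
//             .setPatience(100);            // early stopping after 100 epochs without improvement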
- */ - public Model train(Model model, - Matrix features, - Matrix labels, - Slice trainingSamples, - Slice validationSamples) { - // ACTUΑL TRAINING - double minLoss = Double.POSITIVE_INFINITY; - HashMap minLossParameters = new HashMap(); - int currentPatience = patience; - for(int epoch=0;epoch outputs; - outputs = model.train(loss, optimizer, Arrays.asList(trainFeatures), Arrays.asList(trainLabels)); - if(stochasticGradientDescent) - optimizer.updateAll(); - if(validationSamples==null) - batchLosses[batchId] = loss.evaluate(outputs.get(0), trainLabels); - } - }; - if(paralellization) - ThreadPool.getInstance().submit(batchCode); - else - batchCode.run(); - //System.out.println(System.currentTimeMillis()-tic); - } - if(paralellization) - ThreadPool.getInstance().waitForConclusion(); - if(!stochasticGradientDescent) - optimizer.updateAll(); - double totalLoss = 0; - if(validationSamples==null) - for(double batchLoss : batchLosses) - totalLoss += batchLoss/numBatches; - else { - Memory.scope().enter(); - Matrix validationFeatures = new WrapRows(features.accessRows(validationSamples)); - Matrix validationLabels = new WrapRows(labels.accessRows(validationSamples)); - List outputs = model.predict(Arrays.asList(validationFeatures)); - totalLoss = (validationLoss!=null?validationLoss:loss).evaluate(outputs.get(0), validationLabels);// outputs.get(0).multiply(-1).cast(Matrix.class).selfAdd(validationLabels).selfAbs().norm(); - Memory.scope().exit(); - //for(long i=0;inot after each epoch). + * Resets (and lets the garbage collector free) optimizer memory. Should be + * called at the beginning of training (not after each epoch). */ - public default void reset() {}; + public default void reset() { + }; } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/Exp.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/Exp.java index adf46183..88ecb561 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/Exp.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/Exp.java @@ -6,7 +6,8 @@ import mklab.JGNN.core.Tensor; /** - * Implements a {@link NNOperation} that performs an exponential transformation of its single input. + * Implements a {@link NNOperation} that performs an element-by-element + * exponential transformation of its one input tensor. * * @author Emmanouil Krasanakis */ @@ -15,10 +16,12 @@ public class Exp extends NNOperation { protected Tensor forward(List inputs) { return inputs.get(0).expMinusOne().selfAdd(1.); } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { return output.multiply(error); } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { return outputNonLinearity; diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/L1.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/L1.java index 4f25bfb6..90602603 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/L1.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/L1.java @@ -8,76 +8,89 @@ import mklab.JGNN.core.Tensor; import mklab.JGNN.core.tensor.DenseTensor; - /** - * Implements a {@link NNOperation} that performs a L1 transformation of its single input - * by row or column. + * Implements a {@link NNOperation} that performs a L1 transformation of its one + * input tensor by row or by column. If the input tensor is not a matrix, it is + * just L1-normalized. 
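// Editor's note (not part of the patch): a worked example of the L1
// transformation described above. A non-negative vector [2, 1, 1] sums to 4,
// so it is mapped to [0.5, 0.25, 0.25]; for matrix inputs the same
// normalization runs along each row or each column, depending on the
// constructor's colMode flag.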
* * @author Emmanouil Krasanakis */ public class L1 extends NNOperation { private boolean colMode; + + /** + * Instantiates an L1 operation that transforms inputs by row. + * + * @see #L1(boolean) + */ public L1() { this(false); } + + /** + * Instantiates an L1 operation that transforms inputs alongside the dimension + * signified by its argument. + * + * @param colMode True to perform the normalization on each column, otherwise it + * is performed on each row. + */ public L1(boolean colMode) { super(); this.colMode = colMode; } - + @Override public Tensor forward(List inputs) { - if(inputs.size()!=1) + if (inputs.size() != 1) throw new IllegalArgumentException(); Tensor ret = inputs.get(0).copy(); - if(ret instanceof Matrix) { - if(colMode) { + if (ret instanceof Matrix) { + if (colMode) { Matrix matrix = ret.cast(Matrix.class); Tensor sums = new DenseTensor(matrix.getRows()); - for(Entry pos : matrix.getNonZeroEntries()) { + for (Entry pos : matrix.getNonZeroEntries()) { long row = pos.getKey(); long col = pos.getValue(); sums.putAdd(row, matrix.get(row, col)); } - for(Entry pos : matrix.getNonZeroEntries()) { + for (Entry pos : matrix.getNonZeroEntries()) { long row = pos.getKey(); long col = pos.getValue(); double div = sums.get(row); - matrix.put(row, col, Math.abs(matrix.get(row, col))/div); + matrix.put(row, col, Math.abs(matrix.get(row, col)) / div); } - } - else { + } else { Matrix matrix = ret.cast(Matrix.class); Tensor sums = new DenseTensor(matrix.getCols()); - for(Entry pos : matrix.getNonZeroEntries()) { + for (Entry pos : matrix.getNonZeroEntries()) { long row = pos.getKey(); long col = pos.getValue(); sums.putAdd(col, matrix.get(row, col)); } - for(Entry pos : matrix.getNonZeroEntries()) { + for (Entry pos : matrix.getNonZeroEntries()) { long row = pos.getKey(); long col = pos.getValue(); double div = sums.get(col); - if(div!=0) - matrix.put(row, col, Math.abs(matrix.get(row, col))/div); + if (div != 0) + matrix.put(row, col, Math.abs(matrix.get(row, col)) / div); } } - } - else + } else return ret.setToProbability(); return ret; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Tensor ret = error.zeroCopy(); Tensor input = inputs.get(0); - for(long pos : error.getNonZeroElements()) { + for (long pos : error.getNonZeroElements()) { double nom = input.get(pos); - if(nom==0) + if (nom == 0) continue; - double denom = nom/output.get(pos); - double sgn = nom>0?1:-1; - ret.put(pos, sgn*(1.-nom/denom)/denom); + double denom = nom / output.get(pos); + double sgn = nom > 0 ? 1 : -1; + ret.put(pos, sgn * (1. - nom / denom) / denom); } return ret; } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/LRelu.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/LRelu.java index 1f291e27..ef7bfa72 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/LRelu.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/LRelu.java @@ -6,47 +6,51 @@ import mklab.JGNN.nn.NNOperation; import mklab.JGNN.nn.inputs.Constant; - /** - * Implements a {@link NNOperation} that performs a leaky relu operation, where the first argument is a tensor on which - * it is applied and the second one should be a tensor wrapping a double value (consider initializing this with as a - * {@link mklab.JGNN.nn.inputs.Constant} holding a tensor generated with {@link Tensor#fromDouble(double)}) where - * the wrapped value indicates the negative region's slope. If the negative slope is zero, leaky relu is reduced to {@link Relu}. 
+ * Implements a {@link NNOperation} that performs a leaky relu operation, where + * the first argument is a tensor on which it is applied and the second one + * should be a tensor wrapping a double value (consider initializing this + * as a {@link mklab.JGNN.nn.inputs.Constant} holding a tensor generated with + * {@link Tensor#fromDouble(double)}) where the wrapped value indicates the + * negative region's slope. If the negative slope is zero, leaky relu is reduced + * to {@link Relu}. * * @author Emmanouil Krasanakis */ public class LRelu extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); Tensor x = inputs.get(0); Tensor ret = x.zeroCopy(); double mult = inputs.get(1).toDouble(); - for(long i : x.getNonZeroElements()) { + for (long i : x.getNonZeroElements()) { double val = x.get(i); - ret.put(i, val>0?val:(val*mult)); + ret.put(i, val > 0 ? val : (val * mult)); } return ret; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Tensor x = inputs.get(0); Tensor ret = x.zeroCopy(); double mult = inputs.get(1).toDouble(); - for(long i : x.getNonZeroElements()) { + for (long i : x.getNonZeroElements()) { double val = x.get(i); - if(val>=0) + if (val >= 0) ret.put(i, error.get(i)); else - ret.put(i, mult*error.get(i)); + ret.put(i, mult * error.get(i)); } return ret; } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { - double slope = ((Constant)getInputs().get(1)).get().toDouble(); - return outputNonLinearity*Math.sqrt(2./(1+(slope*slope))); + double slope = ((Constant) getInputs().get(1)).get().toDouble(); + return outputNonLinearity * Math.sqrt(2. / (1 + (slope * slope))); } - + } \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/NExp.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/NExp.java index c0bdeb21..4b4c68c0 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/NExp.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/NExp.java @@ -6,8 +6,8 @@ import mklab.JGNN.core.Tensor; /** - * Implements a {@link NNOperation} that performs an exponential transformation of - * its single input, but only on the non-zero elements. + * Implements a {@link NNOperation} that performs an exponential transformation + * of its single input, but only on the non-zero elements.
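Returning to the LRelu operation above, a minimal sketch of its two-input convention, again assuming the eager run(...) entry point; the 0.25 slope is illustrative.

    Tensor x = new DenseTensor(3);
    x.put(0, -2);
    x.put(1, 1);
    x.put(2, -1);
    // the second input wraps the negative-region slope, per the Javadoc above
    Tensor y = new LRelu().run(x, Tensor.fromDouble(0.25)); // -0.5, 1.0, -0.25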
* * @author Emmanouil Krasanakis */ @@ -16,20 +16,22 @@ public class NExp extends NNOperation { protected Tensor forward(List inputs) { Tensor input = inputs.get(0); Tensor ret = input.zeroCopy(); - for(long pos : input) - if(input.get(pos)!=0) + for (long pos : input) + if (input.get(pos) != 0) ret.put(pos, Math.exp(input.get(pos))); return ret; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Tensor ret = output.copy(); Tensor input = inputs.get(0); - for(long pos : input) - if(input.get(pos)!=0) - ret.put(pos, Math.exp(input.get(pos)*error.get(pos))); + for (long pos : input) + if (input.get(pos) != 0) + ret.put(pos, Math.exp(input.get(pos) * error.get(pos))); return output.multiply(error); } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { return outputNonLinearity; diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/PRelu.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/PRelu.java index ed60d265..0d24c804 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/PRelu.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/PRelu.java @@ -13,58 +13,56 @@ public class PRelu extends NNOperation { protected Tensor forward(List inputs) { Tensor x = inputs.get(0); Tensor param = inputs.get(1); - if(param.size()==1) + if (param.size() == 1) return x.multiply(param.toDouble()); - if(x instanceof Matrix && !(param instanceof Matrix)) - param = new ColumnRepetition(((Matrix)x).getRows(), param); + if (x instanceof Matrix && !(param instanceof Matrix)) + param = new ColumnRepetition(((Matrix) x).getRows(), param); Tensor ret = x.zeroCopy(); - for(long i : x.getNonZeroElements()) { + for (long i : x.getNonZeroElements()) { double val = x.get(i); - ret.put(i, val>0?val:(val*param.get(i))); + ret.put(i, val > 0 ? val : (val * param.get(i))); } return ret; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Tensor x = inputs.get(0); Tensor param = inputs.get(1); Tensor ret = inputs.get(inputId).zeroCopy(); - if(inputId==0) { - if(param.size()==1) + if (inputId == 0) { + if (param.size() == 1) param = new ColumnRepetition(x.size(), param); - if(x instanceof Matrix && !(param instanceof Matrix)) - param = new ColumnRepetition(((Matrix)x).getRows(), param); - for(long i : error.getNonZeroElements()) { + if (x instanceof Matrix && !(param instanceof Matrix)) + param = new ColumnRepetition(((Matrix) x).getRows(), param); + for (long i : error.getNonZeroElements()) { double val = x.get(i); - ret.put(i, val>=0?error.get(i):(error.get(i)*param.get(i))); + ret.put(i, val >= 0 ? 
error.get(i) : (error.get(i) * param.get(i))); } - } - else if(inputId==1) { - if(x instanceof Matrix && !(param instanceof Matrix)) { - Matrix matrix = (Matrix)x; - for(Entry entry : matrix.getNonZeroEntries()) { + } else if (inputId == 1) { + if (x instanceof Matrix && !(param instanceof Matrix)) { + Matrix matrix = (Matrix) x; + for (Entry entry : matrix.getNonZeroEntries()) { long row = entry.getKey(); long col = entry.getValue(); double val = matrix.get(row, col); - if(val<0) - ret.put(col, ret.get(col)*((Matrix)error).get(row, col)); + if (val < 0) + ret.put(col, ret.get(col) * ((Matrix) error).get(row, col)); } - } - else { - for(long i : x.getNonZeroElements()) { + } else { + for (long i : x.getNonZeroElements()) { double val = x.get(i); - if(val<0) - ret.put(i, error.get(i)*val); + if (val < 0) + ret.put(i, error.get(i) * val); } } - } - else + } else throw new RuntimeException("prelu takes exactly 2 arguments"); return ret; } - + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { - return outputNonLinearity*Math.sqrt(2);//TODO: check for this function + return outputNonLinearity * Math.sqrt(2);// TODO: check for this function } } \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/Relu.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/Relu.java index 2dbdb42a..8de66896 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/Relu.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/Relu.java @@ -6,11 +6,12 @@ import mklab.JGNN.core.Tensor; /** - * Implements a {@link NNOperation} that performs a relu transformation of its single input first introduced by - * Hahnloser, Richard HR, Rahul Sarpeshkar, Misha A. Mahowald, Rodney J. Douglas, and H. Sebastian Seung. - * "Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit." - * Nature 405, no. 6789 (2000): 947-951. - * + * Implements a {@link NNOperation} that performs a relu transformation of its + * one input tensor. This transformation was first introduced by Hahnloser, + * Richard HR, Rahul Sarpeshkar, Misha A. Mahowald, Rodney J. Douglas, and H. + * Sebastian Seung. "Digital selection and analogue amplification coexist in a + * cortex-inspired silicon circuit." Nature 405, no. 6789 (2000): 947-951. + * * @author Emmanouil Krasanakis */ public class Relu extends NNOperation { @@ -18,25 +19,27 @@ public class Relu extends NNOperation { protected Tensor forward(List inputs) { Tensor x = inputs.get(0); Tensor ret = x.zeroCopy(); - for(long i : x.getNonZeroElements()) { + for (long i : x.getNonZeroElements()) { double val = x.get(i); - ret.put(i, val>0?val:0); + ret.put(i, val > 0 ? 
val : 0); } return ret; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Tensor x = inputs.get(0); Tensor ret = x.zeroCopy(); - for(long i : x.getNonZeroElements()) { + for (long i : x.getNonZeroElements()) { double val = x.get(i); - if(val>=0) + if (val >= 0) ret.put(i, error.get(i)); } return ret; } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { - return Math.sqrt(2)*outputNonLinearity; + return Math.sqrt(2) * outputNonLinearity; } } \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/Sigmoid.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/Sigmoid.java index 3cc0c881..4c0a1e43 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/Sigmoid.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/Sigmoid.java @@ -7,7 +7,8 @@ import mklab.JGNN.core.util.Loss; /** - * Implements a {@link NNOperation} that performs a sigmoid transformation of its single input. + * Implements a {@link NNOperation} that performs a sigmoid transformation of + * its single input. * * @author Emmanouil Krasanakis */ @@ -16,10 +17,12 @@ public class Sigmoid extends NNOperation { protected Tensor forward(List inputs) { return Loss.sigmoid(inputs.get(0)); } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { return Loss.sigmoidDerivative(inputs.get(0)).selfMultiply(error); } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { return outputNonLinearity; diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/Tanh.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/Tanh.java index f4949444..fe5fced9 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/activations/Tanh.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/Tanh.java @@ -7,7 +7,8 @@ import mklab.JGNN.core.util.Loss; /** - * Implements a {@link NNOperation} that performs a tanh transformation of its single input. + * Implements a {@link NNOperation} that performs a tanh transformation of its + * single input. * * @author Emmanouil Krasanakis */ @@ -16,12 +17,14 @@ public class Tanh extends NNOperation { protected Tensor forward(List inputs) { return Loss.tanh(inputs.get(0)); } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { return Loss.tanhDerivative(inputs.get(0)).selfMultiply(error); } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { - return 5*outputNonLinearity/3; + return 5 * outputNonLinearity / 3; } } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/activations/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/activations/package-info.java new file mode 100644 index 00000000..772b1dae --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/activations/package-info.java @@ -0,0 +1,6 @@ +/** + * Implements activation functions to be used as model operations. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.activations; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/initializers/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/initializers/package-info.java new file mode 100644 index 00000000..56524779 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/initializers/package-info.java @@ -0,0 +1,8 @@ +/** + * Implements initializers to be applied on {@link mklab.JGNN.nn.Model} + * parameters to stochastically induce some desired property at the first + * training epoch.
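A short sketch of how initializers from this package are typically applied, following the bundled examples; obtaining the model through a builder's getModel() accessor is an assumption here.

    Model model = builder.getModel()       // `builder` is any configured ModelBuilder
            .init(new XavierNormal());     // randomize parameters before the first epoch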
+ * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.initializers; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/inputs/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/inputs/package-info.java new file mode 100644 index 00000000..2eeb042d --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/inputs/package-info.java @@ -0,0 +1,8 @@ +/** + * Contains various types of neural architecture inputs. These differ in + * whether they are constants, parameters to be trained, or variables manually + * set in each architecture run. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.inputs; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/loss/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/loss/package-info.java new file mode 100644 index 00000000..de3aff20 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/loss/package-info.java @@ -0,0 +1,7 @@ +/** + * Contains classes for instantiating loss functions. These are then to be used + * during model training. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.loss; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/loss/report/VerboseLoss.java b/JGNN/src/main/java/mklab/JGNN/nn/loss/report/VerboseLoss.java index d745f289..b436dbe0 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/loss/report/VerboseLoss.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/loss/report/VerboseLoss.java @@ -6,8 +6,9 @@ import mklab.JGNN.nn.Loss; /** - * Implements a {@link Loss} that wraps other losses and outputs their value during training to an output stream - * (to {@link System#out} by default). This is the simplest loss wrapper to keep track of training progress. + * Implements a {@link Loss} that wraps other losses and outputs their value + * during training to an output stream (to {@link System#out} by default). This + * is the simplest loss wrapper to keep track of training progress. * * @author Emmanouil Krasanakis * @see VerboseLoss#VerboseLoss(Loss) @@ -17,15 +18,16 @@ public class VerboseLoss extends Loss { private int every = 1; private Loss baseLoss; private PrintStream out; - + public void reset() { epoch = 0; } - + /** - * Instantiates a {@link VerboseLoss} given a base loss to be wrapped. - * Use a method chain to modify when losses should be reported, and which - * output stream is used. + * Instantiates a {@link VerboseLoss} given a base loss to be wrapped. Use a + * method chain to modify when losses should be reported, and which output + * stream is used. + * + * @param baseLoss * @see #setInterval(int) * @see #setStream(PrintStream) @@ -34,19 +36,22 @@ public VerboseLoss(Loss baseLoss) { this.baseLoss = baseLoss; out = System.out; } - + /** * Changes on which epochs the loss should be reported. - * @param every The loss is reported on epochs 0, every, 2every, ... Default is 1. + * + * @param every The loss is reported on epochs 0, every, 2every, ... Default is + * 1. * @return this verbose loss instance. */ public VerboseLoss setInterval(int every) { this.every = every; return this; } - + /** * Changes where the output is printed. + * + * @param out The print stream to print to. Default is {@link System#out}. * @return this verbose loss instance.
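Both setters above return the instance, so a typical configuration chains them, using only the API introduced in this file; stream and interval values are illustrative.

    Loss loss = new VerboseLoss(new BinaryCrossEntropy())
            .setInterval(10)          // report every 10th epoch
            .setStream(System.err);   // log to stderr instead of the default System.out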
*/ @@ -54,13 +59,14 @@ public VerboseLoss setStream(PrintStream out) { this.out = out; return this; } - + @Override public double evaluate(Tensor output, Tensor desired) { epoch += 1; double value = baseLoss.evaluate(output, desired); - if(epoch==0 || epoch%every==0) - out.println("Epoch "+epoch+" "+baseLoss.getClass().getSimpleName()+" "+Math.round(Math.abs(value*1000))/1000.0); + if (epoch == 0 || epoch % every == 0) + out.println("Epoch " + epoch + " " + baseLoss.getClass().getSimpleName() + " " + + Math.round(Math.abs(value * 1000)) / 1000.0); return value; } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/loss/report/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/loss/report/package-info.java new file mode 100644 index 00000000..f43f1cd3 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/loss/report/package-info.java @@ -0,0 +1,8 @@ +/** + * Contains losses that wrap other losses and augment their numeric computations + * with live reporting of the training status. For example, these can be used + * for printing or logging training progress. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.loss.report; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Add.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Add.java index e9f7dcd3..7e329956 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Add.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Add.java @@ -19,153 +19,137 @@ public class Add extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); Tensor input0 = inputs.get(0); Tensor input1 = inputs.get(1); - if(input0.size()==1) + if (input0.size() == 1) return input1.add(input0.toDouble()); - if(input1.size()==1) + if (input1.size() == 1) return input0.add(input1.toDouble()); - if(input0 instanceof Matrix && !(input1 instanceof Matrix)) - input1 = ((Matrix)input0).getCols()!=input1.size() ? - new RowRepetition(input1, ((Matrix)input0).getCols()) : - new ColumnRepetition(((Matrix)input0).getRows(), input1); + if (input0 instanceof Matrix && !(input1 instanceof Matrix)) + input1 = ((Matrix) input0).getCols() != input1.size() + ? 
new RowRepetition(input1, ((Matrix) input0).getCols()) + : new ColumnRepetition(((Matrix) input0).getRows(), input1); return input0.add(input1); } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Tensor input0 = inputs.get(0); Tensor input1 = inputs.get(1); - if((input0.size()==1 && inputId==0) || (input1.size()==1 && inputId==1)) { + if ((input0.size() == 1 && inputId == 0) || (input1.size() == 1 && inputId == 1)) { double val = 0; - for(long pos : error.getNonZeroElements()) + for (long pos : error.getNonZeroElements()) val += error.get(pos); return Tensor.fromDouble(val); } - if(inputId==1 && input0 instanceof Matrix && !(input1 instanceof Matrix)) - return new Sum(((Matrix)input0).getCols()==input1.size()).run(error); - if(inputId==0 && input1 instanceof Matrix && !(input0 instanceof Matrix)) - return new Sum(((Matrix)input1).getRows()==input0.size()).run(error); + if (inputId == 1 && input0 instanceof Matrix && !(input1 instanceof Matrix)) + return new Sum(((Matrix) input0).getCols() == input1.size()).run(error); + if (inputId == 0 && input1 instanceof Matrix && !(input0 instanceof Matrix)) + return new Sum(((Matrix) input1).getRows() == input0.size()).run(error); return error; } - - @Override protected void autosize(ArrayList lastInputs) { Tensor input0 = lastInputs.get(0); Tensor input1 = lastInputs.get(1); - if(input0 instanceof Matrix && input1 instanceof Matrix) { + if (input0 instanceof Matrix && input1 instanceof Matrix) { Matrix left = input0.cast(Matrix.class); Matrix right = input1.cast(Matrix.class); - if(getInputs().get(0) instanceof Parameter && left.getRows()==0 && left.getRowName().equals("?")) { - if(right.getRows()==0 && right.getRowName().equals("?")) + if (getInputs().get(0) instanceof Parameter && left.getRows() == 0 && left.getRowName().equals("?")) { + if (right.getRows() == 0 && right.getRowName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(0)).set( - left.zeroCopy(right.getRows(), left.getCols()) - .setRowName(right.getRowName()) - .setColName(left.getColName()) - .setDimensionName(left.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(0).describe()); + ((Parameter) getInputs().get(0)) + .set(left.zeroCopy(right.getRows(), left.getCols()).setRowName(right.getRowName()) + .setColName(left.getColName()).setDimensionName(left.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(0).describe()); } - if(getInputs().get(0) instanceof Parameter && left.getCols()==0 && left.getColName().equals("?")) { - if(right.getCols()==0 && right.getColName().equals("?")) + if (getInputs().get(0) instanceof Parameter && left.getCols() == 0 && left.getColName().equals("?")) { + if (right.getCols() == 0 && right.getColName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(0)).set( - left.zeroCopy(left.getRows(), right.getCols()) - .setRowName(left.getRowName()) - .setColName(right.getColName()) - .setDimensionName(left.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(0).describe()); + ((Parameter) getInputs().get(0)) + .set(left.zeroCopy(left.getRows(), right.getCols()).setRowName(left.getRowName()) + 
.setColName(right.getColName()).setDimensionName(left.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(0).describe()); } - if(getInputs().get(0) instanceof Parameter && left.getRows()==0 && left.getRowName().equals("?")) { - if(right.getRows()==0 && right.getRowName().equals("?")) + if (getInputs().get(0) instanceof Parameter && left.getRows() == 0 && left.getRowName().equals("?")) { + if (right.getRows() == 0 && right.getRowName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(0)).set( - left.zeroCopy(right.getRows(), left.getCols()) - .setRowName(right.getRowName()) - .setColName(left.getColName()) - .setDimensionName(left.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(0).describe()); + ((Parameter) getInputs().get(0)) + .set(left.zeroCopy(right.getRows(), left.getCols()).setRowName(right.getRowName()) + .setColName(left.getColName()).setDimensionName(left.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(0).describe()); } - if(getInputs().get(1) instanceof Parameter && right.getCols()==0 && right.getColName().equals("?")) { - if(left.getCols()==0 && left.getColName().equals("?")) + if (getInputs().get(1) instanceof Parameter && right.getCols() == 0 && right.getColName().equals("?")) { + if (left.getCols() == 0 && left.getColName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(1)).set( - right.zeroCopy(right.getRows(), left.getCols()) - .setRowName(right.getRowName()) - .setColName(left.getColName()) - .setDimensionName(right.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(1).describe()); + ((Parameter) getInputs().get(1)) + .set(right.zeroCopy(right.getRows(), left.getCols()).setRowName(right.getRowName()) + .setColName(left.getColName()).setDimensionName(right.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(1).describe()); } - if(getInputs().get(1) instanceof Parameter && right.getRows()==0 && right.getRowName().equals("?")) { - if(left.getRows()==0 && left.getRowName().equals("?")) + if (getInputs().get(1) instanceof Parameter && right.getRows() == 0 && right.getRowName().equals("?")) { + if (left.getRows() == 0 && left.getRowName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(1)).set( - right.zeroCopy(left.getRows(), right.getCols()) - .setRowName(left.getRowName()) - .setColName(right.getColName()) - .setDimensionName(right.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(1).describe()); + ((Parameter) getInputs().get(1)) + .set(right.zeroCopy(left.getRows(), right.getCols()).setRowName(left.getRowName()) + .setColName(right.getColName()).setDimensionName(right.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(1).describe()); } - } - else if(input0 instanceof Matrix && !(input1 instanceof Matrix)) { + } else if (input0 instanceof Matrix && !(input1 instanceof Matrix)) { Matrix matrix = 
input0.cast(Matrix.class); - if(getInputs().get(0) instanceof Parameter && matrix.getCols()==0 && matrix.getColName().equals("?")) { - if(input1.size()==0 && input1.getDimensionName().equals("?")) + if (getInputs().get(0) instanceof Parameter && matrix.getCols() == 0 && matrix.getColName().equals("?")) { + if (input1.size() == 0 && input1.getDimensionName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(0)).set( - matrix.zeroCopy(matrix.getRows(), input1.size()) - .setRowName(matrix.getRowName()) - .setColName(input1.getDimensionName()) - .setDimensionName(matrix.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(0).describe()); + ((Parameter) getInputs().get(0)) + .set(matrix.zeroCopy(matrix.getRows(), input1.size()).setRowName(matrix.getRowName()) + .setColName(input1.getDimensionName()).setDimensionName(matrix.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(0).describe()); } - if(getInputs().get(1) instanceof Parameter && input1.size()==0 && input1.getDimensionName().equals("?")) { - if(matrix.getCols()==0 && matrix.getColName().equals("?")) + if (getInputs().get(1) instanceof Parameter && input1.size() == 0 + && input1.getDimensionName().equals("?")) { + if (matrix.getCols() == 0 && matrix.getColName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(1)).set( - input1.zeroCopy(matrix.getCols()) - .setDimensionName(matrix.getColName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(1).describe()); + ((Parameter) getInputs().get(1)) + .set(input1.zeroCopy(matrix.getCols()).setDimensionName(matrix.getColName())).runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(1).describe()); } - } - else { - if(getInputs().get(0) instanceof Parameter && input0.size()==0 && input0.getDimensionName().equals("?")) { - if(input1.size()==0 && input1.getDimensionName().equals("?")) + } else { + if (getInputs().get(0) instanceof Parameter && input0.size() == 0 + && input0.getDimensionName().equals("?")) { + if (input1.size() == 0 && input1.getDimensionName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(0)).set( - input0.zeroCopy(input1.size()) - .setDimensionName(input1.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(0).describe()); + ((Parameter) getInputs().get(0)) + .set(input0.zeroCopy(input1.size()).setDimensionName(input1.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(0).describe()); } - if(getInputs().get(1) instanceof Parameter && input1.size()==0 && input1.getDimensionName().equals("?")) { - if(input0.size()==0 && input0.getDimensionName().equals("?")) + if (getInputs().get(1) instanceof Parameter && input1.size() == 0 + && input1.getDimensionName().equals("?")) { + if (input0.size() == 0 && input0.getDimensionName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(1)).set( - input1.zeroCopy(input0.size()) - .setDimensionName(input0.getDimensionName()) - ).runPrediction(); - 
if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(1).describe()); - + ((Parameter) getInputs().get(1)) + .set(input1.zeroCopy(input0.size()).setDimensionName(input0.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(1).describe()); + } } } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Attention.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Attention.java index 2d8fe2a4..a56b9fc3 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Attention.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Attention.java @@ -1,4 +1,5 @@ package mklab.JGNN.nn.operations; + import java.util.List; import java.util.Map.Entry; import mklab.JGNN.core.Matrix; @@ -14,43 +15,45 @@ public class Attention extends NNOperation { public Attention() { } + @Override public Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); Matrix adjacency = inputs.get(0).cast(Matrix.class); Matrix features = inputs.get(1).cast(Matrix.class); Matrix ret = adjacency.zeroCopy(); - for(Entry pos : adjacency.getNonZeroEntries()) { + for (Entry pos : adjacency.getNonZeroEntries()) { long row = pos.getKey(); long col = pos.getValue(); - if(row!=col) - ret.put(row, col, adjacency.get(row, col)*features.accessRow(row).dot(features.accessRow(col))); + if (row != col) + ret.put(row, col, adjacency.get(row, col) * features.accessRow(row).dot(features.accessRow(col))); } return ret; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Matrix features = inputs.get(1).cast(Matrix.class); Matrix errorMatrix = error.cast(Matrix.class); - if(inputId==0) { + if (inputId == 0) { Tensor ret = inputs.get(0).zeroCopy(); Matrix adjacency = inputs.get(0).cast(Matrix.class); - for(long pos : output.getNonZeroElements()) - if(adjacency.get(pos)!=0) - ret.put(pos, error.get(pos)*output.get(pos)/adjacency.get(pos)); + for (long pos : output.getNonZeroElements()) + if (adjacency.get(pos) != 0) + ret.put(pos, error.get(pos) * output.get(pos) / adjacency.get(pos)); throw new RuntimeException("Should not create non-constant adjacency matrices"); } Matrix ret = features.zeroCopy().cast(Matrix.class); - for(Entry pos : errorMatrix.getNonZeroEntries()) { + for (Entry pos : errorMatrix.getNonZeroEntries()) { long row = pos.getKey(); long col = pos.getValue(); - if(row==col) + if (row == col) continue; double err = errorMatrix.get(row, col); - for(long i=0;i inputs) { return inputs.get(0).multiply(-1).selfAdd(1); } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { return error.multiply(-1); diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Concat.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Concat.java index 92fed257..aa24d412 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Concat.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Concat.java @@ -15,48 +15,49 @@ public class Concat extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); List cols0 = inputs.get(0).cast(Matrix.class).accessColumns(); List cols1 = inputs.get(1).cast(Matrix.class).accessColumns(); cols0.addAll(cols1); - if(inputs.get(0).cast(Matrix.class).getRowName()!=inputs.get(1).cast(Matrix.class).getRowName()) - throw new RuntimeException("Cannot concatenate: "+inputs.get(0).describe()+" and 
"+inputs.get(1).describe()); - return new WrapCols(cols0).setZeroCopyType(inputs.get(0).cast(Matrix.class)).setDimensionName(inputs.get(0).cast(Matrix.class).getRowName(), null); - /*Matrix matrix0 = (Matrix)inputs.get(0); - Matrix matrix1 = (Matrix)inputs.get(1); - if(matrix0.getRows()!=matrix1.getRows()) - throw new IllegalArgumentException(); - Matrix matrix = (matrix0 instanceof SparseMatrix && matrix1 instanceof SparseMatrix) - ?new SparseMatrix(matrix0.getRows(), matrix0.getCols()+matrix1.getCols()) - :new DenseMatrix(matrix0.getRows(), matrix0.getCols()+matrix1.getCols()); - for(Entry entry : matrix0.getNonZeroEntries()) - matrix.put(entry.getKey(), entry.getValue(), matrix0.get(entry.getKey(), entry.getValue())); - for(Entry entry : matrix1.getNonZeroEntries()) - matrix.put(entry.getKey(), matrix0.getCols()+entry.getValue(), matrix1.get(entry.getKey(), entry.getValue())); - return matrix;*/ + if (inputs.get(0).cast(Matrix.class).getRowName() != inputs.get(1).cast(Matrix.class).getRowName()) + throw new RuntimeException( + "Cannot concatenate: " + inputs.get(0).describe() + " and " + inputs.get(1).describe()); + return new WrapCols(cols0).setZeroCopyType(inputs.get(0).cast(Matrix.class)) + .setDimensionName(inputs.get(0).cast(Matrix.class).getRowName(), null); + /* + * Matrix matrix0 = (Matrix)inputs.get(0); Matrix matrix1 = + * (Matrix)inputs.get(1); if(matrix0.getRows()!=matrix1.getRows()) throw new + * IllegalArgumentException(); Matrix matrix = (matrix0 instanceof SparseMatrix + * && matrix1 instanceof SparseMatrix) ?new SparseMatrix(matrix0.getRows(), + * matrix0.getCols()+matrix1.getCols()) :new DenseMatrix(matrix0.getRows(), + * matrix0.getCols()+matrix1.getCols()); for(Entry entry : + * matrix0.getNonZeroEntries()) matrix.put(entry.getKey(), entry.getValue(), + * matrix0.get(entry.getKey(), entry.getValue())); for(Entry entry : + * matrix1.getNonZeroEntries()) matrix.put(entry.getKey(), + * matrix0.getCols()+entry.getValue(), matrix1.get(entry.getKey(), + * entry.getValue())); return matrix; + */ } @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { - Matrix matrix0 = (Matrix)inputs.get(0); - Matrix matrix1 = (Matrix)inputs.get(1); - long start = inputId==0?0:matrix0.getCols(); - long end = inputId==0?matrix0.getCols():(matrix0.getCols()+matrix1.getCols()); + Matrix matrix0 = (Matrix) inputs.get(0); + Matrix matrix1 = (Matrix) inputs.get(1); + long start = inputId == 0 ? 0 : matrix0.getCols(); + long end = inputId == 0 ? 
matrix0.getCols() : (matrix0.getCols() + matrix1.getCols()); return error.cast(Matrix.class).accessColumns(Tensor.fromRange(start, end)).setDimensionName(null, null); - /*Matrix inputError = (Matrix)inputs.get(inputId).zeroCopy(); - Matrix matrix0 = (Matrix)inputs.get(0); - Matrix matrix1 = (Matrix)inputs.get(1); - Matrix errorMatrix = (Matrix)error; - long start = inputId==0?0:matrix0.getCols(); - long end = inputId==0?matrix0.getCols():(matrix0.getCols()+matrix1.getCols()); - for(Entry entry : matrix0.getNonZeroEntries()) { - long row = entry.getKey(); - long col = entry.getValue(); - if(col>=start && col entry : matrix0.getNonZeroEntries()) { long row = + * entry.getKey(); long col = entry.getValue(); if(col>=start && col inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); double value = inputs.get(1).toDouble(); - if(value<0 || value>1) + if (value < 0 || value > 1) throw new IllegalArgumentException(); - if(!enabled || value==0) + if (!enabled || value == 0) return inputs.get(0); Tensor input = inputs.get(0); Tensor ret = inputs.get(0).zeroCopy(); - for(long pos : input.getNonZeroElements()) - if(Math.random() inputs, Tensor output, Tensor error) { - if(inputId==1) + if (inputId == 1) return null; - if(!enabled) + if (!enabled) return error; double value = inputs.get(1).toDouble(); Tensor ret = output.zeroCopy(); - for(long pos : output.getNonZeroElements()) - if(output.get(pos)!=0) - ret.put(pos, error.get(pos)*value); + for (long pos : output.getNonZeroElements()) + if (output.get(pos) != 0) + ret.put(pos, error.get(pos) * value); return ret; } - + @Override public boolean isCachable() { return false; } - + } \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/From.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/From.java index 2de05c9d..aa839893 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/From.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/From.java @@ -10,26 +10,27 @@ import mklab.JGNN.core.tensor.DenseTensor; /** - * Implements a {@link NNOperation} that lists the first element of the 2D matrix element iterator. + * Implements a {@link NNOperation} that lists the first element of the 2D + * matrix element iterator. * * @author Emmanouil Krasanakis */ public class From extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=1) + if (inputs.size() != 1) throw new IllegalArgumentException(); ArrayList ret = new ArrayList((int) inputs.get(0).estimateNumNonZeroElements()); - for(Entry entry : inputs.get(0).cast(Matrix.class).getNonZeroEntries()) + for (Entry entry : inputs.get(0).cast(Matrix.class).getNonZeroEntries()) ret.add(entry.getKey()); return new DenseTensor(ret.iterator()); } - + @Override public boolean isCachable() { return true; } - + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { throw new UnsupportedOperationException("Cannot iterate over non-constant matrices"); diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Gather.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Gather.java index 8a7b990a..b3d6364d 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Gather.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Gather.java @@ -7,39 +7,39 @@ import mklab.JGNN.core.Tensor; /** - * Implements a {@link NNOperation} that performs the equivalent of TensorFlow's gather operation. 
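For the Gather operation below, a sketch of the row-selection semantics it implements through Matrix.accessRows; `features` is a hypothetical node-feature matrix and run(...) is assumed as in the earlier sketches.

    Tensor idx = new DenseTensor(2);
    idx.put(0, 3); // select row 3
    idx.put(1, 1); // then row 1
    Tensor gathered = new Gather().run(idx, features); // two rows of `features`, in index order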
+ * Implements a {@link NNOperation} that performs the equivalent of TensorFlow's + * gather operation. * * @author Emmanouil Krasanakis */ public class Gather extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); return inputs.get(1).cast(Matrix.class).accessRows(inputs.get(0)); - /*Tensor index = inputs.get(0); - Matrix H = (Matrix) inputs.get(1); - Matrix ret = H.zeroCopy(index.size(), H.getCols()).setRowName(index.getDimensionName()); - for(int i=0;i inputs, Tensor output, Tensor error) { - if(inputId==0) + if (inputId == 0) return null; Tensor index = inputs.get(0); Matrix H = inputs.get(1).cast(Matrix.class); Matrix errorMatrix = error.cast(Matrix.class); Matrix derivative = H.zeroCopy().cast(Matrix.class); - for(int i=0;i inputs) { return inputs.get(0); } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { return error; diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/LSTM.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/LSTM.java index 2d0954dd..da4c4813 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/LSTM.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/LSTM.java @@ -11,22 +11,25 @@ public class LSTM { private Matrix ui, wi, uf, wf, uo, wo, ug, wg; private Matrix tape_ui, tape_wi, tape_uf, tape_wf, tape_uo, tape_wo, tape_ug, tape_wg; private Optimizer optimizer; - + public static class LSTMState { private Tensor previousMemory; private Tensor previousOutput; + public LSTMState(Tensor previousMemory, Tensor previousOutput) { this.previousMemory = previousMemory; this.previousOutput = previousOutput; } + public Tensor getMemory() { return previousMemory; } + public Tensor getOutput() { return previousOutput; } } - + public LSTM(Optimizer optimizer, int inputSize, int outputSize) { this.optimizer = optimizer; int memorySize = outputSize; @@ -47,18 +50,18 @@ public LSTM(Optimizer optimizer, int inputSize, int outputSize) { wo.setToRandom(); wg.setToRandom(); } - + protected LSTM() { } - + public Optimizer getOptimizer() { return optimizer; } - + public LSTMState createFirstState() { return new LSTMState(new DenseTensor(ui.getRows()), new DenseTensor(wi.getCols())); } - + public LSTMState output(Tensor input, LSTMState previousState) { Tensor previousMemory = previousState.getMemory(); Tensor previousOutput = previousState.getOutput(); @@ -69,20 +72,21 @@ public LSTMState output(Tensor input, LSTMState previousState) { Tensor memory = Loss.sigmoid(f.selfMultiply(previousMemory).selfAdd(i.selfMultiply(memoryGate))); Tensor output = Loss.tanh(memory).selfMultiply(o); - /*System.out.println("------------ "+this); - System.out.println("Input "+input.describe()); - System.out.println("Prev memory "+previousMemory.describe()); - System.out.println("Prev output "+previousOutput.describe()); - System.out.println("i "+i.describe()); - System.out.println("f "+f.describe()); - System.out.println("o "+o.describe()); - System.out.println("memoryGate "+memoryGate.describe()); - System.out.println("memory "+memory.describe()); - System.out.println("output "+output.describe());*/ - + /* + * System.out.println("------------ "+this); + * System.out.println("Input "+input.describe()); + * System.out.println("Prev memory "+previousMemory.describe()); + * System.out.println("Prev output "+previousOutput.describe()); + * System.out.println("i "+i.describe()); System.out.println("f "+f.describe()); + * System.out.println("o 
"+o.describe()); + * System.out.println("memoryGate "+memoryGate.describe()); + * System.out.println("memory "+memory.describe()); + * System.out.println("output "+output.describe()); + */ + return new LSTMState(memory, output); } - + public void startTape() { tape_ui = ui.zeroCopy(); tape_uf = uf.zeroCopy(); @@ -93,36 +97,36 @@ public void startTape() { tape_wo = wo.zeroCopy(); tape_wg = wg.zeroCopy(); } - + public double train(Tensor[] inputs, Tensor output) { - LSTMState[] states = new LSTMState[inputs.length+1]; + LSTMState[] states = new LSTMState[inputs.length + 1]; states[0] = createFirstState(); - for(int i=0;i=0;i--) + for (int i = inputs.length - 1; i >= 0; i--) error = updateTape(inputs[i], states[i], error); return topError.norm(); } - + public void trainOnOutputError(Tensor[] inputs, Tensor outputGradient) { - LSTMState[] states = new LSTMState[inputs.length+1]; + LSTMState[] states = new LSTMState[inputs.length + 1]; states[0] = createFirstState(); - for(int i=0;i=0;i--) + for (int i = inputs.length - 1; i >= 0; i--) error = updateTape(inputs[i], states[i], error); } - + public Tensor predict(Tensor[] inputs) { LSTMState state = createFirstState(); - for(int i=0;i inputs) { - if(inputs.size()!=1) + if (inputs.size() != 1) throw new IllegalArgumentException(); Tensor ret = inputs.get(0).zeroCopy(); - for(long i : inputs.get(0).getNonZeroElements()) - ret.put(i, Math.log(inputs.get(0).get(i)+1.E-12)); + for (long i : inputs.get(0).getNonZeroElements()) + ret.put(i, Math.log(inputs.get(0).get(i) + 1.E-12)); return ret; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { return inputs.get(0).inverse().selfMultiply(error); diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/MatMul.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/MatMul.java index 951ad8f1..479ae491 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/MatMul.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/MatMul.java @@ -6,7 +6,6 @@ import mklab.JGNN.core.Matrix; import mklab.JGNN.nn.NNOperation; import mklab.JGNN.nn.inputs.Parameter; -import mklab.JGNN.nn.inputs.Variable; import mklab.JGNN.core.Tensor; /** @@ -17,62 +16,58 @@ public class MatMul extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); Matrix W = inputs.get(0).cast(Matrix.class); Matrix H = inputs.get(1).cast(Matrix.class); return W.matmul(H); } - + protected boolean isInputNeededForDerivative(int inputId) { - return !getInputs().get(1-inputId).isConstant(); + return !getInputs().get(1 - inputId).isConstant(); } @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { - Matrix errorMatrix = (Matrix)error; - if(inputId==0) { + Matrix errorMatrix = (Matrix) error; + if (inputId == 0) { Matrix H = inputs.get(1).cast(Matrix.class); errorMatrix = errorMatrix.matmul(H, false, true); - } - else if(inputId==1) { + } else if (inputId == 1) { Matrix W = inputs.get(0).cast(Matrix.class); errorMatrix = W.matmul(errorMatrix, true, false); } return errorMatrix; } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { return outputNonLinearity * inputMass; } - + @Override protected void autosize(ArrayList lastInputs) { Matrix left = lastInputs.get(0).cast(Matrix.class); Matrix right = lastInputs.get(1).cast(Matrix.class); - if(getInputs().get(0) instanceof Parameter && left.getCols()==0 && left.getColName().equals("?")) { 
- if(right.getRows()==0 && right.getRowName().equals("?")) + if (getInputs().get(0) instanceof Parameter && left.getCols() == 0 && left.getColName().equals("?")) { + if (right.getRows() == 0 && right.getRowName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(0)).set( - left.zeroCopy(left.getRows(), right.getRows()) - .setRowName(left.getRowName()) - .setColName(right.getRowName()) - .setDimensionName(left.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(0).describe()); + ((Parameter) getInputs().get(0)) + .set(left.zeroCopy(left.getRows(), right.getRows()).setRowName(left.getRowName()) + .setColName(right.getRowName()).setDimensionName(left.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(0).describe()); } - if(getInputs().get(1) instanceof Parameter && right.getRows()==0 && right.getRowName().equals("?")) { - if(left.getCols()==0 && left.getColName().equals("?")) + if (getInputs().get(1) instanceof Parameter && right.getRows() == 0 && right.getRowName().equals("?")) { + if (left.getCols() == 0 && left.getColName().equals("?")) throw new RuntimeException("Cannot autosize based on two unknown dimensions"); - ((Parameter)getInputs().get(1)).set( - right.zeroCopy(left.getCols(), right.getCols()) - .setRowName(left.getColName()) - .setColName(right.getColName()) - .setDimensionName(right.getDimensionName()) - ).runPrediction(); - if(debugging) - System.out.println("Automatically sized parameter: "+getInputs().get(1).describe()); - } + ((Parameter) getInputs().get(1)) + .set(right.zeroCopy(left.getCols(), right.getCols()).setRowName(left.getColName()) + .setColName(right.getColName()).setDimensionName(right.getDimensionName())) + .runPrediction(); + if (debugging) + System.out.println("Automatically sized parameter: " + getInputs().get(1).describe()); + } } } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Multiply.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Multiply.java index 9d7bb789..0bb7c8b0 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Multiply.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Multiply.java @@ -10,57 +10,58 @@ import mklab.JGNN.nn.pooling.Sum; /** - * Implements a {@link NNOperation} that multiplies its two inputs element-by-element. + * Implements a {@link NNOperation} that multiplies its two inputs + * element-by-element. 
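Per the Multiply.forward logic below, a size-1 input broadcasts as a scalar over the other input; a small sketch, again assuming the eager run(...) entry point:

    Tensor a = new DenseTensor(3);
    a.put(0, 1);
    a.put(1, 2);
    a.put(2, 3);
    Tensor scaled = new Multiply().run(Tensor.fromDouble(2.), a); // 2, 4, 6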
* * @author Emmanouil Krasanakis */ public class Multiply extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); Tensor input0 = inputs.get(0); Tensor input1 = inputs.get(1); - if(input0.size()==1) + if (input0.size() == 1) return input1.multiply(input0.toDouble()); - if(input0 instanceof Matrix && !(input1 instanceof Matrix)) - input1 = new ColumnRepetition(((Matrix)input0).getRows(), input1); + if (input0 instanceof Matrix && !(input1 instanceof Matrix)) + input1 = new ColumnRepetition(((Matrix) input0).getRows(), input1); Tensor product = input0.copy(); product.selfMultiply(input1); return product; } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { Tensor input0 = inputs.get(0); Tensor input1 = inputs.get(1); - if(input0.size()==1) { - if(inputId==0) { + if (input0.size() == 1) { + if (inputId == 0) { return Tensor.fromDouble(error.dot(input1)); } - if(inputId==1 && input0.toDouble()!=0) + if (inputId == 1 && input0.toDouble() != 0) return error.multiply(input0.toDouble()); - if(inputId==1) - return null;//this is the case where input0 is zero + if (inputId == 1) + return null;// this is the case where input0 is zero } - if(inputId==0) { + if (inputId == 0) { Tensor partialProduct = error.copy(); - if(input0 instanceof Matrix && !(input1 instanceof Matrix)) - input1 = new ColumnRepetition(((Matrix)input0).getRows(), input1); + if (input0 instanceof Matrix && !(input1 instanceof Matrix)) + input1 = new ColumnRepetition(((Matrix) input0).getRows(), input1); partialProduct.selfMultiply(input1); return partialProduct; - } - else if(inputId==1) { - if(input0 instanceof Matrix && !(input1 instanceof Matrix)) + } else if (inputId == 1) { + if (input0 instanceof Matrix && !(input1 instanceof Matrix)) return (new Sum(true).forward(Arrays.asList(error.multiply(input0)))); else { Tensor partialProduct = error.copy(); partialProduct.selfMultiply(input0); return partialProduct; } - } - else + } else throw new RuntimeException("Multiply takes exactly 2 arguments"); } + @Override public double getNonLinearity(int inputId, double inputMass, double outputNonLinearity) { return outputNonLinearity * inputMass; diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Reduce.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Reduce.java index 25a5d924..cbdd689c 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Reduce.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Reduce.java @@ -10,13 +10,13 @@ public class Reduce extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); Matrix edgeFeats = inputs.get(0).cast(Matrix.class); Matrix adj = inputs.get(1).cast(Matrix.class); Matrix ret = edgeFeats.zeroCopy(adj.getRows(), edgeFeats.getCols()); long id = 0; - for(Entry entry : adj.getNonZeroEntries()) { + for (Entry entry : adj.getNonZeroEntries()) { ret.accessRow(entry.getKey()).selfAdd(edgeFeats.accessRow(id), adj.get(entry.getKey(), entry.getValue())); id += 1; } @@ -25,14 +25,14 @@ protected Tensor forward(List inputs) { @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { - if(inputId==1) + if (inputId == 1) throw new RuntimeException("Cannot backpropagate over adjacency matrices in reduce"); Matrix edgeFeats = inputs.get(0).cast(Matrix.class); Matrix adj = inputs.get(1).cast(Matrix.class); Matrix err = 
error.cast(Matrix.class); Matrix ret = edgeFeats.zeroCopy(); long id = 0; - for(Entry entry : adj.getNonZeroEntries()) { + for (Entry entry : adj.getNonZeroEntries()) { ret.accessRow(id).selfAdd(err.accessRow(entry.getKey()), adj.get(entry.getKey(), entry.getValue())); id += 1; } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Repeat.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Repeat.java index b7b47edf..91c3a167 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Repeat.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Repeat.java @@ -9,32 +9,33 @@ import mklab.JGNN.core.matrix.ColumnRepetition; /** - * Implements a {@link NNOperation} that converts its first argument to a {@link ColumnRepetition} matrix - * with a number of columns equal to the second argument. + * Implements a {@link NNOperation} that converts its first argument to a + * {@link ColumnRepetition} matrix with a number of columns equal to the second + * argument. * * @author Emmanouil Krasanakis */ public class Repeat extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=2) + if (inputs.size() != 2) throw new IllegalArgumentException(); - int repetitions = (int)inputs.get(1).toDouble(); + int repetitions = (int) inputs.get(1).toDouble(); return new ColumnRepetition(repetitions, inputs.get(0)); } @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { - if(inputId==1) + if (inputId == 1) return null; Tensor ret = inputs.get(0).zeroCopy(); - Matrix errorMatrix = (Matrix)error; - for(Entry element : errorMatrix.getNonZeroEntries()) { + Matrix errorMatrix = (Matrix) error; + for (Entry element : errorMatrix.getNonZeroEntries()) { long row = element.getKey(); long col = element.getValue(); ret.put(col, ret.get(col) + errorMatrix.get(row, col)); } return ret; } - + } \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Reshape.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Reshape.java index de12eb33..acdac42b 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Reshape.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Reshape.java @@ -16,38 +16,40 @@ public class Reshape extends NNOperation { private long cols; private String rowName = null; private String colName = null; - + public Reshape(long rows, long cols) { this.rows = rows; this.cols = cols; - if(rows!=1 && cols!=1) - throw new IllegalArgumentException("For the time being, reshape should have at least one of its dimensions be 1"); + if (rows != 1 && cols != 1) + throw new IllegalArgumentException( + "For the time being, reshape should have at least one of its dimensions be 1"); } @Override protected Tensor forward(List inputs) { - if(inputs.size()!=1) + if (inputs.size() != 1) throw new IllegalArgumentException(); Tensor H = inputs.get(0); - Matrix ret = rows==1?H.asRow():H.asColumn(); - ret.assertSize(rows*cols); + Matrix ret = rows == 1 ? H.asRow() : H.asColumn(); + ret.assertSize(rows * cols); return ret.setDimensionName(rowName, colName); } - + @Override public String getSimpleDescription() { - return super.getSimpleDescription()+" ("+(rowName==null?"":(rowName+" "))+rows+","+(colName==null?"":(" "+colName+" "))+cols+")"; + return super.getSimpleDescription() + " (" + (rowName == null ? "" : (rowName + " ")) + rows + "," + + (colName == null ? 
"" : (" " + colName + " ")) + cols + ")"; } @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { - Tensor ret = inputs.get(0).zeroCopy(); // ensures typecast back to the correct matrix dims + Tensor ret = inputs.get(0).zeroCopy(); // ensures typecast back to the correct matrix dims error.assertMatching(output); - for(long i : error.getNonZeroElements()) // manual implementation of self-add to ignore all checks + for (long i : error.getNonZeroElements()) // manual implementation of self-add to ignore all checks ret.put(i, error.get(i)); return ret; } - + @Override public boolean isCachable() { return false; @@ -58,5 +60,5 @@ public Reshape setDimensionName(String rowName, String colName) { this.colName = colName; return this; } - + } \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/To.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/To.java index 450788f9..cb998305 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/To.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/To.java @@ -10,7 +10,8 @@ import mklab.JGNN.core.tensor.DenseTensor; /** - * Implements a {@link NNOperation} that lists the second element of the 2D matrix element iterator. + * Implements a {@link NNOperation} that lists the second element of the 2D + * matrix element iterator. * * @author Emmanouil Krasanakis * @see From @@ -18,19 +19,19 @@ public class To extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=1) + if (inputs.size() != 1) throw new IllegalArgumentException(); ArrayList ret = new ArrayList((int) inputs.get(0).estimateNumNonZeroElements()); - for(Entry entry : inputs.get(0).cast(Matrix.class).getNonZeroEntries()) + for (Entry entry : inputs.get(0).cast(Matrix.class).getNonZeroEntries()) ret.add(entry.getKey()); return new DenseTensor(ret.iterator()); } - + @Override public boolean isCachable() { return true; } - + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { throw new UnsupportedOperationException("Cannot iterate over non-constant matrices"); diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/Transpose.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/Transpose.java index 1013f459..dbb9fe1c 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/operations/Transpose.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/Transpose.java @@ -14,12 +14,13 @@ public class Transpose extends NNOperation { @Override protected Tensor forward(List inputs) { - if(inputs.size()!=1) + if (inputs.size() != 1) throw new IllegalArgumentException(); - return ((Matrix)inputs.get(0)).asTransposed(); + return ((Matrix) inputs.get(0)).asTransposed(); } + @Override protected Tensor partial(int inputId, List inputs, Tensor output, Tensor error) { - return ((Matrix)error).asTransposed(); + return ((Matrix) error).asTransposed(); } } diff --git a/JGNN/src/main/java/mklab/JGNN/nn/operations/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/operations/package-info.java new file mode 100644 index 00000000..7a362427 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/operations/package-info.java @@ -0,0 +1,8 @@ +/** + * Contains popular neural network and GNN operations. These are intermediate + * representations of parsed {@link mklab.JGNN.adhoc.ModelBuilder} expressions + * that call operations of the {@link mklab.JGNN.core} package. 
+ * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.operations; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/optimizers/Regularization.java b/JGNN/src/main/java/mklab/JGNN/nn/optimizers/Regularization.java index 228b6eb7..62e58a5b 100644 --- a/JGNN/src/main/java/mklab/JGNN/nn/optimizers/Regularization.java +++ b/JGNN/src/main/java/mklab/JGNN/nn/optimizers/Regularization.java @@ -4,31 +4,37 @@ import mklab.JGNN.core.Tensor; /** - * Wraps an {@link Optimizer} by applying the derivative of L2 loss - * on every tensor during {@link Optimizer#update(Tensor, Tensor)}. + * Wraps an {@link Optimizer} by applying the derivative of L2 loss on every + * tensor during {@link Optimizer#update(Tensor, Tensor)}. * * @author Emmanouil Krasanakis */ public class Regularization implements Optimizer { private Optimizer baseOptimizer; protected double regularization; + /** * Initializes a {@link Regularization}. - * @param baseOptimizer The base optimizer on which to apply regularization. + * + * @param baseOptimizer The base optimizer on which to apply regularization. * @param regularization The weight of the regularization. */ public Regularization(Optimizer baseOptimizer, double regularization) { this.baseOptimizer = baseOptimizer; this.regularization = regularization; } - protected Regularization() {} + + protected Regularization() { + } + @Override public void update(Tensor value, Tensor gradient) { - if(regularization==0) + if (regularization == 0) baseOptimizer.update(value, gradient); else baseOptimizer.update(value, gradient.add(value.multiply(regularization))); } + @Override public void reset() { baseOptimizer.reset(); diff --git a/JGNN/src/main/java/mklab/JGNN/nn/optimizers/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/optimizers/package-info.java new file mode 100644 index 00000000..b8c77e8b --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/optimizers/package-info.java @@ -0,0 +1,14 @@ +/** + * Contains optimizers that can be used to minimize training losses. Instantiate + * optimizers, and let methods like + * {@link mklab.JGNN.nn.Model#train(mklab.JGNN.nn.Loss, mklab.JGNN.nn.Optimizer, java.util.List, java.util.List)} + * request parameter update rules given the internally computed outcome of + * backpropagation. When writing a training procedure of your own, use the + * {@link mklab.JGNN.nn.optimizers.BatchOptimizer} to wrap some base optimizer + * and accumulate gradient updates until calling + * {@link mklab.JGNN.nn.optimizers.BatchOptimizer#updateAll()} at the end of + * each batch or epoch. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.optimizers; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/package-info.java new file mode 100644 index 00000000..6deb2ec2 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/package-info.java @@ -0,0 +1,13 @@ +/** + * Implements neural network components that are combined to define GNNs or + * other types of machine learning models. Hand-wiring everything may be + * cumbersome, so prefer using {@link mklab.JGNN.adhoc.ModelBuilder} and its + * extensions to construct {@link mklab.JGNN.nn.Model} instances. Components + * matching common neural operations are provided in sub-packages, where they + * are separated by their functional role as activations, inputs, operations, or + * pooling functions.
Additionally, Java code + * components are provided for losses and model parameter initialization. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn; \ No newline at end of file diff --git a/JGNN/src/main/java/mklab/JGNN/nn/pooling/package-info.java b/JGNN/src/main/java/mklab/JGNN/nn/pooling/package-info.java new file mode 100644 index 00000000..1d39d743 --- /dev/null +++ b/JGNN/src/main/java/mklab/JGNN/nn/pooling/package-info.java @@ -0,0 +1,7 @@ +/** + * Contains pooling/reduction operations that reduce the dimensions of inputs. + * In JGNN, this mainly means reducing matrices to vectors or smaller matrices. + * + * @author Emmanouil Krasanakis + */ +package mklab.JGNN.nn.pooling; \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index fa3b6ab3..c8806e94 100644 --- a/docs/index.html +++ b/docs/index.html @@ -174,28 +174,27 @@

JGNN

-

Graph Neural Networks (GNNs) are getting more and more popular, for example to make predictions +

Graph Neural Networks (GNNs) are becoming increasingly popular as a machine learning paradigm, + for example to make predictions based on relational information, or to perform inference on small datasets. JGNN is a library that - provides cross-platform implementations of this machine learning paradigm that do not require dedicated - hardware or firmware. The goal is to provide highly portable solutions that fit in + provides cross-platform implementations of this paradigm without the need for dedicated + hardware or firmware; the goal is highly portable models that fit and are trained in a few megabytes of memory. While reading this guidebook, keep in mind that this is not a library - for running computationally intensive architectures; it has no GPU support and does not plan to - add any (unless such support becomes integrated in the Java virtual machine). So, while complex - architectures like gated attention networks with many layers and hidden dimensions are supported, + for running computationally intensive workloads; it has no GPU support and we do not plan to + add any (unless such support becomes integrated in the Java virtual machine). So, while source code is + highly optimized and complex architectures are supported, running them quickly on graphs with many nodes may require compromises in the number of learned - parameters or computational complexity. The main advantage of JGNN is its support for settings - with limited resources.

+ parameters or running time.

-

This guidebook is organized into six sections that focus on - practical capabilities. After this brief introduction and - instructions for including JGNN in Java projects, section 2 - gives a taste of what using the library looks like, with details being left for later. Then, - section 3 describes how the library implements - the builder patter for constructing GNN models. Model construction +

This guidebook is organized into four sections that focus on + practical use. After this brief introduction and + instructions for how to set things up, section 2 + gives a taste of what using the library looks like. Then, + section 3 describes the + library's builder pattern for constructing GNN models. Model construction includes symbolic expression parsing for machine learning operations, - which drastically simplifies coding. Parsed expressions are - part of the Neuralang scripting language for model - definition. Finally, section 4 describes + which drastically simplifies coding. Parsed expressions follow the Neuralang + scripting language for model definitions. Finally, section 4 describes interfaces for training on automatically generated or customized data and testing. It also takes a deep dive into obtaining raw model predictions, and using them in custom training and evaluation schemes. @@ -211,9 +210,9 @@

JGNN

1. Setup

The simplest way to set up JGNN is to download it as a JAR package from releases - and add it your Java project's dependencies. Those working with Maven - or Gradle can instead add JGNN's latest nightly release as a dependency from the JitPack - repository. Follow the link below for full instructions.
+ and add it to your Java project's build path. Those working with Maven + or Gradle can instead add JGNN's latest nightly release as a dependency from its JitPack + distribution. Follow the link below for full instructions.
download JGNN

@@ -375,11 +374,11 @@

2. Quickstart

3. GNN Builders

-

We already touched on the subject of model builders in the quickstart section, +

We already touched on the subject of model builders in the previous section, where one of them was used to create a model. There exist different kinds of - builders that offer different conveniences. All of them support the method chain pattern. + builders that offer different conveniences.

    -
  • GNNBuilder - Parses simple Neuralang expressions.
  • +
  • GNNBuilder - Parses strings of simple Neuralang expressions.
• FastBuilder - Extends the GNNBuilder class with methods that inject boilerplate code for the inputs, outputs, and layers of node classification tasks. Prefer this builder if you want to keep track @@ -387,7 +386,7 @@

    3. GNN Builders

  • Neuralang - Extends the GNNBuilder class so that it can parse all aspects of the Neuralang language, such as functional declarations of machine learning modules, where parts of function signatures manage configuration hyperparameters. - Use this builder to maintain model definitions in one place (e.g., packed in one String + Use this builder to maintain model definitions in one place (e.g., packed in one string variable, or in one file) and avoid weaving symbolic expressions in Java code.
In this section we cover these three builder classes and summarize debugging mechanisms that @@ -445,10 +444,10 @@

3.1. ModelBuilder

for evaluation can be declared in outputs.

- The operation parses String expressions that are typically structured + as assignments to symbols; the right-hand side of assignments accepts several operators and functions that are listed in the next table. Models allow multiple operations too, which are parsed through either multiple - method calls or by being separated with a semicolon ; within larger String expressions. + method calls or by being separated with a semicolon ; within larger string expressions. All methods need to use previously declared symbols. For example, parsing .out("symbol") throws an exception if no operation previously assigned a value to the symbol or declared it as an input. For logic safety, symbols cannot be overwritten or set to updated values outside of Neuralang functions. @@ -822,9 +821,10 @@
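As an illustration of these conventions, the following sketch declares an input, a learnable parameter, a parsed operation, and an output; the symbol names and dimensions are illustrative rather than part of any fixed API.
ModelBuilder modelBuilder = new ModelBuilder()
	.var("x")                                    // declare an input symbol
	.param("w", Tensor.fromRange(4).asColumn())  // learnable parameter with an initial value
	.operation("yhat = sigmoid(x@w)")            // right-hand side may only use declared symbols
	.out("yhat");                                // calling .out on an undeclared symbol would throw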

3.2. FastBuilder

for each feature dimension, aggregates feature values across all nodes.

-

Reduction-based pooling is conceptually simple, but - could fail to distinguish between the structural positioning of - nodes to be pooled. One computationally light alternative, +

Reduction-based pooling performs a symmetric operation and + therefore fails to distinguish between + the structural positioning of nodes to be pooled. + One computationally light alternative, which JGNN implements, is sorting nodes based on learned features before concatenating their features into one vector for each graph. This process is further simplified by @@ -866,9 +866,11 @@

3.2. FastBuilder

3.3. Neuralang

Neuralang scripts consist of functions that declare machine learning - components. These call each other and adopt a syntax inspired by the - Mojo - language. Use a Rust highlighter to cover all keywords, though. + components. Use a Rust highlighter to cover all keywords. + Functions correspond to machine learning modules and call each other. + Each ends with a return statement that expresses its + outcome. All arguments are passed by value, i.e., any assignments are + performed on fresh variable instances. Before explaining how to use the Neuralang model builder, we present and analyse code that supports a fully functional architecture. First, look at the classify @@ -895,9 +897,9 @@

3.3. Neuralang

Exclamation marks ! before numbers broadcast values to all subsequent function calls that have configurations with the same - name. The broadcasted defaults overwrite any already existing default values with the same - name, but all defaults are replaced by values explicitly set when calling functions. - For example, take advantage of this prioritization to set specific dimensions for some layers. Importantly, + name. The broadcasted defaults overwrite already existing defaults of configurations with the same + name anywhere in the code. All defaults are replaced by values explicitly set when calling functions. + For example, take advantage of this prioritization to force output layer dimensions to match your data. Importantly, broadcasted values are stored within JGNN's Neuralang model builder too; this is useful for Java integration, for example to retrieve training hyperparameters from the model. To sum up, configuration values have the following priority, from strongest to weakest:
@@ -939,7 +941,7 @@

3.3. Neuralang

model builder and using them to create an architecture. To this end, save your code to a file and get it as a path Path architecture = Paths.get("filename.nn");, or avoid external files by inlining the definition within Java code through - a multiline String per String architecture = """ ... """;. + a multiline string per String architecture = """ ... """;. Below, this string is parsed within a functional programming chain, where each method call returns the modelBuilder instance to continue calling more methods.
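A minimal sketch of such a chain follows; the constant and output names are illustrative, and parse accepts either the multiline string or a Path.
ModelBuilder modelBuilder = new Neuralang()
	.parse(architecture)                       // parse the Neuralang definition
	.constant("features", dataset.features())  // bind a data constant (illustrative name)
	.out("yhat");                              // expose a symbol declared in the script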

@@ -1002,7 +1004,7 @@

3.4. Debugging

for this purpose: a .print() method that prints built functional flows in the system console, and a .getExecutionGraphDot() - method that returns a String holding the execution graph in + method that returns a string holding the execution graph in .dot format for visualization with tools like GraphViz.

@@ -1173,31 +1175,24 @@

3.4. Debugging

4. Training

Here we describe how to train a JGNN model created - per the previous section's instructions. - In general, training a machine learning architecture consists - of using some training and employing some optimization scheme - to adjust trainable parameter values based on those data. We + with the previous section's builders. + Broadly, we need to load some reference data and employ an optimization scheme + to adjust trainable parameter values based on the differences between desired + and current outputs. To this end, we start by describing generic patterns for creating graph and node - feature data, and then move to specific data organization + feature data, and then move to specific data organizations for the tasks of node classification and graph classification. - For these, we show how to set up both automated and custom - training schemas.

+ These tasks have helper classes that implement common training schemas + (reach out with requests for helper classes for other kinds of predictive tasks + in the project's GitHub issues).

4.1. Create data

-

JGNN provides dataset classes that can be used out-of-the-box - by automatically downloading their data. These can be found +

JGNN contains dataset classes that automatically download and load + datasets for out-of-the-box experimentation. These datasets can be found in the - adhoc.datasets Javadoc. In practice, though, you will want to - use your own data. We thus describe how to - manually fill in data, as well as operations that manipulate - those data. Data manipulation is needed to preprocess neural - inputs, post-process learning outcomes, create custom - parameters, contribute to the library with more components, - or make derivative works based on native Java vector and - matrix arithmetics. -

- -

In the simplest case, both the number of nodes or data samples, and + adhoc.datasets Javadoc, and we already covered their usage patterns. + In practice, though, you will want to + use your own data. In the simplest case, both the number of nodes or data samples, and the number of feature dimensions are known beforehand. If so, create dense feature matrices with the following code. This uses the minimum memory necessary to construct the feature matrix. If @@ -1213,8 +1208,8 @@

4.1. Create data

Sometimes, it is easier to read node or sample features line-by-line, for instance, when reading a .csv - file. In this case, store each line as a separate tensor, - so that list of row tensors can then be converted into a + file. In this case, store each line as a separate tensor. + Convert a list of tensors representing row vectors into a feature matrix like in the example below.

ArrayList<Tensor> rows = new ArrayList<Tensor>();
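For instance, assuming a comma-separated file of numeric features (the file name here is hypothetical) and the WrapRows class, which the javadoc describes as wrapping a list of tensors into a matrix with the tensors as rows, the conversion could look like this sketch:
for(String line : Files.readAllLines(Paths.get("features.csv"))) {
	String[] values = line.split(",");
	Tensor row = new DenseTensor(values.length);
	for(int col = 0; col < values.length; col++)
		row.put(col, Double.parseDouble(values[col]));
	rows.add(row);
}
Matrix features = new WrapRows(rows);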
@@ -1255,7 +1250,7 @@ 

4.1. Create data

All tensor operations can be viewed in the core.tensor and core.matrix - Javadoc. Of those, the Matrix class extends the concept + Javadoc. The Matrix class extends the concept of tensors with additional operations, like transposition, matrix multiplication, and row and column access. Under the hood, matrices linearly store elements and use @@ -1272,11 +1267,11 @@

4.1. Create data

summary statistics that output simple numeric values (e.g., double Tensor.sum()), and element getters and setters. In-place arithmetics follow the same naming - conventions of base arithmetics and begin with a "self" - prefix for pairwise operations, or bein with a "setTo" prefix - for unary operations. Prefer in-place arithmetics when - for intermediate calculation steps, as these do not allocate new - memory. For example, the following code can be + conventions of base arithmetics but their method names begin with a "self" + prefix for pairwise operations and a "setTo" prefix + for unary operations. Since they do not allocate new memory, + prefer them for intermediate calculation steps. + For example, the following code can be used for creating and normalizing a tensor of ones without using any additional memory.

@@ -1284,8 +1279,8 @@

4.1. Create data

.setToOnes() .setToNormalized();
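To make these conventions concrete, the following sketch contrasts out-of-place and in-place calls; the three-element values in comments are purely illustrative.
Tensor a = new DenseTensor(3).setToOnes();  // [1, 1, 1]
Tensor b = a.multiply(2);                   // out-of-place: allocates a new tensor [2, 2, 2]
a.selfAdd(b);                               // in-place pairwise: a becomes [3, 3, 3]
a.setToNormalized();                        // in-place unary: scales a by the inverse of its norm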
-

Initialize a dense or sparse tensor with their number - of elements. If there are many zero elements expected, +

Initialize a dense or sparse tensor (both of which represent one-dimensional vectors) with its number + of elements. If there are many zeros expected, prefer using a sparse tensor. For example, one-hot encodings for classification problems can be generated with the following code. This creates a dense tensor with @@ -1293,35 +1288,35 @@

4.1. Create data

element classId the value 1:

-
int classId = ...;
-int numClasses = ...;
-Tensor oneHotEncoding = new mklab.JGNN.tensor.DenseTensor(numClasses).set(classId, 1);
+
int classId = 1;
+int numClasses = 5;
+Tensor oneHotEncoding = new mklab.JGNN.core.tensor.DenseTensor(numClasses).set(classId, 1); // creates the tensor [0,1,0,0,0]
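+// If most entries are zeros, a sparse variant avoids allocating them (a sketch, assuming
+// SparseTensor exposes the same size-based constructor as DenseTensor):
+Tensor sparseOneHotEncoding = new mklab.JGNN.core.tensor.SparseTensor(numClasses).set(classId, 1);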

The above snippets all make use of numerical node identifiers. To - manage these, JGNN provides an IdConverter class. - You can convert hashable - objects (e.g., Strings) to identifiers by calling - IdConverter.getOrCreateId(object). The same - functionality is also helpful for one-hot encoding of - class labels. To search only for previously - registered identifiers, use - IdConverter.get(object). - For example, construct a label matrix of one-hot - encodings for training data:

+ manage these, JGNN provides an IdConverter class; + convert hashable objects (typically strings) to identifiers by calling + IdConverter.getOrCreateId(object). Also use + converters to one-hot encode class labels. To search only for previously + registered identifiers, use IdConverter.get(object). + For example, construct a label matrix with the following snippet. + In this, nodeLabels is a dictionary + from node identifiers to node labels that is being converted to a sparse matrix.

IdConverter nodeIds = new IdConverter();
 IdConverter classIds = new IdConverter();
-for(Entry entry : nodeLabels) {
+for(Entry<String, String> entry : nodeLabels.entrySet()) {
	nodeIds.getOrCreateId(entry.getKey());
 	classIds.getOrCreateId(entry.getValue());
 }
 Matrix labels = new SparseMatrix(nodeIds.size(), classIds.size());
-for(Entry entry : nodeLabels) 
-labels.put(nodeids.get(entry.getKey()), classIds.get(entry.getValue()), 1);
+for(Entry<String, String> entry : nodeLabels.entrySet()) + labels.put(nodeIds.get(entry.getKey()), classIds.get(entry.getValue()), 1); -

Reverse-search the IdConverter to obtain the original object - of predictions using IdConverter.get(String). For example: +

Reverse-search the converter to obtain the original object + of predictions per IdConverter.get(String). The following example + accesses one row of a label matrix, performs an argmax operation to find the position of the + maximum element, and reconstructs the label for the corresponding row with reverse-search.

long nodeId = nodeIds.get("nodeName");
@@ -1331,7 +1326,25 @@ 

4.1. Create data

4.2. Node classification

- [This section is under contruction] +

+ Node classification models are trained via backpropagation by providing a list of node indices and desired + predictions for those nodes. We first show an automation of the training process that controls + it in a predictable manner. +

+ + This section is under construction. + +
Slice nodes = dataset.samples().getSlice().shuffle(100);  // or nodes = new Slice(0, numNodes).shuffle(100);
+Model model = modelBuilder
+	.getModel()
+	.init(new XavierNormal())
+	.train(trainer,
+			nodes.samplesAsFeatures(), 
+			dataset.labels(), 
+			nodes.range(0, trainSplit), 
+			nodes.range(trainSplit, validationSplit));
+		
+
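The snippet assumes a trainer and split points defined beforehand; a minimal sketch of such definitions, with illustrative rather than prescriptive hyperparameter values, follows.
+ModelTraining trainer = new NodeClassification()  // helper class from mklab.JGNN.adhoc.train
+	.setOptimizer(new Adam(0.01))
+	.setEpochs(300)
+	.setPatience(100);
+double trainSplit = 0.6;       // first 60% of the shuffled samples train the model
+double validationSplit = 0.8;  // the next 20% validate it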

4.3. Graph classification

@@ -1390,7 +1403,8 @@

4.3. Graph classification

training batch. To do this, first retrieve the model and initialize its parameters:

-
Model model = builder.getModel().init(new XavierNormal());
+
Model model = builder.getModel()
+	.init(new XavierNormal());

Next, define a loss function and set up a batch optimization strategy wrapping any base optimizer and @@ -1425,21 +1439,18 @@

4.3. Graph classification

optimizer.updateAll(); }
-

To speed up graph classification, you can use JGNN's - parallelization capabilities to calculate gradients - across multiple threads. Parallelization for node - classification holds little meaning, as the same - propagation mechanism needs to be run on the same graph - in parallel. However, this process yields substantial - speedup for the graph classification - problem.

- -

Parallelization can make use of JGNN's thread pooling to - perform gradients, wait for the conclusion of submitted - tasks, and then apply all gradient updates. This is - achieved by declaring a batch optimizer to gather all - the gradients. The entire process is detailed in the - following example:

+

To speed up graph classification, use JGNN's + parallelization capabilities to calculate gradients + across multiple threads. Parallelization for node + classification holds little meaning, as the same + propagation mechanism needs to be run on the same graph + in parallel. However, this process yields substantial + speedup for the graph classification + problem. Parallelization can use JGNN's thread pooling to + compute gradients, wait for the conclusion of submitted + tasks, and then apply the accumulated gradient updates. + This is achieved through a batch optimizer that accumulates + gradients in the following example:

for(int epoch=0; epoch<500; epoch++) {
     // gradient update
diff --git a/docs/javadoc/allclasses-index.html b/docs/javadoc/allclasses-index.html
index 6e1bcb73..afd2283c 100644
--- a/docs/javadoc/allclasses-index.html
+++ b/docs/javadoc/allclasses-index.html
@@ -1,11 +1,11 @@
 
 
 
-
+
 All Classes and Interfaces
 
 
-
+
 
 
 
@@ -118,7 +118,8 @@ 

All Classes and Interfaces<

-
Implements a NNOperation that performs the operation 1-x for its simple input x.
+
Implements a NNOperation that performs the operation 1-x for its + simple input x.
@@ -146,19 +147,19 @@

All Classes and Interfaces<

-
Implements a square matrix whose diagonal elements are determined by the correspond values of - an underlying tensor and off-diagonal elements are zero.
+
Implements a square matrix whose diagonal elements are determined by the + corresponding values of an underlying tensor and off-diagonal elements are zero.
-
This interface abstracts a probability distribution - that can be passed to Tensor.setToRandom(Distribution) - for random tensor initialization.
+
This interface abstracts a probability distribution that can be passed to + Tensor.setToRandom(Distribution) for random tensor initialization.
-
Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
+
Implements a NNOperation that converts its first argument to a + ColumnRepetition matrix with a number of columns equal to the second + argument.
 
@@ -166,22 +167,25 @@

All Classes and Interfaces<
 
-
Implements a NNOperation that performs an exponential transformation of its single input.
+
Implements a NNOperation that performs an element-by-element + exponential transformation of its one input tensor.
-
Extends the capabilities of LayeredBuilder to use - for node classification.
+
Extends the capabilities of LayeredBuilder to use for node + classification.
 
-
Implements a NNOperation that lists the first element of the 2D matrix element iterator.
+
Implements a NNOperation that lists the first element of the 2D + matrix element iterator.
-
Implements a NNOperation that performs the equivalent of TensorFlow's gather operation.
+
Implements a NNOperation that performs the equivalent of TensorFlow's + gather operation.
@@ -209,17 +213,18 @@

All Classes and Interfaces<

-
Implements a NNOperation that performs a L1 transformation of its single input - by row or column.
+
Implements a NNOperation that performs an L1 transformation of its one + input tensor by row or by column.
-
Extends the capabilities of the ModelBuilder - with the ability to define multilayer (e.g.
+
Extends the capabilities of the ModelBuilder with the ability to + define multilayer (e.g.
-
Implements a NNOperation that outputs the natural logarithm of its single input.
+
Implements a NNOperation that outputs the natural logarithm of its + single input.
@@ -228,15 +233,17 @@

All Classes and Interfaces<

-
This class provides an abstract implementation of loss functions - to be used during Model training.
+
This class provides an abstract implementation of loss functions to be used + during Model training.
-
Implements a NNOperation that performs a leaky relu operation, where the first argument is a tensor on which - it is applied and the second one should be a tensor wrapping a double value (consider initializing this with as a - Constant holding a tensor generated with Tensor.fromDouble(double)) where - the wrapped value indicates the negative region's slope.
+
Implements a NNOperation that performs a leaky relu operation, where + the first argument is a tensor on which it is applied and the second one + should be a tensor wrapping a double value (consider initializing this with + a Constant holding a tensor generated with + Tensor.fromDouble(double)) where the wrapped value indicates the + negative region's slope.
 
@@ -262,7 +269,8 @@

All Classes and Interfaces<
-
A memory management system for thread-safe allocation and release of arrays of doubles.
+
A memory management system for thread-safe allocation and release of arrays + of doubles.
 
@@ -273,195 +281,212 @@

All Classes and Interfaces<
-
This class and subclasses can be used to create Model instances - by automatically creating and managing NNOperation instances based on +
This class and subclasses can be used to create Model instances by + automatically creating and managing NNOperation instances based on textual descriptions.
- +
-
This is a helper class that automates the definition of training processes of Model instances - by defining the number of epochs, loss functions, number of batches and the ability to use ThreadPool - for parallelized batch computations.
+
This is a helper class that automates the definition of training processes of + Model instances by defining the number of epochs, loss functions, + number of batches and the ability to use ThreadPool for parallelized + batch computations.
-
Implements a NNOperation that multiplies its two inputs element-by-element.
+
Implements a NNOperation that multiplies its two inputs + element-by-element.
-
 
+
+
Extends the base ModelBuilder with the full capabilities of the + Neuralang scripting language.
+
-
Implements a NNOperation that performs an exponential transformation of - its single input, but only on the non-zero elements.
+
Implements a NNOperation that performs an exponential transformation + of its single input, but only on the non-zero elements.
This class defines an abstract neural network operation with forward and backpropagation capabilities.
- +
-
Implements a Normal Distribution of given mean and standard deviation.
+
Extends the ModelTraining class with a method to explicitly train a + model from feature and label matrices.
+
+ +
+
Implements a Normal Distribution of given mean and standard + deviation.
- -
+ +
Provides an interface for training tensors.
- -
+ +
Implements a NNOperation that holds and returns a parameter tensor.
- -
 
- -
+ +
 
+ +
Downloads and constructs the Pubmed node classification Dataset.
- -
+ +
Implements an iterator that traverses a range (similar to Python's range(min, max) method).
- -
+ +
Implements an iterator that traverses a two-dimensional range (min, max) x (min2, max2).
- -
 
- -
-
Wraps an Optimizer by applying the derivative of L2 loss - on every tensor during Optimizer.update(Tensor, Tensor).
-
- + +
 
+
-
Implements a NNOperation that performs a relu transformation of its single input first introduced by - Hahnloser, Richard HR, Rahul Sarpeshkar, Misha A.
+
Wraps an Optimizer by applying the derivative of L2 loss on every + tensor during Optimizer.update(Tensor, Tensor).
- +
-
Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
+
Implements a NNOperation that performs a relu transformation of its + one input tensor.
- +
-
Implements a Matrix whose elements are all equals.
+
Implements a NNOperation that converts its first argument to a + ColumnRepetition matrix with a number of columns equal to the second + argument.
- +
-
This class provides Tensor whose elements are all equal.
+
Implements a Matrix whose elements are all equal.
- +
-
Implements a NNOperation that reshapes a matrix.
+
This class provides Tensor whose elements are all equal.
- +
-
Defines a matrix whose rows are all a copy of a Tensor.
+
Implements a NNOperation that reshapes a matrix.
- +
-
Implements a NNOperation that performs a sigmoid transformation of its single input.
+
Defines a matrix whose rows are all a copy of a Tensor.
- +
-
This class provices an interface with which to define data slices, - for instance to sample labels.
+
Implements a NNOperation that performs a sigmoid transformation of + its single input.
- +
+
This class provides an interface with which to define data slices, for + instance to sample labels.
+
+ +
Implements a NNOperation that performs row-wise or column-wise softmax on vector tensors or matrices.
- -
 
- +
 
- -
+ +
 
+ +
A sparse Matrix that allocates memory only for non-zero elements.
- -
Deprecated. + +
Deprecated.
Under development.
- -
+ +
This class provides a sparse Tensor with many zero elements.
- -
+ +
Implements a NNOperation that performs row-wise or column-wise sum reduction on vector tensors or matrices.
- -
-
Implements a NNOperation that performs a tanh transformation of its single input.
-
- +
-
This class provides a native java implementation of Tensor functionalities.
+
Implements a NNOperation that performs a tanh transformation of its + single input.
- +
-
This class provides thread execution pool utilities while keeping track of thread - identifiers for use by thread-specific NNOperation.
+
This class provides a native java implementation of Tensor functionalities.
- +
-
Implements a NNOperation that lists the second element of the 2D matrix element iterator.
+
This class provides thread execution pool utilities while keeping track of + thread identifiers for use by thread-specific + NNOperation.
- +
-
Implements a NNOperation that performs matrix transposition.
+
Implements a NNOperation that lists the second element of the 2D + matrix element iterator.
- +
-
Generates a transposed version of a base matrix, with which it shares elements.
+
Implements a NNOperation that performs matrix transposition.
- +
-
Implements a Uniform Distribution of given bounds.
+
Generates a transposed version of a base matrix, with which it shares + elements.
- +
-
Implements a NNOperation that represents Model inputs.
+
Implements a Uniform Distribution of given bounds.
- +
+
Implements a NNOperation that represents Model inputs.
+
+ +
This class describes a broad class of Initializer strategies, in which dense neural layer initialization is controlled so that variance is mostly preserved from inputs to outputs to avoid vanishing or exploding gradients in the first training runs.
- -
-
Implements a dense Matrix where all elements are stored in memory.
-
- +
-
This class provides a dense Tensor that wraps an array of doubles.
+
Implements a dense Matrix where all elements are stored in memory.
- +
-
Implements a Loss that wraps other losses and outputs their value during training to an output stream - (to System.out by default).
+
This class provides a dense Tensor that wraps an array of doubles.
- +
-
Wraps a list of tensors into a matrix with the tensors as columns.
+
Implements a Loss that wraps other losses and outputs their value + during training to an output stream (to System.out by default).
- +
-
Wraps a list of tensors into a matrix with the tensors as rows.
+
Wraps a list of tensors into a matrix with the tensors as columns.
- +
- +
Wraps a list of tensors into a matrix with the tensors as rows.
- + + +
diff --git a/docs/javadoc/allpackages-index.html b/docs/javadoc/allpackages-index.html index b184a7fe..ccdcb548 100644 --- a/docs/javadoc/allpackages-index.html +++ b/docs/javadoc/allpackages-index.html @@ -1,11 +1,11 @@ - + All Packages - + @@ -57,41 +57,93 @@

All Packages

Package
Description
-
 
+
+
Contains classes that simplify data loading, model building, and training.
+
-
 
+
+
This package contains datasets for out-of-the-box experimentation.
+
-
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
+
+
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
+
+ +
+
Contains model training strategies that correspond to different predictive + tasks.
+
+ +
+
Contains base numerical data classes, as well as supporting abstract classes.
+
+ +
+
Contains data distributions that produce one numerical value and can be used + for tensor value initialization.
+
+ +
+
Contains empty extensions of datatypes that hold only dimension names and + sizes but no data.
+
+ +
+
Contains implementations of matrix classes, of transparent access to parts of + these classes, and of column/row repetitions that broadcast vectors into + matrices.
+
+ +
+
Contains implementations of tensor classes, as well as transparent access to + parts of these classes.
+
+ +
+
Contains utility functions that are employed internally, mainly optimized 1D + and 2D iterators.
+
+ +
+
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
+
+ +
+
Implements activation functions to be used as model operations.
+
+ +
+
Implements initializers to be applied on Model + parameters to stochastically induce some desired property at the first + training epoch.
+
+ +
+
Contains various types of neural architecture inputs.
+
+ +
+
Contains classes for instantiating loss functions.
+
+ +
+
Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status.
+
+ +
+
Contains popular neural network and GNN operations.
+
+ +
+
Contains optimizers that can be used to minimize training losses.
+
+ +
+
Contains pooling/reduction operations that reduce the dimensions of inputs.
+
diff --git a/docs/javadoc/deprecated-list.html b/docs/javadoc/deprecated-list.html index 9504a209..c6cb9605 100644 --- a/docs/javadoc/deprecated-list.html +++ b/docs/javadoc/deprecated-list.html @@ -1,11 +1,11 @@ - + Deprecated List - + @@ -80,21 +80,27 @@

Contents

Method
Description
- +
-
This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
+
This method was available in earlier JGNN versions but will be + gradually phased out. Instead, wrap the validation loss within + VerboseLoss to replicate the + same behavior.
- +
-
This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
+
- +
-
This method was available in earlier JGNN versions but will be gradually phased out. - Instead, wrap the validation loss within VerboseLoss to replicate - the same behavior.
+
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
+
+ +
+
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
diff --git a/docs/javadoc/element-list b/docs/javadoc/element-list index 8783cac7..94c50b99 100644 --- a/docs/javadoc/element-list +++ b/docs/javadoc/element-list @@ -1,6 +1,7 @@ mklab.JGNN.adhoc mklab.JGNN.adhoc.datasets mklab.JGNN.adhoc.parsers +mklab.JGNN.adhoc.train mklab.JGNN.core mklab.JGNN.core.distribution mklab.JGNN.core.empy diff --git a/docs/javadoc/help-doc.html b/docs/javadoc/help-doc.html index bdd7d91b..64408a74 100644 --- a/docs/javadoc/help-doc.html +++ b/docs/javadoc/help-doc.html @@ -1,11 +1,11 @@ - + API Help - + diff --git a/docs/javadoc/index-files/index-1.html b/docs/javadoc/index-files/index-1.html index edd5fcbc..451f13b3 100644 --- a/docs/javadoc/index-files/index-1.html +++ b/docs/javadoc/index-files/index-1.html @@ -1,11 +1,11 @@ - + A-Index - + @@ -127,13 +127,11 @@

A

accessSubtensor(long) - Method in class mklab.JGNN.core.Tensor
-
Wraps a range of elements within a tensor - without allocating memory anew.
+
Wraps a range of elements within a tensor without allocating memory anew.
accessSubtensor(long, long) - Method in class mklab.JGNN.core.Tensor
-
Wraps a range of elements within a tensor - without allocating memory anew.
+
Wraps a range of elements within a tensor without allocating memory anew.
AccessSubtensor - Class in mklab.JGNN.core.tensor
@@ -229,17 +227,19 @@

A

asColumn() - Method in class mklab.JGNN.core.Tensor
-
Accesses the tensor through a single-column matrix with the tensor as the only row.
+
Accesses the tensor through a single-column matrix with the tensor as the + only row.
asRow() - Method in class mklab.JGNN.core.Tensor
-
Accesses the tensor through a single-row matrix with the tensor as the only column.
+
Accesses the tensor through a single-row matrix with the tensor as the only + column.
assertBackwardValidity() - Method in class mklab.JGNN.adhoc.ModelBuilder
Asserts that all components parsed into a call graph with - ModelBuilder.operation(String) are eventually used by at least one ModelBuilder.out(String) - component.
+ ModelBuilder.operation(String) are eventually used by at least one + ModelBuilder.out(String) component.
assertFinite() - Method in class mklab.JGNN.core.Tensor
@@ -263,8 +263,8 @@

A

asTransposed() - Method in class mklab.JGNN.core.Matrix
-
Creates a transposed version of the matrix that accesses the same elements (thus, editing one - edits the other) without allocating additional memory.
+
Creates a transposed version of the matrix that accesses the same elements + (thus, editing one edits the other) without allocating additional memory.
asTransposed() - Method in class mklab.JGNN.core.matrix.TransposedMatrix
 
@@ -276,9 +276,19 @@

A

Attention() - Constructor for class mklab.JGNN.nn.operations.Attention
 
autosize(List<Tensor>) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
 
+
+
Applies the ModelBuilder.createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with ModelBuilder.assertBackwardValidity().
+
autosize(Tensor...) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
 
+
+
Applies the ModelBuilder.createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with ModelBuilder.assertBackwardValidity().
+
A B C D E F G H I K L M N O P R S T U V W X Z 
All Classes and Interfaces|All Packages
diff --git a/docs/javadoc/index-files/index-10.html b/docs/javadoc/index-files/index-10.html index b53e46d9..a17f03d1 100644 --- a/docs/javadoc/index-files/index-10.html +++ b/docs/javadoc/index-files/index-10.html @@ -1,11 +1,11 @@ - + K-Index - + diff --git a/docs/javadoc/index-files/index-11.html b/docs/javadoc/index-files/index-11.html index aad17943..8c57e9e2 100644 --- a/docs/javadoc/index-files/index-11.html +++ b/docs/javadoc/index-files/index-11.html @@ -1,11 +1,11 @@ - + L-Index - + @@ -57,29 +57,36 @@

L

L1 - Class in mklab.JGNN.nn.activations
-
Implements a NNOperation that performs a L1 transformation of its single input - by row or column.
+
Implements a NNOperation that performs an L1 transformation of its one + input tensor by row or by column.
L1() - Constructor for class mklab.JGNN.nn.activations.L1
-
 
+
+
Instantiates an L1 operation that transforms inputs by row.
+
L1(boolean) - Constructor for class mklab.JGNN.nn.activations.L1
-
 
+
+
Instantiates an L1 operation that transforms inputs along the dimension + signified by its argument.
+
labels() - Method in class mklab.JGNN.adhoc.Dataset
Retrieves the dataset's sample labels in one-hot encoding.
layer(String) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
-
Applies an FastBuilder.operation(String) and increases the layer identifier count.
+
Applies an FastBuilder.operation(String) and increases the layer identifier + count.
layer(String) - Method in class mklab.JGNN.adhoc.parsers.LayeredBuilder
-
Applies an LayeredBuilder.operation(String) and increases the layer identifier count.
+
Applies an LayeredBuilder.operation(String) and increases the layer identifier + count.
LayeredBuilder - Class in mklab.JGNN.adhoc.parsers
-
Extends the capabilities of the ModelBuilder - with the ability to define multilayer (e.g.
+
Extends the capabilities of the ModelBuilder with the ability to + define multilayer (e.g.
LayeredBuilder() - Constructor for class mklab.JGNN.adhoc.parsers.LayeredBuilder
@@ -98,7 +105,10 @@

L

Repeats a LayeredBuilder.layer(String) definition a number of times.
load(Path) - Static method in class mklab.JGNN.adhoc.ModelBuilder
-
 
+
+
Loads a ModelBuilder instance from the provided path, such as + Paths.get("example.jgnn").
+
log() - Method in class mklab.JGNN.core.tensor.DenseTensor
 
log() - Method in class mklab.JGNN.core.Tensor
@@ -107,14 +117,15 @@

L

Log - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that outputs the natural logarithm of its single input.
+
Implements a NNOperation that outputs the natural logarithm of its + single input.
Log() - Constructor for class mklab.JGNN.nn.operations.Log
 
Loss - Class in mklab.JGNN.nn
-
This class provides an abstract implementation of loss functions - to be used during Model training.
+
This class provides an abstract implementation of loss functions to be used + during Model training.
Loss - Interface in mklab.JGNN.core.util
@@ -125,10 +136,12 @@

L

 
LRelu - Class in mklab.JGNN.nn.activations
-
Implements a NNOperation that performs a leaky relu operation, where the first argument is a tensor on which - it is applied and the second one should be a tensor wrapping a double value (consider initializing this with as a - Constant holding a tensor generated with Tensor.fromDouble(double)) where - the wrapped value indicates the negative region's slope.
+
Implements a NNOperation that performs a leaky relu operation, where + the first argument is a tensor on which it is applied and the second one + should be a tensor wrapping a double value (consider initializing this with + a Constant holding a tensor generated with + Tensor.fromDouble(double)) where the wrapped value indicates the + negative region's slope.
LRelu() - Constructor for class mklab.JGNN.nn.activations.LRelu
 
diff --git a/docs/javadoc/index-files/index-12.html b/docs/javadoc/index-files/index-12.html index 2c2a2fbb..845826af 100644 --- a/docs/javadoc/index-files/index-12.html +++ b/docs/javadoc/index-files/index-12.html @@ -1,11 +1,11 @@ - + M-Index - + @@ -59,7 +59,8 @@

M

 
matmul(Matrix) - Method in class mklab.JGNN.core.Matrix
-
Performs the matrix multiplication of this*with and the recipient.
+
Performs the matrix multiplication of this*with and the + recipient.
matmul(Matrix) - Method in class mklab.JGNN.core.matrix.VectorizedMatrix
 
@@ -67,13 +68,12 @@

M

 
matmul(Matrix, boolean, boolean) - Method in class mklab.JGNN.core.Matrix
-
Can be used to perform fast computation of the matrix multiplications -
this*with, -
this.transposed()*with -
this*with.transposed(), -
this.transposed()*with.transposed() -
while avoiding the overhead of calling - Matrix.transposed().
+
Can be used to perform fast computation of the matrix multiplications
+ this*with,
+ this.transposed()*with
+ this*with.transposed(),
+ this.transposed()*with.transposed()
+ while avoiding the overhead of calling Matrix.transposed().
matmul(Matrix, boolean, boolean) - Method in class mklab.JGNN.core.matrix.VectorizedMatrix
 
@@ -111,7 +111,8 @@

M

 
Memory - Class in mklab.JGNN.core
-
A memory management system for thread-safe allocation and release of arrays of doubles.
+
A memory management system for thread-safe allocation and release of arrays + of doubles.
Memory() - Constructor for class mklab.JGNN.core.Memory
 
@@ -122,37 +123,84 @@

M

Computes the minimum tensor element.
mklab.JGNN.adhoc - package mklab.JGNN.adhoc
-
 
+
+
Contains classes that simplify data loading, model building, and training.
+
mklab.JGNN.adhoc.datasets - package mklab.JGNN.adhoc.datasets
-
 
+
+
This package contains datasets for out-of-the-box experimentation.
+
mklab.JGNN.adhoc.parsers - package mklab.JGNN.adhoc.parsers
-
 
+
+
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
+
+
mklab.JGNN.adhoc.train - package mklab.JGNN.adhoc.train
+
+
Contains model training strategies that correspond to different predictive + tasks.
+
mklab.JGNN.core.distribution - package mklab.JGNN.core.distribution
-
 
+
+
Contains data distributions that produce one numerical value and can be used + for tensor value initialization.
+
mklab.JGNN.core.empy - package mklab.JGNN.core.empy
-
 
+
+
Contains empty extensions of datatypes that hold only dimension names and + sizes but no data.
+
mklab.JGNN.core.matrix - package mklab.JGNN.core.matrix
-
 
+
+
Contains implementations of matrix classes, of transparent access to parts of + these classes, and of column/row repetitions that broadcast vectors into + matrices.
+
mklab.JGNN.core.tensor - package mklab.JGNN.core.tensor
-
 
+
+
Contains implementations of tensor classes, as well as transparent access to + parts of these classes.
+
mklab.JGNN.nn - package mklab.JGNN.nn
-
 
+
+
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
+
mklab.JGNN.nn.activations - package mklab.JGNN.nn.activations
-
 
+
+
Implements activation functions to be used as model operations.
+
mklab.JGNN.nn.initializers - package mklab.JGNN.nn.initializers
-
 
+
+
Implements initializers to be applied on Model + parameters to stochastically induce some desired property at the first + training epoch.
+
mklab.JGNN.nn.inputs - package mklab.JGNN.nn.inputs
-
 
+
+
Contains various types of neural architecture inputs.
+
mklab.JGNN.nn.loss - package mklab.JGNN.nn.loss
-
 
+
+
Contains classes for instantiating loss functions.
+
mklab.JGNN.nn.loss.report - package mklab.JGNN.nn.loss.report
-
 
+
+
Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status.
+
mklab.JGNN.nn.operations - package mklab.JGNN.nn.operations
-
 
+
+
Contains popular neural network and GNN operations.
+
mklab.JGNN.nn.optimizers - package mklab.JGNN.nn.optimizers
-
 
+
+
Contains optimizers that can be used to minimize training losses.
+
mklab.JGNN.nn.pooling - package mklab.JGNN.nn.pooling
-
 
+
+
Contains pooling/reduction operations that reduce the dimensions of inputs.
+
Model - Class in mklab.JGNN.nn
This class is a way to organize NNOperation trees into trainable machine @@ -166,21 +214,22 @@

M

ModelBuilder - Class in mklab.JGNN.adhoc
-
This class and subclasses can be used to create Model instances - by automatically creating and managing NNOperation instances based on +
This class and subclasses can be used to create Model instances by + automatically creating and managing NNOperation instances based on textual descriptions.
ModelBuilder() - Constructor for class mklab.JGNN.adhoc.ModelBuilder
 
ModelBuilder(Model) - Constructor for class mklab.JGNN.adhoc.ModelBuilder
 
-
ModelTraining - Class in mklab.JGNN.nn
+
ModelTraining - Class in mklab.JGNN.adhoc
-
This is a helper class that automates the definition of training processes of Model instances - by defining the number of epochs, loss functions, number of batches and the ability to use ThreadPool - for parallelized batch computations.
+
This is a helper class that automates the definition of training processes of + Model instances by defining the number of epochs, loss functions, + number of batches and the ability to use ThreadPool for parallelized + batch computations.
-
ModelTraining() - Constructor for class mklab.JGNN.nn.ModelTraining
+
ModelTraining() - Constructor for class mklab.JGNN.adhoc.ModelTraining
 
multiply(double) - Method in class mklab.JGNN.core.tensor.DenseTensor
 
@@ -196,7 +245,8 @@

M

 
Multiply - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that multiplies its two inputs element-by-element.
+
Implements a NNOperation that multiplies its two inputs + element-by-element.
Multiply() - Constructor for class mklab.JGNN.nn.operations.Multiply
 
diff --git a/docs/javadoc/index-files/index-13.html b/docs/javadoc/index-files/index-13.html index b675ede8..c15a8454 100644 --- a/docs/javadoc/index-files/index-13.html +++ b/docs/javadoc/index-files/index-13.html @@ -1,11 +1,11 @@ - + N-Index - + @@ -62,13 +62,16 @@

N

Computes the negative of tensor elements.
Neuralang - Class in mklab.JGNN.adhoc.parsers
-
 
+
+
Extends the base ModelBuilder with the full capabilities of the + Neuralang scripting language.
+
Neuralang() - Constructor for class mklab.JGNN.adhoc.parsers.Neuralang
 
NExp - Class in mklab.JGNN.nn.activations
-
Implements a NNOperation that performs an exponential transformation of - its single input, but only on the non-zero elements.
+
Implements a NNOperation that performs an exponential transformation + of its single input, but only on the non-zero elements.
NExp() - Constructor for class mklab.JGNN.nn.activations.NExp
 
@@ -81,15 +84,24 @@

N

This class defines an abstract neural network operation with forward and backpropagation capabilities.
+
NodeClassification - Class in mklab.JGNN.adhoc.train
+
+
Extends the ModelTraining class with a method to explicitly train a + model from feature and label matrices.
+
+
NodeClassification() - Constructor for class mklab.JGNN.adhoc.train.NodeClassification
+
 
norm() - Method in class mklab.JGNN.core.Tensor
 
Normal - Class in mklab.JGNN.core.distribution
-
Implements a Normal Distribution of given mean and standard deviation.
+
Implements a Normal Distribution of given mean and standard + deviation.
Normal() - Constructor for class mklab.JGNN.core.distribution.Normal
-
Instantiates a normal distribution with zero mean and standard deviation equal to 1.
+
Instantiates a normal distribution with zero mean and standard deviation + equal to 1.
Normal(double, double) - Constructor for class mklab.JGNN.core.distribution.Normal
diff --git a/docs/javadoc/index-files/index-14.html b/docs/javadoc/index-files/index-14.html index 3b3e4b39..09fd12e3 100644 --- a/docs/javadoc/index-files/index-14.html +++ b/docs/javadoc/index-files/index-14.html @@ -1,11 +1,11 @@ - + O-Index - + @@ -61,8 +61,8 @@

O

operation(String) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Parses one or more operations split by new line characters or ; - to add to the execution graph.
+
Parses one or more operations split by new line characters or ; to add to the + execution graph.
operation(String) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
 
@@ -74,8 +74,7 @@

O

out(String) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Declares the component with the given name an output of the - managed model.
+
Declares the component with the given name an output of the managed model.
out(String) - Method in class mklab.JGNN.adhoc.parsers.LayeredBuilder
 
diff --git a/docs/javadoc/index-files/index-15.html b/docs/javadoc/index-files/index-15.html index 4723ca0d..84ba1d4d 100644 --- a/docs/javadoc/index-files/index-15.html +++ b/docs/javadoc/index-files/index-15.html @@ -1,11 +1,11 @@ - + P-Index - + @@ -66,8 +66,8 @@

P

 
param(String, Tensor) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
+
Declares a learnable mklab.JGNN.nn.inputs.Parameter component with + the given name, zero regularization, and initial value.
param(String, Tensor) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
 
@@ -82,9 +82,14 @@

P

Parameter(Tensor, double) - Constructor for class mklab.JGNN.nn.inputs.Parameter
 
parse(String) - Method in class mklab.JGNN.adhoc.parsers.Neuralang
-
 
+
+
Parses Neuralang source code by handling function declarations in addition to + other expressions.
+
parse(Path) - Method in class mklab.JGNN.adhoc.parsers.Neuralang
-
 
+
+
Parses a Neuralang source code file.
+
persist() - Method in class mklab.JGNN.core.empy.EmptyMatrix
 
persist() - Method in class mklab.JGNN.core.empy.EmptyTensor
@@ -124,8 +129,8 @@

P

persist() - Method in class mklab.JGNN.core.Tensor
Deprecated. -
This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
+
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
persist() - Method in class mklab.JGNN.core.tensor.RepeatTensor
diff --git a/docs/javadoc/index-files/index-16.html b/docs/javadoc/index-files/index-16.html index 272cf52b..234ddd4c 100644 --- a/docs/javadoc/index-files/index-16.html +++ b/docs/javadoc/index-files/index-16.html @@ -1,11 +1,11 @@ - + R-Index - + @@ -57,10 +57,9 @@

R

range(double, double) - Method in class mklab.JGNN.core.Slice
-
Performs the Slice.range(int, int) operation - while replacing values of from and end - with (int)(from*size()) and (int)(end*size()) - so that fractional ranges can be obtained.
+
Performs the Slice.range(int, int) operation while replacing values of + from and end with (int)(from*size()) + and (int)(end*size()) so that fractional ranges can be obtained.
range(int, int) - Method in class mklab.JGNN.core.Slice
@@ -86,8 +85,8 @@

R

 
Regularization - Class in mklab.JGNN.nn.optimizers
-
Wraps an Optimizer by applying the derivative of L2 loss - on every tensor during Optimizer.update(Tensor, Tensor).
+
Wraps an Optimizer by applying the derivative of L2 loss on every + tensor during Optimizer.update(Tensor, Tensor).
Regularization(Optimizer, double) - Constructor for class mklab.JGNN.nn.optimizers.Regularization
@@ -132,8 +131,8 @@

R

release() - Method in class mklab.JGNN.core.Tensor
Deprecated. -
This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
+
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
release() - Method in class mklab.JGNN.core.tensor.RepeatTensor
@@ -154,8 +153,8 @@

R

Relu - Class in mklab.JGNN.nn.activations
-
Implements a NNOperation that performs a relu transformation of its single input first introduced by - Hahnloser, Richard HR, Rahul Sarpeshkar, Misha A.
+
Implements a NNOperation that performs a relu transformation of its + one input tensor.
Relu() - Constructor for class mklab.JGNN.nn.activations.Relu
 
@@ -176,13 +175,14 @@

R

rememberAs(String) - Method in class mklab.JGNN.adhoc.parsers.LayeredBuilder
Sets the current layer identifier to a specific symbol layerId - so that future usage of {layerId} is automatically replaced with + so that future usage of {layerId} is automatically replaced with the identifier.
Repeat - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
+
Implements a NNOperation that converts its first argument to a + ColumnRepetition matrix with a number of columns equal to the second + argument.
Repeat() - Constructor for class mklab.JGNN.nn.operations.Repeat
 
@@ -240,15 +240,13 @@

R

runModel(ArrayList<Tensor>) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
This is a wrapper for getModel().predict(inputs) - without returning output values (use ModelBuilder.get(String) - afterwards to view outputs.
+
This is a wrapper for getModel().predict(inputs) without + returning output values (use ModelBuilder.get(String) afterwards to view outputs).
runModel(Tensor...) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
This is a wrapper for getModel().predict(inputs) - without returning output values (use ModelBuilder.get(String) - afterwards to view outputs.
+
This is a wrapper for getModel().predict(inputs) without + returning output values (use ModelBuilder.get(String) afterwards to view outputs).
runPrediction() - Method in class mklab.JGNN.nn.NNOperation
 
diff --git a/docs/javadoc/index-files/index-17.html b/docs/javadoc/index-files/index-17.html index 3617c7e6..1fe2da93 100644 --- a/docs/javadoc/index-files/index-17.html +++ b/docs/javadoc/index-files/index-17.html @@ -1,11 +1,11 @@ - + S-Index - + @@ -65,19 +65,23 @@

S

 
samples() - Method in class mklab.JGNN.adhoc.Dataset
-
Retrieves a converter that maps samples to long identifiers that match them to - rows of Dataset.features(), Dataset.labels(), and Dataset.graph() matrices.
+
Retrieves a converter that maps samples to long identifiers that match them + to rows of Dataset.features(), Dataset.labels(), and Dataset.graph() + matrices.
samplesAsFeatures() - Method in class mklab.JGNN.core.Slice
-
Constructs a column matrix holding identifiers in - the range 0,1,..Slice.size()-1 so that the pattern +
Constructs a column matrix holding identifiers in the range + 0,1,..Slice.size()-1 so that the pattern slice.samplesAsFeatures().accessRows(slice.range(from, end)) - retrieves one-element tensors holding + retrieves one-element tensors holding slice[from], slice[from+1], ...
save(Path) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
 
+
+
Serializes the model builder instance into a Path, such as + Paths.get("example.jgnn").
+
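A minimal sketch of the call described above, assuming an already constructed ModelBuilder named builder:

    import java.nio.file.Paths;

    builder.save(Paths.get("example.jgnn"));
    // The load(Path) counterpart restores a builder from such a file.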
scope() - Static method in class mklab.JGNN.core.Memory
 
selfAbs() - Method in class mklab.JGNN.core.tensor.DenseTensor
@@ -102,7 +106,8 @@

S

 
selfAdd(Tensor, double) - Method in class mklab.JGNN.core.Tensor
-
Performs in-memory weighted addition to the Tensor, storing the result in itself.
+
Performs in-memory weighted addition to the Tensor, storing the result in + itself.
selfExpMinusOne() - Method in class mklab.JGNN.core.tensor.DenseTensor
 
@@ -120,13 +125,15 @@

S

 
selfLog() - Method in class mklab.JGNN.core.Tensor
-
Performs in-memory set of each element to the logarithm of its absolute value.
+
Performs in-memory set of each element to the logarithm of its absolute + value.
selfMultiply(double) - Method in class mklab.JGNN.core.tensor.DenseTensor
 
selfMultiply(double) - Method in class mklab.JGNN.core.Tensor
-
Performs in-memory multiplication on the Tensor, storing the result to itself.
+
Performs in-memory multiplication on the Tensor, storing the result to + itself.
selfMultiply(double) - Method in class mklab.JGNN.core.tensor.VectorizedTensor
 
@@ -134,7 +141,8 @@

S

 
selfMultiply(Tensor) - Method in class mklab.JGNN.core.Tensor
-
Performs in-memory multiplication on the Tensor, storing the result in itself .
+
Performs in-memory multiplication on the Tensor, storing the result in + itself.
selfMultiply(Tensor) - Method in class mklab.JGNN.core.tensor.VectorizedTensor
 
@@ -148,7 +156,8 @@

S

 
selfSqrt() - Method in class mklab.JGNN.core.Tensor
-
Performs in-memory set of each element to the square root of its absolute value.
+
Performs in-memory set of each element to the square root of its absolute + value.
selfSubtract(Tensor) - Method in class mklab.JGNN.core.tensor.DenseTensor
 
@@ -204,7 +213,7 @@

S

setEnabled(boolean) - Method in class mklab.JGNN.nn.operations.Dropout
 
-
setEpochs(int) - Method in class mklab.JGNN.nn.ModelTraining
+
setEpochs(int) - Method in class mklab.JGNN.adhoc.ModelTraining
Sets the maximum number of epochs for which training runs.
@@ -214,7 +223,7 @@

S

setKey(K) - Method in class mklab.JGNN.core.util.FastEntry
 
-
setLoss(Loss) - Method in class mklab.JGNN.nn.ModelTraining
+
setLoss(Loss) - Method in class mklab.JGNN.adhoc.ModelTraining
Set
@@ -234,20 +243,21 @@

S

Sets the reduction mechanism of categorical cross entropy.
-
setNumBatches(int) - Method in class mklab.JGNN.nn.ModelTraining
+
setNumBatches(int) - Method in class mklab.JGNN.adhoc.ModelTraining
Sets the number of batches training data slices should be split into.
-
setOptimizer(Optimizer) - Method in class mklab.JGNN.nn.ModelTraining
+
setOptimizer(Optimizer) - Method in class mklab.JGNN.adhoc.ModelTraining
-
Sets an Optimizer instance to controls parameter updates during training.
+
Sets an Optimizer instance to control parameter updates during + training.
-
setParallelizedStochasticGradientDescent(boolean) - Method in class mklab.JGNN.nn.ModelTraining
+
setParallelizedStochasticGradientDescent(boolean) - Method in class mklab.JGNN.adhoc.ModelTraining
-
Sets whether the training strategy should reflect stochastic - gradient descent by randomly sampling from the training dataset to obtain data samples.
+
Sets whether the training strategy should reflect stochastic gradient descent + by randomly sampling from the training dataset to obtain data samples.
-
setPatience(int) - Method in class mklab.JGNN.nn.ModelTraining
+
setPatience(int) - Method in class mklab.JGNN.adhoc.ModelTraining
Sets the patience of the training strategy that performs early stopping.
@@ -275,8 +285,8 @@

S

 
setToASymmetricNormalization() - Method in class mklab.JGNN.core.Matrix
-
Sets the Matrix to its asymmetrically normalized transformation - by appropriately adjusting its element values.
+
Sets the Matrix to its asymmetrically normalized transformation by + appropriately adjusting its element values.
setToNormalized() - Method in class mklab.JGNN.core.Tensor
@@ -296,13 +306,13 @@

S

setToRandom(Distribution) - Method in class mklab.JGNN.core.Tensor
-
Set tensor elements to random values by sampling them from a given Distribution - instance.
+
Set tensor elements to random values by sampling them from a given + Distribution instance.
setToSymmetricNormalization() - Method in class mklab.JGNN.core.Matrix
-
Sets the Matrix to its symmetrically normalized transformation - by appropriately adjusting its element values.
+
Sets the Matrix to its symmetrically normalized transformation by + appropriately adjusting its element values.
setToUniform() - Method in class mklab.JGNN.core.Tensor
@@ -312,16 +322,17 @@

S

Set all tensor element values to 0.
-
setValidationLoss(Loss) - Method in class mklab.JGNN.nn.ModelTraining
+
setValidationLoss(Loss) - Method in class mklab.JGNN.adhoc.ModelTraining
 
setValue(V) - Method in class mklab.JGNN.core.util.FastEntry
 
-
setVerbose(boolean) - Method in class mklab.JGNN.nn.ModelTraining
+
setVerbose(boolean) - Method in class mklab.JGNN.adhoc.ModelTraining
Deprecated. -
This method was available in earlier JGNN versions but will be gradually phased out. - Instead, wrap the validation loss within VerboseLoss to replicate - the same behavior.
+
This method was available in earlier JGNN versions but will be + gradually phased out. Instead, wrap the validation loss within + VerboseLoss to replicate the + same behavior.
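A minimal sketch of the suggested replacement, assuming a ModelTraining instance named trainer and an illustrative categorical cross-entropy validation loss:

    import mklab.JGNN.nn.loss.CategoricalCrossEntropy;
    import mklab.JGNN.nn.loss.report.VerboseLoss;

    // Instead of the deprecated trainer.setVerbose(true):
    trainer.setValidationLoss(new VerboseLoss(new CategoricalCrossEntropy()));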
setZeroCopyType(Matrix) - Method in class mklab.JGNN.core.matrix.WrapCols
@@ -350,7 +361,8 @@

S

Sigmoid - Class in mklab.JGNN.nn.activations
-
Implements a NNOperation that performs a sigmoid transformation of its single input.
+
Implements a NNOperation that performs a sigmoid transformation of + its single input.
Sigmoid() - Constructor for class mklab.JGNN.nn.activations.Sigmoid
 
@@ -374,8 +386,8 @@

S

 
Slice - Class in mklab.JGNN.core
-
This class provices an interface with which to define data slices, - for instance to sample labels.
+
This class provides an interface with which to define data slices, for + instance to sample labels.
Slice(Iterable<Long>) - Constructor for class mklab.JGNN.core.Slice
@@ -441,8 +453,9 @@

S

 
submit(Runnable) - Method in class mklab.JGNN.core.ThreadPool
-
Submits a runnable to be executed at some future point by a thread, - for example via ThreadPool.getInstance().submit(new Runnable(){public void run(){...}});.
+
Submits a runnable to be executed at some future point by a thread, for + example via + ThreadPool.getInstance().submit(new Runnable(){public void run(){...}});.
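The documented example, restated as a runnable sketch with a lambda in place of the anonymous Runnable:

    import mklab.JGNN.core.ThreadPool;

    public class SubmitDemo {
        public static void main(String[] args) {
            ThreadPool.getInstance().submit(() -> {
                // getCurrentThreadId() yields a unique id for the pooled thread;
                // the JVM may need a moment before the task runs.
                System.out.println("thread " + ThreadPool.getCurrentThreadId());
            });
        }
    }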
subtract(Tensor) - Method in class mklab.JGNN.core.tensor.DenseTensor
 
diff --git a/docs/javadoc/index-files/index-18.html b/docs/javadoc/index-files/index-18.html index 00ded7d4..640ba8a8 100644 --- a/docs/javadoc/index-files/index-18.html +++ b/docs/javadoc/index-files/index-18.html @@ -1,11 +1,11 @@ - + T-Index - + @@ -65,7 +65,8 @@

T

Tanh - Class in mklab.JGNN.nn.activations
-
Implements a NNOperation that performs a tanh transformation of its single input.
+
Implements a NNOperation that performs a tanh transformation of its + single input.
Tanh() - Constructor for class mklab.JGNN.nn.activations.Tanh
 
@@ -89,12 +90,14 @@

T

ThreadPool - Class in mklab.JGNN.core
-
This class provides thread execution pool utilities while keeping track of thread - identifiers for use by thread-specific NNOperation.
+
This class provides thread execution pool utilities while keeping track of + thread identifiers for use by thread-specific + NNOperation.
To - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that lists the second element of the 2D matrix element iterator.
+
Implements a NNOperation that lists the second element of the 2D + matrix element iterator.
To() - Constructor for class mklab.JGNN.nn.operations.To
 
@@ -122,10 +125,17 @@

T

 
toString() - Method in class mklab.JGNN.core.Tensor
-
A string serialization of the tensor that can be used by the constructor DenseTensor(String) to create an identical copy.
+
A string serialization of the tensor that can be used by the constructor + DenseTensor(String) to create an identical copy.
toString() - Method in class mklab.JGNN.core.util.FastEntry
 
+
train(ModelTraining, Matrix, Matrix, Slice, Slice) - Method in class mklab.JGNN.nn.Model
+
+
Trains the model by appropriately calling + ModelTraining.train(Model, Matrix, Matrix, Slice, Slice) + with the provided parameters.
+
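A minimal sketch of the delegation described above; the model, trainer, feature and label matrices, and train/validation slices are assumed to be already set up:

    import mklab.JGNN.nn.initializers.KaimingNormal;

    model.init(new KaimingNormal()); // initialize parameters before training
    model.train(trainer, features, labels, trainSlice, validationSlice);
    // Internally delegates to ModelTraining.train(Model, Matrix, Matrix, Slice, Slice).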
train(Tensor[], Tensor) - Method in class mklab.JGNN.nn.operations.LSTM
 
train(Loss, Optimizer, List<Tensor>, List<Tensor>) - Method in class mklab.JGNN.nn.Model
@@ -136,15 +146,16 @@

T

Performs one parameter adjustment step (e.g.
-
train(Model, Matrix, Matrix, Slice, Slice) - Method in class mklab.JGNN.nn.ModelTraining
+
train(Model, Matrix, Matrix, Slice, Slice) - Method in class mklab.JGNN.adhoc.ModelTraining
-
Trains a Model instance based on current settings.
+
Deprecated. + +
-
train(ModelTraining, Matrix, Matrix, Slice, Slice) - Method in class mklab.JGNN.nn.Model
+
train(Model, Matrix, Matrix, Slice, Slice) - Method in class mklab.JGNN.adhoc.train.NodeClassification
-
Trains the model by appropriately calling - ModelTraining.train(Model, Matrix, Matrix, Slice, Slice) - with the provided parameters.
+
Trains a Model instance based on current settings.
trainOnOutputError(Tensor[], Tensor) - Method in class mklab.JGNN.nn.operations.LSTM
 
@@ -155,7 +166,8 @@

T

transform(Tensor) - Method in class mklab.JGNN.core.Matrix
-
Performs the linear algebra transformation A*x where A is this matrix and x a vector
+
Performs the linear algebra transformation A*x where A is this matrix and x a + vector.
Transpose - Class in mklab.JGNN.nn.operations
@@ -169,7 +181,8 @@

T

TransposedMatrix - Class in mklab.JGNN.core.matrix
-
Generates a transposed version of a base matrix, with which it shares elements.
+
Generates a transposed version of a base matrix, with which it shares + elements.
TransposedMatrix(Matrix) - Constructor for class mklab.JGNN.core.matrix.TransposedMatrix
 
diff --git a/docs/javadoc/index-files/index-19.html b/docs/javadoc/index-files/index-19.html index 0a491fef..7b74f45a 100644 --- a/docs/javadoc/index-files/index-19.html +++ b/docs/javadoc/index-files/index-19.html @@ -1,11 +1,11 @@ - + U-Index - + @@ -65,7 +65,8 @@

U

Uniform(double, double) - Constructor for class mklab.JGNN.core.distribution.Uniform
-
Instantiates a uniform distribution that samples values from the given range [from, to].
+
Instantiates a uniform distribution that samples values from the given range + [from, to].
unregister(double[]) - Method in class mklab.JGNN.core.Memory.Scope
 
diff --git a/docs/javadoc/index-files/index-2.html b/docs/javadoc/index-files/index-2.html index 371c047b..e531d0ab 100644 --- a/docs/javadoc/index-files/index-2.html +++ b/docs/javadoc/index-files/index-2.html @@ -1,11 +1,11 @@ - + B-Index - + diff --git a/docs/javadoc/index-files/index-20.html b/docs/javadoc/index-files/index-20.html index 70274384..40955933 100644 --- a/docs/javadoc/index-files/index-20.html +++ b/docs/javadoc/index-files/index-20.html @@ -1,11 +1,11 @@ - + V-Index - + @@ -61,8 +61,8 @@

V

 
var(String) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Declares a component with the given name to be used as an input - of the managed model.
+
Declares a component with the given name to be used as an input of the + managed model.
var(String) - Method in class mklab.JGNN.adhoc.parsers.LayeredBuilder
 
@@ -115,8 +115,8 @@

V

VerboseLoss - Class in mklab.JGNN.nn.loss.report
-
Implements a Loss that wraps other losses and outputs their value during training to an output stream - (to System.out by default).
+
Implements a Loss that wraps other losses and outputs their value + during training to an output stream (to System.out by default).
VerboseLoss(Loss) - Constructor for class mklab.JGNN.nn.loss.report.VerboseLoss
diff --git a/docs/javadoc/index-files/index-21.html b/docs/javadoc/index-files/index-21.html index 157ef613..6c1676ff 100644 --- a/docs/javadoc/index-files/index-21.html +++ b/docs/javadoc/index-files/index-21.html @@ -1,11 +1,11 @@ - + W-Index - + diff --git a/docs/javadoc/index-files/index-22.html b/docs/javadoc/index-files/index-22.html index 2dbc7361..b2825096 100644 --- a/docs/javadoc/index-files/index-22.html +++ b/docs/javadoc/index-files/index-22.html @@ -1,11 +1,11 @@ - + X-Index - + diff --git a/docs/javadoc/index-files/index-23.html b/docs/javadoc/index-files/index-23.html index 4daefc91..e2d6863e 100644 --- a/docs/javadoc/index-files/index-23.html +++ b/docs/javadoc/index-files/index-23.html @@ -1,11 +1,11 @@ - + Z-Index - + @@ -57,11 +57,13 @@

Z

zeroCopy() - Method in class mklab.JGNN.core.Matrix
-
Creates a Matrix with the same class and dimensions and all element set to zero.
+
Creates a Matrix with the same class and dimensions and all elements set to + zero.
zeroCopy() - Method in class mklab.JGNN.core.Tensor
-
Creates a tensor of the same class with the same size and all element set to zero.
+
Creates a tensor of the same class with the same size and all elements set to + zero.
zeroCopy(long) - Method in class mklab.JGNN.core.empy.EmptyTensor
 
@@ -71,7 +73,8 @@

Z

 
zeroCopy(long) - Method in class mklab.JGNN.core.Matrix
-
Creates a Matrix with the same class and dimensions and all element set to zero.
+
Creates a Matrix with the same class and dimensions and all elements set to + zero.
zeroCopy(long) - Method in class mklab.JGNN.core.tensor.AccessSubtensor
 
@@ -85,7 +88,8 @@

Z

 
zeroCopy(long) - Method in class mklab.JGNN.core.Tensor
-
Creates a tensor of the same class with a given size and all element set to zero.
+
Creates a tensor of the same class with a given size and all elements set to + zero.
zeroCopy(long, long) - Method in class mklab.JGNN.core.empy.EmptyMatrix
 
@@ -115,18 +119,18 @@

Z

 
zeroCopy(long, long) - Method in class mklab.JGNN.core.Matrix
-
Creates a matrix of the same class and all element set to zero, but with - a given number of rows and columns.
+
Creates a matrix of the same class and all element set to zero, but with a + given number of rows and columns.
zeroCopy(Tensor) - Method in class mklab.JGNN.core.Matrix
-
Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
+
Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
zeroCopy(Tensor) - Method in class mklab.JGNN.core.Tensor
-
Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
+
Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
A B C D E F G H I K L M N O P R S T U V W X Z 
All Classes and Interfaces|All Packages diff --git a/docs/javadoc/index-files/index-3.html b/docs/javadoc/index-files/index-3.html index 84a5bc9f..379bd260 100644 --- a/docs/javadoc/index-files/index-3.html +++ b/docs/javadoc/index-files/index-3.html @@ -1,11 +1,11 @@ - + C-Index - + @@ -57,8 +57,8 @@

C

cast(Class<Type>) - Method in class mklab.JGNN.core.Tensor
-
Performs the equivalent of Java's typecasting that fits - in functional interfaces.
+
Performs the equivalent of Java's typecasting that fits in functional + interfaces.
CategoricalCrossEntropy - Class in mklab.JGNN.nn.loss
@@ -86,8 +86,8 @@

C

classify() - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
-
Adds a classification layer that gather the number of inputs nodes - and applies softmax on all of them.
+
Adds a classification layer that gathers the number of input nodes and + applies softmax on all of them.
clearPrediction() - Method in class mklab.JGNN.nn.NNOperation
 
@@ -101,19 +101,20 @@

C

Complement - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that performs the operation 1-x for its simple input x.
+
Implements a NNOperation that performs the operation 1-x for its + simple input x.
Complement() - Constructor for class mklab.JGNN.nn.operations.Complement
 
concat(int) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
-
Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards.
+
Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards.
concat(int) - Method in class mklab.JGNN.adhoc.parsers.LayeredBuilder
-
Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards.
+
Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards.
Concat - Class in mklab.JGNN.nn.operations
@@ -123,8 +124,8 @@

C

 
config(String, double) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during ModelBuilder.operation(String) expressions.
+
Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during ModelBuilder.operation(String) expressions.
config(String, double) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
 
@@ -133,11 +134,17 @@

C

config(String, double) - Method in class mklab.JGNN.adhoc.parsers.Neuralang
 
config(String, String) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
 
+
+
Applies ModelBuilder.config(String, double) where the set value is obtained from + another configuration hyperparameter.
+
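A minimal sketch of the two config variants; the hyperparameter names are hypothetical, and the getConfig return type is assumed to be a double:

    import mklab.JGNN.adhoc.ModelBuilder;

    ModelBuilder builder = new ModelBuilder();
    builder.config("hidden", 64);        // numeric hyperparameter
    builder.config("classes", "hidden"); // copies the value of "hidden"
    double value = builder.getConfig("classes"); // 64.0 under the above assumption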
config(String, String) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
 
-
configFrom(ModelBuilder) - Method in class mklab.JGNN.nn.ModelTraining
-
 
+
configFrom(ModelBuilder) - Method in class mklab.JGNN.adhoc.ModelTraining
+
+
Retrieves the learning rate (lr), epochs, batches, and patience parameters + from the configurations of a ModelBuilder.
+
constant(String, double) - Method in class mklab.JGNN.adhoc.ModelBuilder
Declares a non-learnable constant component with the given name.
@@ -168,11 +175,13 @@

C

contains(Object) - Method in class mklab.JGNN.adhoc.IdConverter
-
Checks whether the object has been registered with IdConverter.getOrCreateId(Object).
+
Checks whether the object has been registered with + IdConverter.getOrCreateId(Object).
copy() - Method in class mklab.JGNN.core.Tensor
-
Creates a Tensor.zeroCopy() and transfers to it all potentially non-zero element values.
+
Creates a Tensor.zeroCopy() and transfers to it all potentially non-zero + element values.
Cora - Class in mklab.JGNN.adhoc.datasets
@@ -184,9 +193,8 @@

C

 
createForwardValidity(List<Tensor>) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Asserts that all components parsed into a call graph with - ModelBuilder.operation(String) are eventually used by at least one ModelBuilder.out(String) - component.
+
Asserts that a forward run of the architecture is valid given some input + data.
crossEntropy(double, double) - Static method in interface mklab.JGNN.core.util.Loss
diff --git a/docs/javadoc/index-files/index-4.html b/docs/javadoc/index-files/index-4.html index ab6d3f5d..4e63092c 100644 --- a/docs/javadoc/index-files/index-4.html +++ b/docs/javadoc/index-files/index-4.html @@ -1,11 +1,11 @@ - + D-Index - + @@ -134,14 +134,13 @@

D

Diagonal - Class in mklab.JGNN.core.matrix
-
Implements a square matrix whose diagonal elements are determined by the correspond values of - an underlying tensor and off-diagonal elements are zero.
+
Implements a square matrix whose diagonal elements are determined by the + corresponding values of an underlying tensor and off-diagonal elements are zero.
Distribution - Interface in mklab.JGNN.core
-
This interface abstracts a probability distribution - that can be passed to Tensor.setToRandom(Distribution) - for random tensor initialization.
+
This interface abstracts a probability distribution that can be passed to + Tensor.setToRandom(Distribution) for random tensor initialization.
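A minimal runnable sketch of random initialization through this interface, using the Normal distribution and DenseTensor constructors listed elsewhere in this index:

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.distribution.Normal;
    import mklab.JGNN.core.tensor.DenseTensor;

    public class RandomInitDemo {
        public static void main(String[] args) {
            Tensor weights = new DenseTensor(10);  // ten zero elements
            weights.setToRandom(new Normal(0, 1)); // sample each element from N(0, 1)
            System.out.println(weights);
        }
    }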
dot(Tensor) - Method in class mklab.JGNN.core.Tensor
@@ -153,8 +152,9 @@

D

Dropout - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
+
Implements a NNOperation that performs dropout, randomly omitting elements of + its single input while enabled.
Dropout() - Constructor for class mklab.JGNN.nn.operations.Dropout
 
diff --git a/docs/javadoc/index-files/index-5.html b/docs/javadoc/index-files/index-5.html index 7464c91f..322e028f 100644 --- a/docs/javadoc/index-files/index-5.html +++ b/docs/javadoc/index-files/index-5.html @@ -1,11 +1,11 @@ - + E-Index - + @@ -85,9 +85,9 @@

E

 
estimateNumNonZeroElements() - Method in class mklab.JGNN.core.Tensor
-
Provides an estimation for the non-zero number of elements stored in the tensor, - where this number is equal to the size for dense tensors, but equal to the actual - number of non-zero elements for sparse tensors.
+
Provides an estimation for the non-zero number of elements stored in the + tensor, where this number is equal to the size for dense tensors, but equal + to the actual number of non-zero elements for sparse tensors.
estimateNumNonZeroElements() - Method in class mklab.JGNN.core.tensor.SparseTensor
 
@@ -99,8 +99,8 @@

E

 
evaluate(Tensor, Tensor) - Method in class mklab.JGNN.nn.Loss
-
Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
+
Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
evaluate(Tensor, Tensor) - Method in class mklab.JGNN.nn.loss.report.VerboseLoss
 
@@ -108,7 +108,8 @@

E

 
Exp - Class in mklab.JGNN.nn.activations
-
Implements a NNOperation that performs an exponential transformation of its single input.
+
Implements a NNOperation that performs an element-by-element + exponential transformation of its one input tensor.
Exp() - Constructor for class mklab.JGNN.nn.activations.Exp
 
diff --git a/docs/javadoc/index-files/index-6.html b/docs/javadoc/index-files/index-6.html index 0c33255d..02ce17f6 100644 --- a/docs/javadoc/index-files/index-6.html +++ b/docs/javadoc/index-files/index-6.html @@ -1,11 +1,11 @@ - + F-Index - + @@ -57,8 +57,8 @@

F

FastBuilder - Class in mklab.JGNN.adhoc.parsers
-
Extends the capabilities of LayeredBuilder to use - for node classification.
+
Extends the capabilities of LayeredBuilder for use in node + classification.
FastBuilder() - Constructor for class mklab.JGNN.adhoc.parsers.FastBuilder
@@ -68,8 +68,8 @@

F

FastBuilder(Matrix, Matrix) - Constructor for class mklab.JGNN.adhoc.parsers.FastBuilder
-
Creates a graph neural network builder from an - normalized adjacency matrix and a node feature matrix.
+
Creates a graph neural network builder from a normalized adjacency matrix + and a node feature matrix.
FastEntry<K,V> - Class in mklab.JGNN.core.util
 
@@ -91,7 +91,8 @@

F

 
From - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that lists the first element of the 2D matrix element iterator.
+
Implements a NNOperation that lists the first element of the 2D + matrix element iterator.
From() - Constructor for class mklab.JGNN.nn.operations.From
 
@@ -109,7 +110,8 @@

F

fromRange(long, long) - Static method in class mklab.JGNN.core.Tensor
-
Creates a dense tensor holding the desired range [start, start+1, ..., end-1].
+
Creates a dense tensor holding the desired range [start, start+1, ..., + end-1].
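A minimal runnable sketch of the range construction:

    import mklab.JGNN.core.Tensor;

    public class RangeDemo {
        public static void main(String[] args) {
            Tensor ids = Tensor.fromRange(0, 5); // holds [0, 1, 2, 3, 4]
            System.out.println(ids.describe());
        }
    }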
function(String, String) - Method in class mklab.JGNN.adhoc.ModelBuilder
 
@@ -117,22 +119,26 @@

F

 
futureConfigs(String, Function<Integer, Double>, int) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
-
Defines a number of FastBuilder.config(String, double) symbols involving a {l} - notation, for example so that they can be used during FastBuilder.layerRepeat(String, int).
+
Defines a number of FastBuilder.config(String, double) symbols involving a + {l} notation, for example so that they can be used during + FastBuilder.layerRepeat(String, int).
futureConfigs(String, Function<Integer, Double>, int) - Method in class mklab.JGNN.adhoc.parsers.LayeredBuilder
-
Defines a number of LayeredBuilder.config(String, double) symbols involving a {l} - notation, for example so that they can be used during LayeredBuilder.layerRepeat(String, int).
+
Defines a number of LayeredBuilder.config(String, double) symbols involving a + {l} notation, for example so that they can be used during + LayeredBuilder.layerRepeat(String, int).
futureConstants(String, Function<Integer, Double>, int) - Method in class mklab.JGNN.adhoc.parsers.FastBuilder
-
Defines a number of FastBuilder.constant(String, double) symbols involving a {l} +
Defines a number of FastBuilder.constant(String, double) symbols involving a + {l} notation, for example so that they can be used during FastBuilder.layerRepeat(String, int).
futureConstants(String, Function<Integer, Double>, int) - Method in class mklab.JGNN.adhoc.parsers.LayeredBuilder
-
Defines a number of LayeredBuilder.constant(String, double) symbols involving a {l} +
Defines a number of LayeredBuilder.constant(String, double) symbols involving a + {l} notation, for example so that they can be used during LayeredBuilder.layerRepeat(String, int).
diff --git a/docs/javadoc/index-files/index-7.html b/docs/javadoc/index-files/index-7.html index 777b96ac..4ad139f2 100644 --- a/docs/javadoc/index-files/index-7.html +++ b/docs/javadoc/index-files/index-7.html @@ -1,11 +1,11 @@ - + G-Index - + @@ -57,7 +57,8 @@

G

Gather - Class in mklab.JGNN.nn.operations
-
Implements a NNOperation that performs the equivalent of TensorFlow's gather operation.
+
Implements a NNOperation that performs the equivalent of TensorFlow's + gather operation.
Gather() - Constructor for class mklab.JGNN.nn.operations.Gather
 
@@ -125,8 +126,8 @@

G

get(String) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Retrieves the NNOperation registered with the provided - name, for example to investigates its value.
+
Retrieves the NNOperation registered with the provided name, for + example to investigate its value.
getColName() - Method in class mklab.JGNN.core.Matrix
 
@@ -138,10 +139,14 @@

G

Retrieves the wrapped column tensor.
+
getConfig(String) - Method in class mklab.JGNN.adhoc.ModelBuilder
+
+
Retrieves a configuration hyperparameter's value.
+
getConfigOrDefault(String, double) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
 
-
getConfigOrDefault(String, int) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
 
+
+
Retrieves a configuration hyperparameter's value.
+
getCurrentThreadId() - Static method in class mklab.JGNN.core.ThreadPool
Retrieves a unique integer indicating the currently running thread.
@@ -244,8 +249,8 @@

G

 
getNonZeroEntries() - Method in class mklab.JGNN.core.Matrix
-
Retrieves an iterable that traverses (row, col) entry pairs - of non zero entries.
+
Retrieves an iterable that traverses (row, col) entry pairs of non-zero + entries.
getNonZeroEntries() - Method in class mklab.JGNN.core.matrix.RepeatMatrix
 
diff --git a/docs/javadoc/index-files/index-8.html b/docs/javadoc/index-files/index-8.html index 632d7df1..1276aa50 100644 --- a/docs/javadoc/index-files/index-8.html +++ b/docs/javadoc/index-files/index-8.html @@ -1,11 +1,11 @@ - + H-Index - + @@ -57,8 +57,8 @@

H

hasComponent(String) - Method in class mklab.JGNN.adhoc.ModelBuilder
-
Checks whether the builder has added to its managed model a component of - the given name.
+
Checks whether the builder has added to its managed model a component of the + given name.
hasNext() - Method in class mklab.JGNN.core.util.Range
 
diff --git a/docs/javadoc/index-files/index-9.html b/docs/javadoc/index-files/index-9.html index 97309541..93e8b0e8 100644 --- a/docs/javadoc/index-files/index-9.html +++ b/docs/javadoc/index-files/index-9.html @@ -1,11 +1,11 @@ - + I-Index - + @@ -61,12 +61,13 @@

I

IdConverter() - Constructor for class mklab.JGNN.adhoc.IdConverter
-
Instantiates an empty converter to be filled with IdConverter.getOrCreateId(Object).
+
Instantiates an empty converter to be filled with + IdConverter.getOrCreateId(Object).
IdConverter(List<?>) - Constructor for class mklab.JGNN.adhoc.IdConverter
-
Instantiates the converter on a list of objects to register - with IdConverter.getOrCreateId(Object) on.
+
Instantiates the converter on a list of objects to register with + IdConverter.getOrCreateId(Object) on.
Identity - Class in mklab.JGNN.nn.operations
diff --git a/docs/javadoc/index.html b/docs/javadoc/index.html index 8183ef86..34d407bb 100644 --- a/docs/javadoc/index.html +++ b/docs/javadoc/index.html @@ -1,11 +1,11 @@ - + Overview - + @@ -55,41 +55,93 @@
Package
Description
-
 
+
+
Contains classes that simplify data loading, model building, and training.
+
-
 
+
+
This package contains datasets for out-of-the-box experimentation.
+
-
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
- -
 
+
+
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
+
+ +
+
Contains model training strategies that correspond to different predictive + tasks.
+
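A minimal sketch of configuring such a strategy (NodeClassification, per the entries elsewhere in this index); the optimizer and loss choices are illustrative only:

    import mklab.JGNN.adhoc.ModelTraining;
    import mklab.JGNN.adhoc.train.NodeClassification;
    import mklab.JGNN.nn.loss.CategoricalCrossEntropy;
    import mklab.JGNN.nn.optimizers.Adam;

    ModelTraining trainer = new NodeClassification()
            .setOptimizer(new Adam(0.01))
            .setEpochs(300)
            .setLoss(new CategoricalCrossEntropy());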
+ +
+
Contains base numerical data classes, as well as supporting abstract classes.
+
+ +
+
Contains data distributions that produce one numerical value and can be used + for tensor value initialization.
+
+ +
+
Contains empty extensions of datatypes that hold only dimension names and + sizes but no data.
+
+ +
+
Contains implementations of matrix classes, of transparent access to parts of + these classes, and of column/row repetitions that broadcast vectors into + matrices.
+
+ +
+
Contains implementations of tensor classes, as well as transparent access to + parts of these classes.
+
+ +
+
Contains utility functions that are employed internally, mainly optimized 1D + and 2D iterators.
+
+ +
+
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
+
+ +
+
Implements activation functions to be used as model operations.
+
+ +
+
Implements initializers to be applied to Model + parameters to stochastically induce some desired property at the first + training epoch.
+
+ +
+
Contains various types of neural architecture inputs.
+
+ +
+
Contains classes for instantiating loss functions.
+
+ +
+
Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status.
+
+ +
+
Contains popular neural network and GNN operations.
+
+ +
+
Contains optimizers that can be used to update model parameters based on training losses.
+
+ +
+
Contains pooling/reduction operations that reduce the dimensions of inputs.
+
diff --git a/docs/javadoc/member-search-index.js b/docs/javadoc/member-search-index.js index 942230ab..415f7318 100644 --- a/docs/javadoc/member-search-index.js +++ b/docs/javadoc/member-search-index.js @@ -1 +1 @@ -memberSearchIndex = [{"p":"mklab.JGNN.core","c":"Tensor","l":"abs()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"abs()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessCol(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"accessCol(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"AccessCol(Matrix, long)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns(Iterable)","u":"accessColumns(java.lang.Iterable)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns(long...)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns(Tensor)","u":"accessColumns(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessDim(long, String)","u":"accessDim(long,java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRow(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"accessRow(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"AccessRow(Matrix, long)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows(Iterable)","u":"accessRows(java.lang.Iterable)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows(long...)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows(Tensor)","u":"accessRows(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"accessSubtensor(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"accessSubtensor(long, long)","u":"accessSubtensor(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"AccessSubtensor(Tensor, long)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"AccessSubtensor(Tensor, long, long)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,long,long)"},{"p":"mklab.JGNN.nn.loss","c":"Accuracy","l":"Accuracy()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(boolean, double)","u":"%3Cinit%3E(boolean,double)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(boolean, double, double, double)","u":"%3Cinit%3E(boolean,double,double,double)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(boolean, double, double, double, 
double)","u":"%3Cinit%3E(boolean,double,double,double,double)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.nn.operations","c":"Add","l":"Add()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"add(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"add(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"add(Tensor)","u":"add(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"add(Tensor)","u":"add(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"add(Tensor)","u":"add(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"addInput(NNOperation)","u":"addInput(mklab.JGNN.nn.NNOperation)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"addInput(NNOperation)","u":"addInput(mklab.JGNN.nn.NNOperation)"},{"p":"mklab.JGNN.nn","c":"Model","l":"addInput(Variable)","u":"addInput(mklab.JGNN.nn.inputs.Variable)"},{"p":"mklab.JGNN.nn","c":"Model","l":"addOutput(NNOperation)","u":"addOutput(mklab.JGNN.nn.NNOperation)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"aggregate(LSTM)","u":"aggregate(mklab.JGNN.nn.operations.LSTM)"},{"p":"mklab.JGNN.core","c":"Memory","l":"allocate(int, Object)","u":"allocate(int,java.lang.Object)"},{"p":"mklab.JGNN.nn","c":"Initializer","l":"apply(Model)","u":"apply(mklab.JGNN.nn.Model)"},{"p":"mklab.JGNN.nn.initializers","c":"VariancePreservingInitializer","l":"apply(Model)","u":"apply(mklab.JGNN.nn.Model)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"argmax()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"argmin()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"asColumn()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"asRow()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"assertBackwardValidity()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assertFinite()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assertMatching(Tensor)","u":"assertMatching(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assertSize(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assign(Tensor)","u":"assign(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Slice","l":"asTensor()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"asTransposed()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"asTransposed()"},{"p":"mklab.JGNN.nn.operations","c":"Attention","l":"Attention()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"autosize(List)","u":"autosize(java.util.List)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"autosize(Tensor...)","u":"autosize(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"BatchOptimizer(Optimizer)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"BatchOptimizer(Optimizer, 
long)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer,long)"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"BinaryCrossEntropy()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"BinaryCrossEntropy(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"cast(Class)","u":"cast(java.lang.Class)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"CategoricalCrossEntropy()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"CategoricalCrossEntropy(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.adhoc.datasets","c":"Citeseer","l":"Citeseer()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"classes()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"classify()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"clearPrediction()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"ColumnRepetition(long, Tensor)","u":"%3Cinit%3E(long,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Complement","l":"Complement()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"Concat","l":"Concat()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"concat(int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"concat(int)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"config(String, String)","u":"config(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"config(String, String)","u":"config(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"configFrom(ModelBuilder)","u":"configFrom(mklab.JGNN.adhoc.ModelBuilder)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"constant(String, Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"constant(String, Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"constant(String, Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"constant(String, 
Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Constant","l":"Constant(Tensor)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"contains(Object)","u":"contains(java.lang.Object)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"copy()"},{"p":"mklab.JGNN.adhoc.datasets","c":"Cora","l":"Cora()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"createFirstState()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"createForwardValidity(List)","u":"createForwardValidity(java.util.List)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropy(double, double)","u":"crossEntropy(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropyDerivative(double, double)","u":"crossEntropyDerivative(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropyDerivativeCategorical(double, double)","u":"crossEntropyDerivativeCategorical(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropySigmoidDerivative(double, double)","u":"crossEntropySigmoidDerivative(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropyTanhDerivative(double, double)","u":"crossEntropyTanhDerivative(double,double)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"Dataset()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"debugging"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"DenseMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(double...)","u":"%3Cinit%3E(double...)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(Iterator)","u":"%3Cinit%3E(java.util.Iterator)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"density()"},{"p":"mklab.JGNN.nn.loss","c":"Accuracy","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn","c":"Loss","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"describe()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"describe()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"describe()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"describe()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"describe()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"describe()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"describe()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"dot(Tensor)","u":"dot(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"dot(Tensor, 
Tensor)","u":"dot(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"Dropout()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"EmptyMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"EmptyTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"EmptyTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"endTape()"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"enter()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.nn.loss","c":"Accuracy","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn","c":"Loss","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"exit()"},{"p":"mklab.JGNN.nn.activations","c":"Exp","l":"Exp()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"expMinusOne()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"expMinusOne()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"external(Tensor, Tensor)","u":"external(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"eye(long)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"FastBuilder()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"FastBuilder(Matrix, Matrix)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix,mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"FastEntry()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"FastEntry(K, 
V)","u":"%3Cinit%3E(K,V)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"features()"},{"p":"mklab.JGNN.nn.activations","c":"L1","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.operations","c":"Attention","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.pooling","c":"Mean","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.pooling","c":"Sum","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.operations","c":"From","l":"From()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"fromDouble(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"fromDouble(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"fromRange(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"fromRange(long, long)","u":"fromRange(long,long)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"function(String, String)","u":"function(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"function(String, String)","u":"function(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"futureConfigs(String, Function, int)","u":"futureConfigs(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"futureConfigs(String, Function, int)","u":"futureConfigs(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"futureConstants(String, Function, int)","u":"futureConstants(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"futureConstants(String, Function, int)","u":"futureConstants(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.nn.operations","c":"Gather","l":"Gather()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"get()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"get(int)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"get(int)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"get(long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"get(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"get(long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"get(long, 
long)","u":"get(long,long)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"get(String)","u":"get(java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getColName()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getCols()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"getColumn()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getConfigOrDefault(String, double)","u":"getConfigOrDefault(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getConfigOrDefault(String, int)","u":"getConfigOrDefault(java.lang.String,int)"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"getCurrentThreadId()"},{"p":"mklab.JGNN.nn","c":"Model","l":"getDepthLastOperations()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getDescription()"},{"p":"mklab.JGNN.core","c":"Distribution","l":"getDeviation()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"getDeviation()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"getDeviation()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"getDimensionName()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getDimensionSize(String)","u":"getDimensionSize(java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getExecutionGraphDot()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"getId(Object)","u":"getId(java.lang.Object)"},{"p":"mklab.JGNN.nn","c":"Model","l":"getInputs()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getInputs()"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"getInstance()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"getKey()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getLastTapeError()"},{"p":"mklab.JGNN.core","c":"Distribution","l":"getMean()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"getMean()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"getMean()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM.LSTMState","l":"getMemory()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getModel()"},{"p":"mklab.JGNN.nn.activations","c":"Exp","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"LRelu","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"NExp","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"PRelu","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"Relu","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"Sigmoid","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"Tanh","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.operations","c":"MatMul","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.operations","c":"Multiply","l":"getNonLinearity(int, double, 
double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"getNonZeroElements()"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"getOptimizer()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"getOrCreateId(Object)","u":"getOrCreateId(java.lang.Object)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM.LSTMState","l":"getOutput()"},{"p":"mklab.JGNN.nn","c":"Model","l":"getOutputs()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getOutputs()"},{"p":"mklab.JGNN.nn","c":"Model","l":"getParameters()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getPrediction()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getRowName()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getRows()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getSimpleDescription()"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"getSimpleDescription()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"getSlice()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"getValue()"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"GradientDescent(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"GradientDescent(double, 
double)","u":"%3Cinit%3E(double,double)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"graph()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"hasComponent(String)","u":"hasComponent(java.lang.String)"},{"p":"mklab.JGNN.core.util","c":"Range","l":"hasNext()"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"hasNext()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"IdConverter()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"IdConverter(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"mklab.JGNN.nn.operations","c":"Identity","l":"Identity()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn","c":"Model","l":"init(Initializer)","u":"init(mklab.JGNN.nn.Initializer)"},{"p":"mklab.JGNN.nn","c":"Initializer","l":"Initializer()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"inverse()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"inverse()"},{"p":"mklab.JGNN.nn.inputs","c":"Constant","l":"isCachable()"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"isCachable()"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"isCachable()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"From","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"To","l":"isCachable()"},{"p":"mklab.JGNN.nn.inputs","c":"Constant","l":"isConstant()"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"isConstant()"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"isConstant()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"isConstant()"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"isEnabled()"},{"p":"mklab.JGNN.core","c":"Slice","l":"iterator()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"iterator()"},{"p":"mklab.JGNN.core.util","c":"Range","l":"iterator()"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"iterator()"},{"p":"mklab.JGNN.nn.initializers","c":"KaimingNormal","l":"KaimingNormal()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.initializers","c":"KaimingUniform","l":"KaimingUniform()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"L1","l":"L1()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"L1","l":"L1(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"labels()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"layer(String)","u":"layer(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"layer(String)","u":"layer(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"LayeredBuilder()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"LayeredBuilder(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"layerRepeat(String, int)","u":"layerRepeat(java.lang.String,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"layerRepeat(String, int)","u":"layerRepeat(java.lang.String,int)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"load(Path)","u":"load(java.nio.file.Path)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"log()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"log()"},{"p":"mklab.JGNN.nn.operations","c":"Log","l":"Log()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn","c":"Loss","l":"Loss()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"LRelu","l":"LRelu()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"LSTM(Optimizer, int, 
int)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer,int,int)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM.LSTMState","l":"LSTMState(Tensor, Tensor)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"MatMul","l":"MatMul()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"matmul(Matrix)","u":"matmul(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"matmul(Matrix)","u":"matmul(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"matmul(Matrix)","u":"matmul(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"matmul(Matrix, boolean, boolean)","u":"matmul(mklab.JGNN.core.Matrix,boolean,boolean)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"matmul(Matrix, boolean, boolean)","u":"matmul(mklab.JGNN.core.Matrix,boolean,boolean)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"matmul(Matrix, boolean, boolean)","u":"matmul(mklab.JGNN.core.Matrix,boolean,boolean)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"max()"},{"p":"mklab.JGNN.nn.pooling","c":"Max","l":"Max()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Max","l":"Max(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.nn.pooling","c":"Mean","l":"Mean()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Mean","l":"Mean(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.core","c":"Memory","l":"Memory()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"min()"},{"p":"mklab.JGNN.nn","c":"Model","l":"Model()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"ModelBuilder()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"ModelBuilder(Model)","u":"%3Cinit%3E(mklab.JGNN.nn.Model)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"ModelTraining()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"Multiply","l":"Multiply()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"multiply(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"multiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"multiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"multiply(Tensor)","u":"multiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"multiply(Tensor)","u":"multiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"multiply(Tensor)","u":"multiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"negative()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"negative()"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"Neuralang()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"NExp","l":"NExp()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Range","l":"next()"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"next()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"norm()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"Normal()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"Normal(double, 
double)","u":"%3Cinit%3E(double,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"normalized()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"onesMask()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"operation(String)","u":"operation(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"operation(String)","u":"operation(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"operation(String)","u":"operation(java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"out(String)","u":"out(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"out(String)","u":"out(java.lang.String)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"output(Tensor, LSTM.LSTMState)","u":"output(mklab.JGNN.core.Tensor,mklab.JGNN.nn.operations.LSTM.LSTMState)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"param(String, double, Tensor)","u":"param(java.lang.String,double,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"param(String, double, Tensor)","u":"param(java.lang.String,double,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"param(String, double, Tensor)","u":"param(java.lang.String,double,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"param(String, Tensor)","u":"param(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"param(String, Tensor)","u":"param(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"param(String, Tensor)","u":"param(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"Parameter(Tensor)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"Parameter(Tensor, 
double)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"parse(Path)","u":"parse(java.nio.file.Path)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"persist()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"persist()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"persist()"},{"p":"mklab.JGNN.nn","c":"Model","l":"predict(List)","u":"predict(java.util.List)"},{"p":"mklab.JGNN.nn","c":"Model","l":"predict(Tensor...)","u":"predict(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"predict(Tensor[])","u":"predict(mklab.JGNN.core.Tensor[])"},{"p":"mklab.JGNN.nn.activations","c":"PRelu","l":"PRelu()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"print()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"printState()"},{"p":"mklab.JGNN.adhoc.datasets","c":"Pubmed","l":"Pubmed()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"put(int, double)","u":"put(int,double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"put(int, double)","u":"put(int,double)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"put(long, 
double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"put(long, long, double)","u":"put(long,long,double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"putAdd(int, double)","u":"putAdd(int,double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"putAdd(int, double)","u":"putAdd(int,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"putAdd(long, double)","u":"putAdd(long,double)"},{"p":"mklab.JGNN.core","c":"Slice","l":"range(double, double)","u":"range(double,double)"},{"p":"mklab.JGNN.core","c":"Slice","l":"range(int, int)","u":"range(int,int)"},{"p":"mklab.JGNN.core.util","c":"Range","l":"Range(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"Range2D(long, long, long, long)","u":"%3Cinit%3E(long,long,long,long)"},{"p":"mklab.JGNN.nn.operations","c":"Reduce","l":"Reduce()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"register(double[])"},{"p":"mklab.JGNN.nn.optimizers","c":"Regularization","l":"Regularization(Optimizer, 
double)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer,double)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"release()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"release()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"release()"},{"p":"mklab.JGNN.core","c":"Memory","l":"release(double[])"},{"p":"mklab.JGNN.nn.activations","c":"Relu","l":"Relu()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"relu(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"relu(Tensor)","u":"relu(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"reluDerivative(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"reluDerivative(Tensor)","u":"reluDerivative(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"rememberAs(String)","u":"rememberAs(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"rememberAs(String)","u":"rememberAs(java.lang.String)"},{"p":"mklab.JGNN.nn.operations","c":"Repeat","l":"Repeat()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"RepeatMatrix(double, long, long)","u":"%3Cinit%3E(double,long,long)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"RepeatTensor(double, long)","u":"%3Cinit%3E(double,long)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"reset()"},{"p":"mklab.JGNN.nn","c":"Optimizer","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"Regularization","l":"reset()"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"Reshape(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"RowRepetition(Tensor, 
long)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,long)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"run(List)","u":"run(java.util.List)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"run(Tensor...)","u":"run(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"runModel(ArrayList)","u":"runModel(java.util.ArrayList)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"runModel(Tensor...)","u":"runModel(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"runPrediction()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"runPredictionAndAutosize()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"sample()"},{"p":"mklab.JGNN.core","c":"Distribution","l":"sample()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"sample()"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"samples()"},{"p":"mklab.JGNN.core","c":"Slice","l":"samplesAsFeatures()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"save(Path)","u":"save(java.nio.file.Path)"},{"p":"mklab.JGNN.core","c":"Memory","l":"scope()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfAbs()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAbs()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfAdd(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAdd(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfAdd(Tensor)","u":"selfAdd(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAdd(Tensor)","u":"selfAdd(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfAdd(Tensor)","u":"selfAdd(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAdd(Tensor, double)","u":"selfAdd(mklab.JGNN.core.Tensor,double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfExpMinusOne()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfExpMinusOne()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfInverse()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfInverse()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfLog()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfLog()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfMultiply(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfMultiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfMultiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfMultiply(Tensor)","u":"selfMultiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfMultiply(Tensor)","u":"selfMultiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfMultiply(Tensor)","u":"selfMultiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfNegative()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfNegative()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfSqrt()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfSqrt()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfSubtract(Tensor)","u":"selfSubtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfSubtract(Tensor)","u":"selfSubtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfSubtract(Tensor)","u":"selfSubtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"set(Tensor)","u":"set(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setColName(String)","u":"setColName(java.lang.String)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"setDescription(String)","u":"setDescription(java.lang.
String)"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"setDeviation(double)"},{"p":"mklab.JGNN.core","c":"Distribution","l":"setDeviation(double)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setDeviation(double)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setDiagonal(long, double)","u":"setDiagonal(long,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setDimensionName(String)","u":"setDimensionName(java.lang.String)"},{"p":"mklab.JGNN.nn.pooling","c":"Sort","l":"setDimensionName(String)","u":"setDimensionName(java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"setDimensionName(String, String)","u":"setDimensionName(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setDimensionName(String, String)","u":"setDimensionName(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"setDimensionName(String, String)","u":"setDimensionName(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setDimensionName(Tensor)","u":"setDimensionName(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setDimensionName(Tensor)","u":"setDimensionName(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"setEnabled(boolean)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setEpochs(int)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"setInterval(int)"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"setKey(K)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setLoss(Loss)","u":"setLoss(mklab.JGNN.nn.Loss)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setMainDiagonal(double)"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"setMean(double)"},{"p":"mklab.JGNN.core","c":"Distribution","l":"setMean(double)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setMean(double)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"setMeanReduction(boolean)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setNumBatches(int)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setOptimizer(Optimizer)","u":"setOptimizer(mklab.JGNN.nn.Optimizer)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setParallelizedStochasticGradientDescent(boolean)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setPatience(int)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setRange(double, 
double)","u":"setRange(double,double)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setRowName(String)","u":"setRowName(java.lang.String)"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"setSeed(long)"},{"p":"mklab.JGNN.core","c":"Distribution","l":"setSeed(long)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setSeed(long)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"setStream(PrintStream)","u":"setStream(java.io.PrintStream)"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"setTo(Tensor)","u":"setTo(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setToASymmetricNormalization()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToNormalized()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToOnes()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToProbability()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToRandom()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToRandom(Distribution)","u":"setToRandom(mklab.JGNN.core.Distribution)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setToSymmetricNormalization()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToUniform()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToZero()"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setValidationLoss(Loss)","u":"setValidationLoss(mklab.JGNN.nn.Loss)"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"setValue(V)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"setVerbose(boolean)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"setZeroCopyType(Matrix)","u":"setZeroCopyType(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"setZeroCopyType(Matrix)","u":"setZeroCopyType(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core","c":"Slice","l":"shuffle()"},{"p":"mklab.JGNN.core","c":"Slice","l":"shuffle(int)"},{"p":"mklab.JGNN.nn.activations","c":"Sigmoid","l":"Sigmoid()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoid(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoid(Tensor)","u":"sigmoid(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoidDerivative(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoidDerivative(Tensor)","u":"sigmoidDerivative(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"size()"},{"p":"mklab.JGNN.core","c":"Slice","l":"size()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"size()"},{"p":"mklab.JGNN.core","c":"Slice","l":"Slice(Iterable)","u":"%3Cinit%3E(java.lang.Iterable)"},{"p":"mklab.JGNN.nn.pooling","c":"SoftMax","l":"SoftMax()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"SoftMax","l":"SoftMax(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.core.util","c":"Sort","l":"Sort()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Sort","l":"Sort(int)","u":"%3Cinit%3E(int)"},{"p":"mklab.JGNN.core.util","c":"Sort","l":"sortedIndexes(ArrayList)","u":"sortedIndexes(java.util.ArrayList)"},{"p":"mklab.JGNN.core.util","c":"Sort","l":"sortedIndexes(double[])"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"SparseMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"SparseSymmetric(long, 
long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"SparseTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"SparseTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"SPECIES"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"sqrt()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"sqrt()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"startTape()"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"submit(Runnable)","u":"submit(java.lang.Runnable)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"subtract(Tensor)","u":"subtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"subtract(Tensor)","u":"subtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"subtract(Tensor)","u":"subtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"sum()"},{"p":"mklab.JGNN.nn.pooling","c":"Sum","l":"Sum()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Sum","l":"Sum(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"symmetricNormalization()"},{"p":"mklab.JGNN.nn.activations","c":"Tanh","l":"Tanh()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanh(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanh(Tensor)","u":"tanh(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanhDerivative(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanhDerivative(Tensor)","u":"tanhDerivative(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"tensor"},{"p":"mklab.JGNN.core","c":"Tensor","l":"Tensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.nn.operations","c":"To","l":"To()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toArray()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toDense()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toDouble()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toNonZeroString()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toProbability()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toSparse()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toString()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toString()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"toString()"},{"p":"mklab.JGNN.nn","c":"Model","l":"train(Loss, Optimizer, List, List)","u":"train(mklab.JGNN.nn.Loss,mklab.JGNN.nn.Optimizer,java.util.List,java.util.List)"},{"p":"mklab.JGNN.nn","c":"Model","l":"train(Loss, Optimizer, List, List, List)","u":"train(mklab.JGNN.nn.Loss,mklab.JGNN.nn.Optimizer,java.util.List,java.util.List,java.util.List)"},{"p":"mklab.JGNN.nn","c":"ModelTraining","l":"train(Model, Matrix, Matrix, Slice, Slice)","u":"train(mklab.JGNN.nn.Model,mklab.JGNN.core.Matrix,mklab.JGNN.core.Matrix,mklab.JGNN.core.Slice,mklab.JGNN.core.Slice)"},{"p":"mklab.JGNN.nn","c":"Model","l":"train(ModelTraining, Matrix, Matrix, Slice, Slice)","u":"train(mklab.JGNN.nn.ModelTraining,mklab.JGNN.core.Matrix,mklab.JGNN.core.Matrix,mklab.JGNN.core.Slice,mklab.JGNN.core.Slice)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"train(Tensor[], Tensor)","u":"train(mklab.JGNN.core.Tensor[],mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"trainOnOutputError(Tensor[], Tensor)","u":"trainOnOutputError(mklab.JGNN.core.Tensor[],mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn","c":"Model","l":"trainTowardsZero(Optimizer, 
List)","u":"trainTowardsZero(mklab.JGNN.nn.Optimizer,java.util.List)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"transform(Tensor)","u":"transform(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Transpose","l":"Transpose()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"transposed()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"TransposedMatrix(Matrix)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"Uniform()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"Uniform(double, double)","u":"%3Cinit%3E(double,double)"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"unregister(double[])"},{"p":"mklab.JGNN.nn","c":"Optimizer","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"Regularization","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"updateAll()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"updateTape(Tensor, LSTM.LSTMState, 
Tensor)","u":"updateTape(mklab.JGNN.core.Tensor,mklab.JGNN.nn.operations.LSTM.LSTMState,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"values"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"values"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"var(String)","u":"var(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"var(String)","u":"var(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"var(String)","u":"var(java.lang.String)"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"Variable()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.initializers","c":"VariancePreservingInitializer","l":"VariancePreservingInitializer()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"vectorization"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"VectorizedMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(double...)","u":"%3Cinit%3E(double...)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(Iterator)","u":"%3Cinit%3E(java.util.Iterator)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"VerboseLoss(Loss)","u":"%3Cinit%3E(mklab.JGNN.nn.Loss)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"view()"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"waitForConclusion()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"WrapCols(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"WrapCols(Tensor...)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"WrapRows(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"WrapRows(Tensor...)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn.initializers","c":"XavierNormal","l":"XavierNormal()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.initializers","c":"XavierUniform","l":"XavierUniform()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"zeroCopy()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"zeroCopy(long, 
long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy(Tensor)","u":"zeroCopy(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"zeroCopy(Tensor)","u":"zeroCopy(mklab.JGNN.core.Tensor)"}];updateSearchResults(); \ No newline at end of file +memberSearchIndex = [{"p":"mklab.JGNN.core","c":"Tensor","l":"abs()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"abs()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessCol(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"accessCol(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"AccessCol(Matrix, long)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns(Iterable)","u":"accessColumns(java.lang.Iterable)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns(long...)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessColumns(Tensor)","u":"accessColumns(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessDim(long, String)","u":"accessDim(long,java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRow(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"accessRow(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"AccessRow(Matrix, long)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows(Iterable)","u":"accessRows(java.lang.Iterable)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows(long...)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"accessRows(Tensor)","u":"accessRows(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"accessSubtensor(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"accessSubtensor(long, long)","u":"accessSubtensor(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"AccessSubtensor(Tensor, long)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"AccessSubtensor(Tensor, long, long)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,long,long)"},{"p":"mklab.JGNN.nn.loss","c":"Accuracy","l":"Accuracy()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(boolean, double)","u":"%3Cinit%3E(boolean,double)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(boolean, double, double, double)","u":"%3Cinit%3E(boolean,double,double,double)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(boolean, double, double, double, 
double)","u":"%3Cinit%3E(boolean,double,double,double,double)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"Adam(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.nn.operations","c":"Add","l":"Add()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"add(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"add(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"add(Tensor)","u":"add(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"add(Tensor)","u":"add(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"add(Tensor)","u":"add(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"addInput(NNOperation)","u":"addInput(mklab.JGNN.nn.NNOperation)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"addInput(NNOperation)","u":"addInput(mklab.JGNN.nn.NNOperation)"},{"p":"mklab.JGNN.nn","c":"Model","l":"addInput(Variable)","u":"addInput(mklab.JGNN.nn.inputs.Variable)"},{"p":"mklab.JGNN.nn","c":"Model","l":"addOutput(NNOperation)","u":"addOutput(mklab.JGNN.nn.NNOperation)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"aggregate(LSTM)","u":"aggregate(mklab.JGNN.nn.operations.LSTM)"},{"p":"mklab.JGNN.core","c":"Memory","l":"allocate(int, Object)","u":"allocate(int,java.lang.Object)"},{"p":"mklab.JGNN.nn","c":"Initializer","l":"apply(Model)","u":"apply(mklab.JGNN.nn.Model)"},{"p":"mklab.JGNN.nn.initializers","c":"VariancePreservingInitializer","l":"apply(Model)","u":"apply(mklab.JGNN.nn.Model)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"argmax()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"argmin()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"asColumn()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"asRow()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"assertBackwardValidity()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assertFinite()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assertMatching(Tensor)","u":"assertMatching(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assertSize(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"assign(Tensor)","u":"assign(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Slice","l":"asTensor()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"asTransposed()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"asTransposed()"},{"p":"mklab.JGNN.nn.operations","c":"Attention","l":"Attention()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"autosize(List)","u":"autosize(java.util.List)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"autosize(Tensor...)","u":"autosize(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"BatchOptimizer(Optimizer)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"BatchOptimizer(Optimizer, 
long)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer,long)"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"BinaryCrossEntropy()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"BinaryCrossEntropy(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"cast(Class)","u":"cast(java.lang.Class)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"CategoricalCrossEntropy()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"CategoricalCrossEntropy(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.adhoc.datasets","c":"Citeseer","l":"Citeseer()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"classes()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"classify()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"clearPrediction()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"ColumnRepetition(long, Tensor)","u":"%3Cinit%3E(long,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Complement","l":"Complement()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"Concat","l":"Concat()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"concat(int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"concat(int)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"config(String, double)","u":"config(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"config(String, String)","u":"config(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"config(String, String)","u":"config(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"configFrom(ModelBuilder)","u":"configFrom(mklab.JGNN.adhoc.ModelBuilder)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"constant(String, double)","u":"constant(java.lang.String,double)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"constant(String, Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"constant(String, Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"constant(String, Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"constant(String, 
Tensor)","u":"constant(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Constant","l":"Constant(Tensor)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"contains(Object)","u":"contains(java.lang.Object)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"copy()"},{"p":"mklab.JGNN.adhoc.datasets","c":"Cora","l":"Cora()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"createFirstState()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"createForwardValidity(List)","u":"createForwardValidity(java.util.List)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropy(double, double)","u":"crossEntropy(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropyDerivative(double, double)","u":"crossEntropyDerivative(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropyDerivativeCategorical(double, double)","u":"crossEntropyDerivativeCategorical(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropySigmoidDerivative(double, double)","u":"crossEntropySigmoidDerivative(double,double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"crossEntropyTanhDerivative(double, double)","u":"crossEntropyTanhDerivative(double,double)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"Dataset()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"debugging"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"DenseMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(double...)","u":"%3Cinit%3E(double...)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(Iterator)","u":"%3Cinit%3E(java.util.Iterator)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"DenseTensor(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"density()"},{"p":"mklab.JGNN.nn.loss","c":"Accuracy","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn","c":"Loss","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"derivative(Tensor, Tensor)","u":"derivative(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"describe()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"describe()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"describe()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"describe()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"describe()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"describe()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"describe()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"dot(Tensor)","u":"dot(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"dot(Tensor, 
Tensor)","u":"dot(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"Dropout()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"EmptyMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"EmptyTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"EmptyTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"endTape()"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"enter()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"estimateNumNonZeroElements()"},{"p":"mklab.JGNN.nn.loss","c":"Accuracy","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"BinaryCrossEntropy","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn","c":"Loss","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"evaluate(Tensor, Tensor)","u":"evaluate(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"exit()"},{"p":"mklab.JGNN.nn.activations","c":"Exp","l":"Exp()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"expMinusOne()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"expMinusOne()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"external(Tensor, Tensor)","u":"external(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"eye(long)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"FastBuilder()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"FastBuilder(Matrix, Matrix)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix,mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"FastEntry()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"FastEntry(K, 
V)","u":"%3Cinit%3E(K,V)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"features()"},{"p":"mklab.JGNN.nn.activations","c":"L1","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.operations","c":"Attention","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.pooling","c":"Mean","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.pooling","c":"Sum","l":"forward(List)","u":"forward(java.util.List)"},{"p":"mklab.JGNN.nn.operations","c":"From","l":"From()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"fromDouble(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"fromDouble(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"fromRange(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"fromRange(long, long)","u":"fromRange(long,long)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"function(String, String)","u":"function(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"function(String, String)","u":"function(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"futureConfigs(String, Function, int)","u":"futureConfigs(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"futureConfigs(String, Function, int)","u":"futureConfigs(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"futureConstants(String, Function, int)","u":"futureConstants(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"futureConstants(String, Function, int)","u":"futureConstants(java.lang.String,java.util.function.Function,int)"},{"p":"mklab.JGNN.nn.operations","c":"Gather","l":"Gather()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"get()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"get(int)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"get(int)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"get(long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"get(long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"get(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"get(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"get(long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"get(long, 
long)","u":"get(long,long)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"get(String)","u":"get(java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getColName()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getCols()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"getColumn()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getConfig(String)","u":"getConfig(java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getConfigOrDefault(String, double)","u":"getConfigOrDefault(java.lang.String,double)"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"getCurrentThreadId()"},{"p":"mklab.JGNN.nn","c":"Model","l":"getDepthLastOperations()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getDescription()"},{"p":"mklab.JGNN.core","c":"Distribution","l":"getDeviation()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"getDeviation()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"getDeviation()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"getDimensionName()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getDimensionSize(String)","u":"getDimensionSize(java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getExecutionGraphDot()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"getId(Object)","u":"getId(java.lang.Object)"},{"p":"mklab.JGNN.nn","c":"Model","l":"getInputs()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getInputs()"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"getInstance()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"getKey()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getLastTapeError()"},{"p":"mklab.JGNN.core","c":"Distribution","l":"getMean()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"getMean()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"getMean()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM.LSTMState","l":"getMemory()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"getModel()"},{"p":"mklab.JGNN.nn.activations","c":"Exp","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"LRelu","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"NExp","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"PRelu","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"Relu","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"Sigmoid","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.activations","c":"Tanh","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.operations","c":"MatMul","l":"getNonLinearity(int, double, double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.nn.operations","c":"Multiply","l":"getNonLinearity(int, double, 
double)","u":"getNonLinearity(int,double,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"getNonZeroElements()"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"getNonZeroEntries()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"getOptimizer()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"getOrCreateId(Object)","u":"getOrCreateId(java.lang.Object)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM.LSTMState","l":"getOutput()"},{"p":"mklab.JGNN.nn","c":"Model","l":"getOutputs()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getOutputs()"},{"p":"mklab.JGNN.nn","c":"Model","l":"getParameters()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getPrediction()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getRowName()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"getRows()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"getSimpleDescription()"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"getSimpleDescription()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"getSlice()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"getValue()"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"GradientDescent(double)","u":"%3Cinit%3E(double)"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"GradientDescent(double, 
double)","u":"%3Cinit%3E(double,double)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"graph()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"hasComponent(String)","u":"hasComponent(java.lang.String)"},{"p":"mklab.JGNN.core.util","c":"Range","l":"hasNext()"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"hasNext()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"IdConverter()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"IdConverter(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"mklab.JGNN.nn.operations","c":"Identity","l":"Identity()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn","c":"Model","l":"init(Initializer)","u":"init(mklab.JGNN.nn.Initializer)"},{"p":"mklab.JGNN.nn","c":"Initializer","l":"Initializer()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"inverse()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"inverse()"},{"p":"mklab.JGNN.nn.inputs","c":"Constant","l":"isCachable()"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"isCachable()"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"isCachable()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"From","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"isCachable()"},{"p":"mklab.JGNN.nn.operations","c":"To","l":"isCachable()"},{"p":"mklab.JGNN.nn.inputs","c":"Constant","l":"isConstant()"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"isConstant()"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"isConstant()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"isConstant()"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"isEnabled()"},{"p":"mklab.JGNN.core","c":"Slice","l":"iterator()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"iterator()"},{"p":"mklab.JGNN.core.util","c":"Range","l":"iterator()"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"iterator()"},{"p":"mklab.JGNN.nn.initializers","c":"KaimingNormal","l":"KaimingNormal()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.initializers","c":"KaimingUniform","l":"KaimingUniform()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"L1","l":"L1()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"L1","l":"L1(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"labels()"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"layer(String)","u":"layer(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"layer(String)","u":"layer(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"LayeredBuilder()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"LayeredBuilder(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"layerRepeat(String, int)","u":"layerRepeat(java.lang.String,int)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"layerRepeat(String, int)","u":"layerRepeat(java.lang.String,int)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"load(Path)","u":"load(java.nio.file.Path)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"log()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"log()"},{"p":"mklab.JGNN.nn.operations","c":"Log","l":"Log()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn","c":"Loss","l":"Loss()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"LRelu","l":"LRelu()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"LSTM(Optimizer, int, 
int)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer,int,int)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM.LSTMState","l":"LSTMState(Tensor, Tensor)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"MatMul","l":"MatMul()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"matmul(Matrix)","u":"matmul(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"matmul(Matrix)","u":"matmul(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"matmul(Matrix)","u":"matmul(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"matmul(Matrix, boolean, boolean)","u":"matmul(mklab.JGNN.core.Matrix,boolean,boolean)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"matmul(Matrix, boolean, boolean)","u":"matmul(mklab.JGNN.core.Matrix,boolean,boolean)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"matmul(Matrix, boolean, boolean)","u":"matmul(mklab.JGNN.core.Matrix,boolean,boolean)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"max()"},{"p":"mklab.JGNN.nn.pooling","c":"Max","l":"Max()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Max","l":"Max(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.nn.pooling","c":"Mean","l":"Mean()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Mean","l":"Mean(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.core","c":"Memory","l":"Memory()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"min()"},{"p":"mklab.JGNN.nn","c":"Model","l":"Model()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"ModelBuilder()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"ModelBuilder(Model)","u":"%3Cinit%3E(mklab.JGNN.nn.Model)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"ModelTraining()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.operations","c":"Multiply","l":"Multiply()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"multiply(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"multiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"multiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"multiply(Tensor)","u":"multiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"multiply(Tensor)","u":"multiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"multiply(Tensor)","u":"multiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"negative()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"negative()"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"Neuralang()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.activations","c":"NExp","l":"NExp()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Range","l":"next()"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"next()"},{"p":"mklab.JGNN.adhoc.train","c":"NodeClassification","l":"NodeClassification()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"norm()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"Normal()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"Normal(double, 
double)","u":"%3Cinit%3E(double,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"normalized()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"onesMask()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"operation(String)","u":"operation(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"operation(String)","u":"operation(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"operation(String)","u":"operation(java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"out(String)","u":"out(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"out(String)","u":"out(java.lang.String)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"output(Tensor, LSTM.LSTMState)","u":"output(mklab.JGNN.core.Tensor,mklab.JGNN.nn.operations.LSTM.LSTMState)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"param(String, double, Tensor)","u":"param(java.lang.String,double,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"param(String, double, Tensor)","u":"param(java.lang.String,double,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"param(String, double, Tensor)","u":"param(java.lang.String,double,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"param(String, Tensor)","u":"param(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"param(String, Tensor)","u":"param(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"param(String, Tensor)","u":"param(java.lang.String,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"Parameter(Tensor)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"Parameter(Tensor, 
double)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,double)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"parse(Path)","u":"parse(java.nio.file.Path)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"parse(String)","u":"parse(java.lang.String)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"persist()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"persist()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"persist()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"persist()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"persist()"},{"p":"mklab.JGNN.nn","c":"Model","l":"predict(List)","u":"predict(java.util.List)"},{"p":"mklab.JGNN.nn","c":"Model","l":"predict(Tensor...)","u":"predict(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"predict(Tensor[])","u":"predict(mklab.JGNN.core.Tensor[])"},{"p":"mklab.JGNN.nn.activations","c":"PRelu","l":"PRelu()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"print()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"printState()"},{"p":"mklab.JGNN.adhoc.datasets","c":"Pubmed","l":"Pubmed()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"put(int, double)","u":"put(int,double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"put(int, double)","u":"put(int,double)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"put(long, 
double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"put(long, double)","u":"put(long,double)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"put(long, long, double)","u":"put(long,long,double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"putAdd(int, double)","u":"putAdd(int,double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"putAdd(int, double)","u":"putAdd(int,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"putAdd(long, double)","u":"putAdd(long,double)"},{"p":"mklab.JGNN.core","c":"Slice","l":"range(double, double)","u":"range(double,double)"},{"p":"mklab.JGNN.core","c":"Slice","l":"range(int, int)","u":"range(int,int)"},{"p":"mklab.JGNN.core.util","c":"Range","l":"Range(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.util","c":"Range2D","l":"Range2D(long, long, long, long)","u":"%3Cinit%3E(long,long,long,long)"},{"p":"mklab.JGNN.nn.operations","c":"Reduce","l":"Reduce()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"register(double[])"},{"p":"mklab.JGNN.nn.optimizers","c":"Regularization","l":"Regularization(Optimizer, 
double)","u":"%3Cinit%3E(mklab.JGNN.nn.Optimizer,double)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"release()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"release()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"release()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"release()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"release()"},{"p":"mklab.JGNN.core","c":"Memory","l":"release(double[])"},{"p":"mklab.JGNN.nn.activations","c":"Relu","l":"Relu()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"relu(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"relu(Tensor)","u":"relu(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"reluDerivative(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"reluDerivative(Tensor)","u":"reluDerivative(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc.parsers","c":"FastBuilder","l":"rememberAs(String)","u":"rememberAs(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"rememberAs(String)","u":"rememberAs(java.lang.String)"},{"p":"mklab.JGNN.nn.operations","c":"Repeat","l":"Repeat()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"RepeatMatrix(double, long, long)","u":"%3Cinit%3E(double,long,long)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"RepeatTensor(double, long)","u":"%3Cinit%3E(double,long)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"reset()"},{"p":"mklab.JGNN.nn","c":"Optimizer","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"reset()"},{"p":"mklab.JGNN.nn.optimizers","c":"Regularization","l":"reset()"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"Reshape(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"RowRepetition(Tensor, 
long)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor,long)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"run(List)","u":"run(java.util.List)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"run(Tensor...)","u":"run(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"runModel(ArrayList)","u":"runModel(java.util.ArrayList)"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"runModel(Tensor...)","u":"runModel(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"runPrediction()"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"runPredictionAndAutosize()"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"sample()"},{"p":"mklab.JGNN.core","c":"Distribution","l":"sample()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"sample()"},{"p":"mklab.JGNN.adhoc","c":"Dataset","l":"samples()"},{"p":"mklab.JGNN.core","c":"Slice","l":"samplesAsFeatures()"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"save(Path)","u":"save(java.nio.file.Path)"},{"p":"mklab.JGNN.core","c":"Memory","l":"scope()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfAbs()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAbs()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfAdd(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAdd(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfAdd(Tensor)","u":"selfAdd(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAdd(Tensor)","u":"selfAdd(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfAdd(Tensor)","u":"selfAdd(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfAdd(Tensor, double)","u":"selfAdd(mklab.JGNN.core.Tensor,double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfExpMinusOne()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfExpMinusOne()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfInverse()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfInverse()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfLog()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfLog()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfMultiply(double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfMultiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfMultiply(double)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfMultiply(Tensor)","u":"selfMultiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfMultiply(Tensor)","u":"selfMultiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfMultiply(Tensor)","u":"selfMultiply(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfNegative()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfNegative()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfSqrt()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfSqrt()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"selfSubtract(Tensor)","u":"selfSubtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"selfSubtract(Tensor)","u":"selfSubtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"selfSubtract(Tensor)","u":"selfSubtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.inputs","c":"Parameter","l":"set(Tensor)","u":"set(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setColName(String)","u":"setColName(java.lang.String)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"setDescription(String)","u":"setDescription(java.lang.
String)"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"setDeviation(double)"},{"p":"mklab.JGNN.core","c":"Distribution","l":"setDeviation(double)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setDeviation(double)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setDiagonal(long, double)","u":"setDiagonal(long,double)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setDimensionName(String)","u":"setDimensionName(java.lang.String)"},{"p":"mklab.JGNN.nn.pooling","c":"Sort","l":"setDimensionName(String)","u":"setDimensionName(java.lang.String)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"setDimensionName(String, String)","u":"setDimensionName(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setDimensionName(String, String)","u":"setDimensionName(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.nn.operations","c":"Reshape","l":"setDimensionName(String, String)","u":"setDimensionName(java.lang.String,java.lang.String)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setDimensionName(Tensor)","u":"setDimensionName(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setDimensionName(Tensor)","u":"setDimensionName(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Dropout","l":"setEnabled(boolean)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setEpochs(int)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"setInterval(int)"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"setKey(K)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setLoss(Loss)","u":"setLoss(mklab.JGNN.nn.Loss)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setMainDiagonal(double)"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"setMean(double)"},{"p":"mklab.JGNN.core","c":"Distribution","l":"setMean(double)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setMean(double)"},{"p":"mklab.JGNN.nn.loss","c":"CategoricalCrossEntropy","l":"setMeanReduction(boolean)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setNumBatches(int)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setOptimizer(Optimizer)","u":"setOptimizer(mklab.JGNN.nn.Optimizer)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setParallelizedStochasticGradientDescent(boolean)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setPatience(int)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setRange(double, 
double)","u":"setRange(double,double)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setRowName(String)","u":"setRowName(java.lang.String)"},{"p":"mklab.JGNN.core.distribution","c":"Normal","l":"setSeed(long)"},{"p":"mklab.JGNN.core","c":"Distribution","l":"setSeed(long)"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"setSeed(long)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"setStream(PrintStream)","u":"setStream(java.io.PrintStream)"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"setTo(Tensor)","u":"setTo(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setToASymmetricNormalization()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToNormalized()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToOnes()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToProbability()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToRandom()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToRandom(Distribution)","u":"setToRandom(mklab.JGNN.core.Distribution)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"setToSymmetricNormalization()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToUniform()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"setToZero()"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setValidationLoss(Loss)","u":"setValidationLoss(mklab.JGNN.nn.Loss)"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"setValue(V)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"setVerbose(boolean)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"setZeroCopyType(Matrix)","u":"setZeroCopyType(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"setZeroCopyType(Matrix)","u":"setZeroCopyType(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core","c":"Slice","l":"shuffle()"},{"p":"mklab.JGNN.core","c":"Slice","l":"shuffle(int)"},{"p":"mklab.JGNN.nn.activations","c":"Sigmoid","l":"Sigmoid()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoid(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoid(Tensor)","u":"sigmoid(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoidDerivative(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"sigmoidDerivative(Tensor)","u":"sigmoidDerivative(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.adhoc","c":"IdConverter","l":"size()"},{"p":"mklab.JGNN.core","c":"Slice","l":"size()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"size()"},{"p":"mklab.JGNN.core","c":"Slice","l":"Slice(Iterable)","u":"%3Cinit%3E(java.lang.Iterable)"},{"p":"mklab.JGNN.nn.pooling","c":"SoftMax","l":"SoftMax()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"SoftMax","l":"SoftMax(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.core.util","c":"Sort","l":"Sort()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Sort","l":"Sort(int)","u":"%3Cinit%3E(int)"},{"p":"mklab.JGNN.core.util","c":"Sort","l":"sortedIndexes(ArrayList)","u":"sortedIndexes(java.util.ArrayList)"},{"p":"mklab.JGNN.core.util","c":"Sort","l":"sortedIndexes(double[])"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"SparseMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"SparseSymmetric(long, 
long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"SparseTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"SparseTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"SPECIES"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"sqrt()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"sqrt()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"startTape()"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"submit(Runnable)","u":"submit(java.lang.Runnable)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"subtract(Tensor)","u":"subtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"subtract(Tensor)","u":"subtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"subtract(Tensor)","u":"subtract(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"sum()"},{"p":"mklab.JGNN.nn.pooling","c":"Sum","l":"Sum()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.pooling","c":"Sum","l":"Sum(boolean)","u":"%3Cinit%3E(boolean)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"symmetricNormalization()"},{"p":"mklab.JGNN.nn.activations","c":"Tanh","l":"Tanh()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanh(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanh(Tensor)","u":"tanh(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanhDerivative(double)"},{"p":"mklab.JGNN.core.util","c":"Loss","l":"tanhDerivative(Tensor)","u":"tanhDerivative(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"tensor"},{"p":"mklab.JGNN.core","c":"Tensor","l":"Tensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.nn.operations","c":"To","l":"To()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toArray()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toDense()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toDouble()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toNonZeroString()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toProbability()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toSparse()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"toString()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"toString()"},{"p":"mklab.JGNN.core.util","c":"FastEntry","l":"toString()"},{"p":"mklab.JGNN.nn","c":"Model","l":"train(Loss, Optimizer, List, List)","u":"train(mklab.JGNN.nn.Loss,mklab.JGNN.nn.Optimizer,java.util.List,java.util.List)"},{"p":"mklab.JGNN.nn","c":"Model","l":"train(Loss, Optimizer, List, List, List)","u":"train(mklab.JGNN.nn.Loss,mklab.JGNN.nn.Optimizer,java.util.List,java.util.List,java.util.List)"},{"p":"mklab.JGNN.adhoc","c":"ModelTraining","l":"train(Model, Matrix, Matrix, Slice, Slice)","u":"train(mklab.JGNN.nn.Model,mklab.JGNN.core.Matrix,mklab.JGNN.core.Matrix,mklab.JGNN.core.Slice,mklab.JGNN.core.Slice)"},{"p":"mklab.JGNN.adhoc.train","c":"NodeClassification","l":"train(Model, Matrix, Matrix, Slice, Slice)","u":"train(mklab.JGNN.nn.Model,mklab.JGNN.core.Matrix,mklab.JGNN.core.Matrix,mklab.JGNN.core.Slice,mklab.JGNN.core.Slice)"},{"p":"mklab.JGNN.nn","c":"Model","l":"train(ModelTraining, Matrix, Matrix, Slice, Slice)","u":"train(mklab.JGNN.adhoc.ModelTraining,mklab.JGNN.core.Matrix,mklab.JGNN.core.Matrix,mklab.JGNN.core.Slice,mklab.JGNN.core.Slice)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"train(Tensor[], Tensor)","u":"train(mklab.JGNN.core.Tensor[],mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"trainOnOutputError(Tensor[], 
Tensor)","u":"trainOnOutputError(mklab.JGNN.core.Tensor[],mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn","c":"Model","l":"trainTowardsZero(Optimizer, List)","u":"trainTowardsZero(mklab.JGNN.nn.Optimizer,java.util.List)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"transform(Tensor)","u":"transform(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.operations","c":"Transpose","l":"Transpose()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"transposed()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"TransposedMatrix(Matrix)","u":"%3Cinit%3E(mklab.JGNN.core.Matrix)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"traverseNonZeroElements()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"Uniform()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.distribution","c":"Uniform","l":"Uniform(double, double)","u":"%3Cinit%3E(double,double)"},{"p":"mklab.JGNN.core","c":"Memory.Scope","l":"unregister(double[])"},{"p":"mklab.JGNN.nn","c":"Optimizer","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"Adam","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"GradientDescent","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"Regularization","l":"update(Tensor, Tensor)","u":"update(mklab.JGNN.core.Tensor,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.nn.optimizers","c":"BatchOptimizer","l":"updateAll()"},{"p":"mklab.JGNN.nn.operations","c":"LSTM","l":"updateTape(Tensor, LSTM.LSTMState, 
Tensor)","u":"updateTape(mklab.JGNN.core.Tensor,mklab.JGNN.nn.operations.LSTM.LSTMState,mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"values"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"values"},{"p":"mklab.JGNN.adhoc","c":"ModelBuilder","l":"var(String)","u":"var(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"LayeredBuilder","l":"var(String)","u":"var(java.lang.String)"},{"p":"mklab.JGNN.adhoc.parsers","c":"Neuralang","l":"var(String)","u":"var(java.lang.String)"},{"p":"mklab.JGNN.nn.inputs","c":"Variable","l":"Variable()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.initializers","c":"VariancePreservingInitializer","l":"VariancePreservingInitializer()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"vectorization"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"VectorizedMatrix(long, long)","u":"%3Cinit%3E(long,long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(double...)","u":"%3Cinit%3E(double...)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(Iterator)","u":"%3Cinit%3E(java.util.Iterator)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(long)","u":"%3Cinit%3E(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"VectorizedTensor(String)","u":"%3Cinit%3E(java.lang.String)"},{"p":"mklab.JGNN.nn.loss.report","c":"VerboseLoss","l":"VerboseLoss(Loss)","u":"%3Cinit%3E(mklab.JGNN.nn.Loss)"},{"p":"mklab.JGNN.nn","c":"NNOperation","l":"view()"},{"p":"mklab.JGNN.core","c":"ThreadPool","l":"waitForConclusion()"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"WrapCols(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"WrapCols(Tensor...)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"WrapRows(List)","u":"%3Cinit%3E(java.util.List)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"WrapRows(Tensor...)","u":"%3Cinit%3E(mklab.JGNN.core.Tensor...)"},{"p":"mklab.JGNN.nn.initializers","c":"XavierNormal","l":"XavierNormal()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.nn.initializers","c":"XavierUniform","l":"XavierUniform()","u":"%3Cinit%3E()"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy()"},{"p":"mklab.JGNN.core","c":"Tensor","l":"zeroCopy()"},{"p":"mklab.JGNN.core.empy","c":"EmptyTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessCol","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.matrix","c":"AccessRow","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"AccessSubtensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"DenseTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"RepeatTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"SparseTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.tensor","c":"VectorizedTensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"zeroCopy(long)"},{"p":"mklab.JGNN.core.empy","c":"EmptyMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"ColumnRepetition","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"DenseMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"Diagonal","l":"zeroCopy(long, 
long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"RepeatMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"RowRepetition","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"SparseSymmetric","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"TransposedMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"VectorizedMatrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapCols","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core.matrix","c":"WrapRows","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy(long, long)","u":"zeroCopy(long,long)"},{"p":"mklab.JGNN.core","c":"Matrix","l":"zeroCopy(Tensor)","u":"zeroCopy(mklab.JGNN.core.Tensor)"},{"p":"mklab.JGNN.core","c":"Tensor","l":"zeroCopy(Tensor)","u":"zeroCopy(mklab.JGNN.core.Tensor)"}];updateSearchResults(); \ No newline at end of file diff --git a/docs/javadoc/mklab/JGNN/adhoc/Dataset.html b/docs/javadoc/mklab/JGNN/adhoc/Dataset.html index cba47862..edff57e3 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/Dataset.html +++ b/docs/javadoc/mklab/JGNN/adhoc/Dataset.html @@ -1,11 +1,11 @@ - + Dataset - + @@ -84,8 +84,8 @@

Class Dataset


public class Dataset extends Object
-
This class provides the backbone with which to define datasets. - It provides common operations for downloading and importing data.
+
This class provides the backbone with which to define datasets. It provides + common operations for downloading and importing data.
Author:
Emmanouil Krasanakis
@@ -149,8 +149,9 @@

Method Summary

-
Retrieves a converter that maps samples to long identifiers that match them to - rows of features(), labels(), and graph() matrices.
+
Retrieves a converter that maps samples to long identifiers that match them + to rows of features(), labels(), and graph() + matrices.
@@ -187,9 +188,9 @@

Method Details

samples

public IdConverter samples()
-
Retrieves a converter that maps samples to long identifiers that match them to - rows of features(), labels(), and graph() matrices. - For example, a list of all node ids can be obtained per +
Retrieves a converter that maps samples to long identifiers that match them + to rows of features(), labels(), and graph() + matrices. For example, a list of all node ids can be obtained per dataset.samples().getIds()
Returns:
@@ -201,9 +202,9 @@

samples

classes

public IdConverter classes()
-
Retrieves a converter that maps class names to label dimentions. - For example, the prediction for one sample can be converted to its name - per dataset.classes().get(prediction.argmax()).
+
Retrieves a converter that maps class names to label dimensions. For example, + the prediction for one sample can be converted to its name per + dataset.classes().get(prediction.argmax()).
Returns:
An IdConverter.
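To make these accessors concrete, here is a minimal sketch that exercises them; the choice of the bundled Cora dataset is illustrative only:

import mklab.JGNN.adhoc.Dataset;
import mklab.JGNN.adhoc.datasets.Cora;
import mklab.JGNN.core.Matrix;

public class DatasetSketch {
    public static void main(String[] args) {
        Dataset dataset = new Cora();       // any bundled dataset behaves the same
        Matrix labels = dataset.labels();   // one-hot label rows aligned with samples()
        Matrix graph = dataset.graph();     // the dataset's graph adjacency matrix
        System.out.println(dataset.samples().size() + " samples, "
                + dataset.classes().size() + " classes");
    }
}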
diff --git a/docs/javadoc/mklab/JGNN/adhoc/IdConverter.html b/docs/javadoc/mklab/JGNN/adhoc/IdConverter.html index b820baad..e6d86824 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/IdConverter.html +++ b/docs/javadoc/mklab/JGNN/adhoc/IdConverter.html @@ -1,11 +1,11 @@ - + IdConverter - + @@ -98,12 +98,13 @@

Constructor Summary

Description
-
Instantiates an empty converter to be filled with getOrCreateId(Object).
+
Instantiates an empty converter to be filled with + getOrCreateId(Object).
IdConverter(List<?> objects)
-
Instantiates the converter on a list of objects to register - with getOrCreateId(Object) on.
+
Instantiates the converter on a list of objects to register with + getOrCreateId(Object) on.
@@ -122,7 +123,8 @@

Method Summary

boolean
contains(Object object)
-
Checks whether the object has been registered with getOrCreateId(Object).
+
Checks whether the object has been registered with + getOrCreateId(Object).
get(long id)
@@ -176,15 +178,16 @@

Constructor Details

IdConverter

public IdConverter()
-
Instantiates an empty converter to be filled with getOrCreateId(Object).
+
Instantiates an empty converter to be filled with + getOrCreateId(Object).
  • IdConverter

    public IdConverter(List<?> objects)
    -
    Instantiates the converter on a list of objects to register - with getOrCreateId(Object) on.
    +
    Instantiates the converter on a list of objects to register with + getOrCreateId(Object) on.
    Parameters:
    objects - A list of objects.
    @@ -207,8 +210,10 @@

    setDimensionName

    Sets dimension names for one-hot encodings.
    Parameters:
    -
    nodeDimensionName - The dimension name for traversing nodes (e.g. "node").
    -
    featureDimensionName - The dimension name for traversing features (e.g. "label").
    +
    nodeDimensionName - The dimension name for traversing nodes (e.g. + "node").
    +
    featureDimensionName - The dimension name for traversing features (e.g. + "label").
    Returns:
    this instance
    @@ -283,7 +288,8 @@

    size

    contains

    public boolean contains(Object object)
    -
    Checks whether the object has been registered with getOrCreateId(Object).
    +
    Checks whether the object has been registered with + getOrCreateId(Object).
    Parameters:
    object - An object to check if it exists.
    @@ -296,10 +302,10 @@

    contains

    getSlice

    public Slice getSlice()
    -
    Returns a slice of all registered identifiers. - The slice is persistent across multiple calls to this method, but is - instantiated anew after getOrCreateId(Object) registers a new - object (but not if it retrieves an existing object).
    +
    Returns a slice of all registered identifiers. The slice is persistent across + multiple calls to this method, but is instantiated anew after + getOrCreateId(Object) registers a new object (but not if it + retrieves an existing object).
    Returns:
    A Slice.
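The following sketch walks through the converter life cycle described above (the registered keys are illustrative):

import mklab.JGNN.adhoc.IdConverter;
import mklab.JGNN.core.Slice;

public class IdConverterSketch {
    public static void main(String[] args) {
        IdConverter ids = new IdConverter();        // empty converter
        ids.getOrCreateId("alice");                 // registers a new object
        ids.getOrCreateId("bob");
        System.out.println(ids.contains("alice"));  // true
        Slice slice = ids.getSlice();               // all registered identifiers
        System.out.println(slice.size());           // 2
    }
}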
    diff --git a/docs/javadoc/mklab/JGNN/adhoc/ModelBuilder.html b/docs/javadoc/mklab/JGNN/adhoc/ModelBuilder.html index b3b5f602..622ad7e5 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/ModelBuilder.html +++ b/docs/javadoc/mklab/JGNN/adhoc/ModelBuilder.html @@ -1,11 +1,11 @@ - + ModelBuilder - + @@ -84,8 +84,8 @@

    Class ModelBuilder


    public class ModelBuilder extends Object
    -
    This class and subclasses can be used to create Model instances - by automatically creating and managing NNOperation instances based on +
    This class and subclasses can be used to create Model instances by + automatically creating and managing NNOperation instances based on textual descriptions.
    Author:
    @@ -135,26 +135,39 @@

    Method Summary

    Asserts that all components parsed into a call graph with - operation(String) are eventually used by at least one out(String) - component.
    + operation(String) are eventually used by at least one + out(String) component.
    autosize(List<Tensor> inputs)
    -
     
    +
    +
    Applies the createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with assertBackwardValidity().
    +
    autosize(Tensor... inputs)
    -
     
    +
    +
    Applies the createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with assertBackwardValidity().
    +
    config(String name, double value)
    -
    Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during operation(String) expressions.
    +
    Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during operation(String) expressions.
    config(String name, String value)
    -
     
    +
    +
Applies config(String, double) where the set value is obtained from + another configuration hyperparameter.
    +
    constant(String name, double value)
    @@ -170,9 +183,8 @@

    Method Summary

    -
    Asserts that all components parsed into a call graph with - operation(String) are eventually used by at least one out(String) - component.
    +
    Asserts that a forward run of the architecture is valid given some input + data.
    @@ -186,17 +198,20 @@

    Method Summary

    get(String name)
    -
    Retrieves the NNOperation registered with the provided - name, for example to investigates its value.
    +
Retrieves the NNOperation registered with the provided name, for + example to investigate its value.
    double
    -
    getConfigOrDefault(String name, + +
    +
    Retrieves a configuration hyperparameter's value.
    +
    +
    double
    +
    getConfigOrDefault(String name, double defaultValue)
    -
     
    -
    int
    -
    getConfigOrDefault(String name, - int defaultValue)
    -
     
    +
    +
    Retrieves a configuration hyperparameter's value.
    +
    @@ -211,23 +226,25 @@

    Method Summary

    boolean
    -
    Checks whether the builder has added to its managed model a component of - the given name.
    +
    Checks whether the builder has added to its managed model a component of the + given name.
    load(Path path)
    -
     
    +
    +
    Loads a ModelBuilder instance from the provided path, such as + Paths.get("example.jgnn").
    +
    -
    Parses one or more operations split by new line characters or ; - to add to the execution graph.
    +
    Parses one or more operations split by new line characters or ; to add to the + execution graph.
    out(String name)
    -
    Declares the component with the given name an output of the - managed model.
    +
    Declares the component with the given name an output of the managed model.
    param(String name, @@ -241,8 +258,8 @@

    Method Summary

    param(String name, Tensor value)
    -
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
    +
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with + the given name, zero regularization, and initial value.
    @@ -253,25 +270,26 @@

    Method Summary

    -
    This is a wrapper for getModel().predict(inputs) - without returning output values (use get(String) - afterwards to view outputs.
    +
This is a wrapper for getModel().predict(inputs) without + returning output values (use get(String) afterwards to view outputs).
    runModel(Tensor... inputs)
    -
    This is a wrapper for getModel().predict(inputs) - without returning output values (use get(String) - afterwards to view outputs.
    +
This is a wrapper for getModel().predict(inputs) without + returning output values (use get(String) afterwards to view outputs).
    save(Path path)
    -
     
    +
    +
    Serializes the model builder instance into a Path, such as + Paths.get("example.jgnn").
    +
    var(String name)
    -
    Declares a component with the given name to be used as an input - of the managed model.
    +
    Declares a component with the given name to be used as an input of the + managed model.
    @@ -314,8 +332,8 @@

    Method Details

    getModel

    public Model getModel()
    -
    Retrieves the model currently built by the builder. - This can changed depending on additional building method calls.
    +
Retrieves the model currently built by the builder. This can change + depending on additional building method calls.
    Returns:
    A Model instance.
    @@ -326,20 +344,37 @@

    getModel

    save

    public ModelBuilder save(Path path)
    +
    Serializes the model builder instance into a Path, such as + Paths.get("example.jgnn").
    +
    +
    Parameters:
    +
path - The path to serialize to.
    +
    Returns:
    +
    This builder's instance.
    +
  • load

    public static ModelBuilder load(Path path)
    +
    Loads a ModelBuilder instance from the provided path, such as + Paths.get("example.jgnn"). The instance may have been serialized + with any class that extends the model builder.
    +
    +
    Parameters:
    +
    path - The provided path.
    +
    Returns:
    +
    The loaded ModelBuilder instance.
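A sketch of the save/load round trip; the parsed expression and file name are illustrative, and relu is assumed to be available as a built-in function of the parsed language:

import java.nio.file.Paths;
import mklab.JGNN.adhoc.ModelBuilder;

public class PersistenceSketch {
    public static void main(String[] args) {
        ModelBuilder builder = new ModelBuilder()
                .var("x")                  // model input
                .operation("y = relu(x)")  // illustrative expression
                .out("y");                 // model output
        builder.save(Paths.get("example.jgnn"));
        ModelBuilder restored = ModelBuilder.load(Paths.get("example.jgnn"));
        System.out.println(restored.hasComponent("y")); // true
    }
}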
    +
  • hasComponent

    public boolean hasComponent(String name)
    -
    Checks whether the builder has added to its managed model a component of - the given name.
    +
    Checks whether the builder has added to its managed model a component of the + given name.
    Parameters:
    name - The component name to check for.
    @@ -352,8 +387,8 @@

    hasComponent

    var

    public ModelBuilder var(String name)
    -
    Declares a component with the given name to be used as an input - of the managed model.
    +
    Declares a component with the given name to be used as an input of the + managed model.
    Parameters:
    name - The name of the component.
    @@ -366,10 +401,9 @@

    var

    out

    public ModelBuilder out(String name)
    -
    Declares the component with the given name an output of the - managed model. The component should have already been assigned a value. - To output complex expressions use operation(String) - to define them first.
    +
    Declares the component with the given name an output of the managed model. + The component should have already been assigned a value. To output complex + expressions use operation(String) to define them first.
    Parameters:
    name - A component name.
    @@ -389,12 +423,13 @@

    param

    Parameters:
    name - The name to be assigned to the new component.
    -
    regularization - The regularization value. Zero corresponds to no regularization. - Typically, this is non-negative.
    -
    value - The initial value to be assigned to the parameter. Exact values - can be overridden by neural initialization strategies, but an initial value - should be declared nonetheless to determine the parameter type and allocate - any necessary memory.
    +
    regularization - The regularization value. Zero corresponds to no + regularization. Typically, this is non-negative.
    +
    value - The initial value to be assigned to the parameter. + Exact values can be overridden by neural initialization + strategies, but an initial value should be declared + nonetheless to determine the parameter type and + allocate any necessary memory.
    Returns:
    The builder's instance.
    See Also:
    @@ -412,14 +447,16 @@

    param

    config

    public ModelBuilder config(String name, double value)
    -
    Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during operation(String) expressions. - For in-expression use of hyperparameters, delcare them with constant(String, double).
    +
Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during operation(String) expressions. For + in-expression use of hyperparameters, declare them with + constant(String, double). In Neuralang terms, this implements the + broadcasting operation.
    Parameters:
    name - The name of the configuration hyperparameter.
    -
    value - The value to be assigned to the hyperparameter. - Typically, provide a long number.
    +
    value - The value to be assigned to the hyperparameter. This may also be + a long number.
    Returns:
    The builder's instance.
    See Also:
    @@ -428,6 +465,7 @@

    config

  • operation(String)
  • param(String, Tensor)
  • param(String, double, Tensor)
  • +
  • config(String, String)
  • @@ -438,13 +476,43 @@

    config

    config

    public ModelBuilder config(String name, String value)
    +
Applies config(String, double) where the set value is obtained from + another configuration hyperparameter.
    +
    +
    Parameters:
    +
    name - The name of the configuration hyperparameter to set.
    +
    value - The name of the configuration hyperparameter whose value should + be copied.
    +
    Returns:
    +
    The builder's instance.
    +
    See Also:
    +
    + +
    +
  • -
    -

    getConfigOrDefault

    -
    public int getConfigOrDefault(String name, - int defaultValue)
    +
    +

    getConfig

    +
    public double getConfig(String name)
    +
    Retrieves a configuration hyperparameter's value.
    +
    +
    Parameters:
    +
    name - The configuration's name.
    +
    Returns:
    +
The retrieved value.
    +
    Throws:
    +
RuntimeException - If no configuration with the given name was found.
    +
    See Also:
    +
    + +
    +
  • @@ -452,6 +520,22 @@

    getConfigOrDefault

    getConfigOrDefault

    public double getConfigOrDefault(String name, double defaultValue)
    +
    Retrieves a configuration hyperparameter's value. If no such configuration + exists, a default value is returned instead.
    +
    +
    Parameters:
    +
    name - The configuration's name.
    +
    defaultValue - The default to be retrieved if no such configuration was + found.
    +
    Returns:
    +
The retrieved value.
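A small sketch tying the configuration methods together (the names and values are illustrative):

import mklab.JGNN.adhoc.ModelBuilder;

public class ConfigSketch {
    public static void main(String[] args) {
        ModelBuilder builder = new ModelBuilder()
                .config("hidden", 64)          // declare a hyperparameter
                .config("classes", "hidden");  // copy its value into another one
        System.out.println(builder.getConfig("classes"));               // 64.0
        System.out.println(builder.getConfigOrDefault("dropout", 0.5)); // 0.5 fallback
    }
}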
    +
    See Also:
    +
    + +
    +
  • @@ -459,15 +543,15 @@

    getConfigOrDefault

    param

    public ModelBuilder param(String name, Tensor value)
    -
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
    +
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with + the given name, zero regularization, and initial value.
    Parameters:
    name - The name to be assigned to the new component.
    value - The initial value to be assigned to the parameter. Exact values - can be overridden by neural initialization strategies, but an initial value - should be declared nonetheless to determine the parameter type and allocate - any necessary memory.
    + can be overridden by neural initialization strategies, but an + initial value should be declared nonetheless to determine the + parameter type and allocate any necessary memory.
    Returns:
    The builder's instance.
    See Also:
    @@ -485,9 +569,9 @@

    param

    constant

    public ModelBuilder constant(String name, double value)
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use get(String) + to retrieve the respective component.
    Parameters:
    name - The name of the constant component.
    @@ -509,9 +593,9 @@

    constant

    constant

    public ModelBuilder constant(String name, Tensor value)
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use get(String) + to retrieve the respective component.
    Parameters:
    name - The name of the constant component.
    @@ -531,8 +615,8 @@

    constant

    get

    public NNOperation get(String name)
    -
    Retrieves the NNOperation registered with the provided - name, for example to investigates its value.
    +
Retrieves the NNOperation registered with the provided name, for + example to investigate its value.
    Parameters:
    name - The name of the component.
    @@ -545,9 +629,8 @@

    get

    runModel

    public ModelBuilder runModel(Tensor... inputs)
    -
    This is a wrapper for getModel().predict(inputs) - without returning output values (use get(String) - afterwards to view outputs.
    +
This is a wrapper for getModel().predict(inputs) without + returning output values (use get(String) afterwards to view outputs).
    Parameters:
    inputs - A variable number of Tensor inputs.
    @@ -567,9 +650,8 @@

    runModel

    runModel

    public ModelBuilder runModel(ArrayList<Tensor> inputs)
    -
    This is a wrapper for getModel().predict(inputs) - without returning output values (use get(String) - afterwards to view outputs.
    +
This is a wrapper for getModel().predict(inputs) without + returning output values (use get(String) afterwards to view outputs).
    Parameters:
    inputs - A list of Tensor inputs.
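A hedged usage sketch, assuming a component named "h" was defined by an earlier operation, x is a Tensor prepared elsewhere, and getPrediction() exposes a component's last computed value:

    builder.runModel(x);                               // forward pass, no outputs returned
    Tensor hidden = builder.get("h").getPrediction();  // inspect an intermediate component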
    @@ -596,13 +678,12 @@

    function

    operation

    public ModelBuilder operation(String desc)
    -
    Parses one or more operations split by new line characters or ; - to add to the execution graph. All operations should assign a - value to a new component name and comprise operators and functions. - For a detailed description of the domain-specific language this - method accepts, please refer to the library's - - online documentation.
    +
    Parses one or more operations split by new line characters or ; to add to the + execution graph. All operations should assign a value to a new component name + and comprise operators and functions. For a detailed description of the + domain-specific language this method accepts, please refer to the library's + online + documentation.
    Parameters:
    desc - The operation to parse.
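A hedged sketch of the expression syntax; x, W, and b are assumed to be previously declared components, @ denotes matrix multiplication, and ; separates two operations:

    builder.operation("h = relu(x@W + b); yhat = softmax(h)");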
    @@ -624,26 +705,69 @@

    operation

    autosize

    public ModelBuilder autosize(Tensor... inputs)
    +
    Applies the createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with assertBackwardValidity().
    +
    +
    Parameters:
    +
inputs - The inputs from which to infer dimension sizes.
    +
    Returns:
+
The builder's instance.
    See Also:
    +
    + +
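A hedged sketch; EmptyTensor (from mklab.JGNN.core.empy) supplies shapes without numerical work, and numSamples is a hypothetical size:

    builder.autosize(new EmptyTensor(numSamples)); // resolves ? dimensions from input shapes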
    +
  • autosize

    public ModelBuilder autosize(List<Tensor> inputs)
    +
    Applies the createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with assertBackwardValidity().
    +
    +
    Parameters:
    +
inputs - The inputs from which to infer dimension sizes.
    +
    Returns:
+
The builder's instance.
    See Also:
    +
    + +
    +
  • createForwardValidity

    public ModelBuilder createForwardValidity(List<Tensor> inputs)
    -
    Asserts that all components parsed into a call graph with - operation(String) are eventually used by at least one out(String) - component.
    +
Asserts that a forward run of the architecture is valid given some input + data. Prefer using input data from the mklab.JGNN.core.empy package + to make this method run quickly, without performing any numerical operations. + In addition to asserting validity, this operation simultaneously resizes any + zero-dimension parameter tensors (created with the ? configuration in parsed + expressions) to automatically infer dimension names and sizes.
    +
    Parameters:
    +
inputs - The same inputs you would pass to Model.predict(List).
    Returns:
    The builder's instance.
    Throws:
    -
    RuntimeException - if not all execution graph branches lead to declared outputs.
    +
RuntimeException - If dimensions or dimension names mismatch, or if + it is not possible to infer the actual size of + automatically populated dimensions.
    +
    See Also:
    +
    + +
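A hedged sketch combining the two validity checks; Arrays.asList builds the List<Tensor> argument and numSamples is hypothetical:

    List<Tensor> inputs = Arrays.asList(new EmptyTensor(numSamples)); // no numerical operations
    builder.createForwardValidity(inputs)   // checks the forward pass and resizes ? dimensions
           .assertBackwardValidity();       // checks that all branches reach declared outputs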
  • @@ -652,13 +776,14 @@

    createForwardValidity

    assertBackwardValidity

    public ModelBuilder assertBackwardValidity()
    Asserts that all components parsed into a call graph with - operation(String) are eventually used by at least one out(String) - component.
    + operation(String) are eventually used by at least one + out(String) component.
    Returns:
    The builder's instance.
    Throws:
    -
    RuntimeException - if not all execution graph branches lead to declared outputs.
    +
    RuntimeException - if not all execution graph branches lead to declared + outputs.
diff --git a/docs/javadoc/mklab/JGNN/adhoc/ModelTraining.html b/docs/javadoc/mklab/JGNN/adhoc/ModelTraining.html new file mode 100644 index 00000000..5b04bedf --- /dev/null +++ b/docs/javadoc/mklab/JGNN/adhoc/ModelTraining.html @@ -0,0 +1,396 @@ +ModelTraining
    + +
    +
    + +
    + +

    Class ModelTraining

    +
    +
    java.lang.Object +
    mklab.JGNN.adhoc.ModelTraining
    +
    +
    +
    +
    Direct Known Subclasses:
    +
    NodeClassification
    +
    +
    +
    public class ModelTraining +extends Object
    +
    This is a helper class that automates the definition of training processes of + Model instances by defining the number of epochs, loss functions, + number of batches and the ability to use ThreadPool for parallelized + batch computations.
    +
    +
    Author:
    +
    Emmanouil Krasanakis
    +
    +
    +
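A hedged configuration sketch; Adam, CategoricalCrossEntropy, and VerboseLoss are assumed to come from the library's optimizer and loss packages, and all numbers are illustrative:

    ModelTraining trainer = new ModelTraining()
        .setOptimizer(new Adam(0.01))
        .setEpochs(300)
        .setPatience(100)
        .setLoss(new CategoricalCrossEntropy())
        .setValidationLoss(new VerboseLoss(new CategoricalCrossEntropy())); // also prints progress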
    + +
    +
    +
      + +
    • +
      +

      Constructor Details

      +
        +
      • +
        +

        ModelTraining

        +
        public ModelTraining()
        +
        +
      • +
      +
      +
    • + +
    • +
      +

      Method Details

      +
        +
      • +
        +

        setVerbose

        +
        public ModelTraining setVerbose(boolean verbose)
        +
        Deprecated. +
        This method was available in earlier JGNN versions but will be + gradually phased out. Instead, wrap the validation loss within + VerboseLoss to replicate the + same behavior.
        +
        +
        +
        Parameters:
        +
verbose - Whether training progress will be printed.
        +
        +
        +
      • +
      • +
        +

        setLoss

        +
        public ModelTraining setLoss(Loss loss)
        +
Sets the loss function that training minimizes on training data.
+
+
Parameters:
+
loss - The loss function to minimize during training.
+
Returns:
+
this model training instance.
        +
        +
      • +
      • +
        +

        setValidationLoss

        +
        public ModelTraining setValidationLoss(Loss loss)
+
Sets the loss function used to evaluate the model on validation samples.
+
      • +
      • +
        +

        setOptimizer

        +
        public ModelTraining setOptimizer(Optimizer optimizer)
        +
Sets an Optimizer instance that controls parameter updates during + training. If the provided optimizer is not an instance of + BatchOptimizer, it is forcefully wrapped by the latter. Training + calls the batch optimizer's update method after every batch.
        +
        +
        Parameters:
        +
        optimizer - The desired optimizer.
        +
        Returns:
        +
        this model training instance.
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        setNumBatches

        +
        public ModelTraining setNumBatches(int numBatches)
        +
        Sets the number of batches training data slices should be split into.
        +
        +
        Parameters:
        +
        numBatches - The desired number of batches. Default is 1.
        +
        Returns:
        +
        this model training instance.
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        setParallelizedStochasticGradientDescent

        +
        public ModelTraining setParallelizedStochasticGradientDescent(boolean paralellization)
        +
Sets whether the training strategy should reflect stochastic gradient descent + by randomly sampling from the training dataset to obtain data samples. If + true, both this feature and acceptable thread-based + parallelization are enabled. Parallelization makes use of JGNN's + ThreadPool.
        +
        +
        Parameters:
        +
        paralellization - A boolean value indicating whether this feature is + enabled.
        +
        Returns:
        +
        this model training instance.
        +
        See Also:
        +
        + +
        +
        +
        +
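A hedged sketch of batched, parallelized training:

    trainer.setNumBatches(10)                               // split training data into 10 batches
           .setParallelizedStochasticGradientDescent(true); // sample batches in parallel threads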
      • +
      • +
        +

        setEpochs

        +
        public ModelTraining setEpochs(int epochs)
        +
        Sets the maximum number of epochs for which training runs. If no patience has + been set, training runs for exactly this number of epochs.
        +
        +
        Parameters:
        +
        epochs - The maximum number of epochs.
        +
        Returns:
        +
        this model training instance.
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        setPatience

        +
        public ModelTraining setPatience(int patience)
        +
        Sets the patience of the training strategy that performs early stopping. If + training does not encounter a smaller validation loss for this number of + epochs, it stops.
        +
        +
        Parameters:
        +
        patience - The number of patience epochs. Default is Integer.MAX_VALUE + to effectively disable this feature and let training always + reach the maximum number of set epochs.
        +
        Returns:
        +
        this model training instance.
        +
        See Also:
        +
        + +
        +
        +
        +
      • +
      • +
        +

        train

        +
        public Model train(Model model, + Matrix features, + Matrix labels, + Slice trainingSamples, + Slice validationSamples)
        +
        Deprecated. + +
        +
        This is a leftover method from an earlier version of JGNN's interface.
        +
        +
      • +
      • +
        +

        configFrom

        +
        public ModelTraining configFrom(ModelBuilder modelBuilder)
        +
Retrieves the learning rate (lr), epochs, batches, and patience parameters + from the configurations of a ModelBuilder.
        +
        +
        Parameters:
        +
modelBuilder - The model builder whose configurations are read.
        +
        Returns:
        +
        this model training instance.
        +
        +
        +
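A hedged sketch; the configuration names (lr, epochs, batches, patience) follow the description above and are otherwise an assumption:

    ModelBuilder builder = new ModelBuilder()
        .config("lr", 0.01)
        .config("epochs", 300)
        .config("batches", 1)
        .config("patience", 100);
    ModelTraining trainer = new ModelTraining().configFrom(builder); // copies the four settings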
      • +
      +
      +
    • +
    +
    + +
    +
    +
    + + diff --git a/docs/javadoc/mklab/JGNN/adhoc/class-use/Dataset.html b/docs/javadoc/mklab/JGNN/adhoc/class-use/Dataset.html index 7f83ffd8..b8fb0b94 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/class-use/Dataset.html +++ b/docs/javadoc/mklab/JGNN/adhoc/class-use/Dataset.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.Dataset - + @@ -57,7 +57,9 @@

    Uses of ClassPackage
    Description
    -
     
    +
    +
    This package contains datasets for out-of-the-box experimentation.
    +
      diff --git a/docs/javadoc/mklab/JGNN/adhoc/class-use/IdConverter.html b/docs/javadoc/mklab/JGNN/adhoc/class-use/IdConverter.html index 2fa151fd..03bc67c4 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/class-use/IdConverter.html +++ b/docs/javadoc/mklab/JGNN/adhoc/class-use/IdConverter.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.IdConverter - + @@ -57,7 +57,9 @@

      Uses of Cla
      Package
      Description
      -
       
      +
      +
      Contains classes that simplify data loading, model building, and training.
      +
        @@ -77,8 +79,9 @@

        Uses of IdCo
        Dataset.samples()
        -
        Retrieves a converter that maps samples to long identifiers that match them to - rows of Dataset.features(), Dataset.labels(), and Dataset.graph() matrices.
        +
        Retrieves a converter that maps samples to long identifiers that match them + to rows of Dataset.features(), Dataset.labels(), and Dataset.graph() + matrices.
        IdConverter.setDimensionName(String nodeDimensionName, diff --git a/docs/javadoc/mklab/JGNN/adhoc/class-use/ModelBuilder.html b/docs/javadoc/mklab/JGNN/adhoc/class-use/ModelBuilder.html index 2bb86080..6367304f 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/class-use/ModelBuilder.html +++ b/docs/javadoc/mklab/JGNN/adhoc/class-use/ModelBuilder.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.ModelBuilder - + @@ -57,11 +57,14 @@

        Uses of Cl
        Package
        Description
        -
         
        +
        +
        Contains classes that simplify data loading, model building, and training.
        +
        -
         
        - -
         
        +
        +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
        +

          @@ -77,26 +80,39 @@

          Uses of Mod
          Asserts that all components parsed into a call graph with - operation(String) are eventually used by at least one out(String) - component.
          + operation(String) are eventually used by at least one + out(String) component.
          ModelBuilder.autosize(List<Tensor> inputs)
          -
           
          +
          +
          Applies the createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with assertBackwardValidity().
          +
          ModelBuilder.autosize(Tensor... inputs)
          -
           
          +
          +
          Applies the createForwardValidity(List) method for the given inputs + to replace zero tensor dimensions (annotated with ? in symbolic definitions) + with a valid dimension size and name, and then checks that all computation + outcomes are valid with assertBackwardValidity().
          +
          ModelBuilder.config(String name, double value)
          -
          Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during operation(String) expressions.
          +
          Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during operation(String) expressions.
          ModelBuilder.config(String name, String value)
          -
           
          +
          +
Applies config(String, double) where the set value is obtained from + another configuration hyperparameter.
          +
          ModelBuilder.constant(String name, double value)
          @@ -112,9 +128,8 @@

          Uses of Mod
          ModelBuilder.createForwardValidity(List<Tensor> inputs)
          -
          Asserts that all components parsed into a call graph with - operation(String) are eventually used by at least one out(String) - component.
          +
          Asserts that a forward run of the architecture is valid given some input + data.
          ModelBuilder.function(String name, @@ -122,18 +137,20 @@

          Uses of Mod
           
          ModelBuilder.load(Path path)
          -
           
          +
          +
          Loads a ModelBuilder instance from the provided path, such as + Paths.get("example.jgnn").
          +
          ModelBuilder.operation(String desc)
          -
          Parses one or more operations split by new line characters or ; - to add to the execution graph.
          +
          Parses one or more operations split by new line characters or ; to add to the + execution graph.
          ModelBuilder.out(String name)
          -
          Declares the component with the given name an output of the - managed model.
          +
          Declares the component with the given name an output of the managed model.
          ModelBuilder.param(String name, @@ -147,8 +164,8 @@

          Uses of Mod
          -
          Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
          +
Declares a learnable mklab.JGNN.nn.inputs.Parameter component with + the given name, zero regularization, and initial value.
          ModelBuilder.print()
          @@ -159,25 +176,38 @@

          Uses of Mod
          ModelBuilder.runModel(ArrayList<Tensor> inputs)
          -
          This is a wrapper for getModel().predict(inputs) - without returning output values (use get(String) - afterwards to view outputs.
          +
          This is a wrapper for getModel().predict(inputs) without + returning output values (use get(String) afterwards to view outputs.
          ModelBuilder.runModel(Tensor... inputs)
          -
          This is a wrapper for getModel().predict(inputs) - without returning output values (use get(String) - afterwards to view outputs.
          +
          This is a wrapper for getModel().predict(inputs) without + returning output values (use get(String) afterwards to view outputs.
          ModelBuilder.save(Path path)
          -
           
          +
          +
          Serializes the model builder instance into a Path, such as + Paths.get("example.jgnn").
          +
          ModelBuilder.var(String name)
          -
          Declares a component with the given name to be used as an input - of the managed model.
          +
          Declares a component with the given name to be used as an input of the + managed model.
          +
          +

          +
          Methods in mklab.JGNN.adhoc with parameters of type ModelBuilder
          +
          +
          Modifier and Type
          +
          Method
          +
          Description
          + +
          ModelTraining.configFrom(ModelBuilder modelBuilder)
          +
          +
          Retrieves the learning rate (lr), epochs, batches, and patience parameters + from the configurations of a

        @@ -193,32 +223,21 @@

        Uses of Mod
        class 
        -
        Extends the capabilities of LayeredBuilder to use - for node classification.
        +
Extends the capabilities of LayeredBuilder for use in node + classification.
        class 
        -
        Extends the capabilities of the ModelBuilder - with the ability to define multilayer (e.g.
        +
        Extends the capabilities of the ModelBuilder with the ability to + define multilayer (e.g.
        class 
        -
         
        +
        +
        Extends the base ModelBuilder with the full capabilities of the + Neuralang scripting language.
        -

      - -
    • -
      -

      Uses of ModelBuilder in mklab.JGNN.nn

      -
      Methods in mklab.JGNN.nn with parameters of type ModelBuilder
      -
      -
      Modifier and Type
      -
      Method
      -
      Description
      - -
      ModelTraining.configFrom(ModelBuilder modelBuilder)
      -
       
    • diff --git a/docs/javadoc/mklab/JGNN/adhoc/class-use/ModelTraining.html b/docs/javadoc/mklab/JGNN/adhoc/class-use/ModelTraining.html new file mode 100644 index 00000000..e9cc8d2e --- /dev/null +++ b/docs/javadoc/mklab/JGNN/adhoc/class-use/ModelTraining.html @@ -0,0 +1,181 @@ + + + + +Uses of Class mklab.JGNN.adhoc.ModelTraining + + + + + + + + + + + + + + + +
      + +
      +
      +
      +

      Uses of Class
      mklab.JGNN.adhoc.ModelTraining

      +
      +
      Packages that use ModelTraining
      +
      +
      Package
      +
      Description
      + +
      +
      Contains classes that simplify data loading, model building, and training.
      +
      + +
      +
      Contains model training strategies that correspond to different predictive + tasks.
      +
      + +
      +
      Implements neural networks components that are combined to define GNNs or + other types of machine learning models.
      +
      +
      +
      + +
      +
      +
      +
      + + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/Citeseer.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/Citeseer.html index e624d6d8..71d15f5e 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/Citeseer.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/Citeseer.html @@ -1,11 +1,11 @@ - + Citeseer - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/Cora.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/Cora.html index 7b6e549c..efedbd1a 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/Cora.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/Cora.html @@ -1,11 +1,11 @@ - + Cora - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/Pubmed.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/Pubmed.html index 999678cc..0b65d305 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/Pubmed.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/Pubmed.html @@ -1,11 +1,11 @@ - + Pubmed - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Citeseer.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Citeseer.html index 6f143612..a1ec268a 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Citeseer.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Citeseer.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.datasets.Citeseer - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Cora.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Cora.html index a4b7474f..e63f3006 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Cora.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Cora.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.datasets.Cora - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Pubmed.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Pubmed.html index 59697ecd..d612d376 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Pubmed.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/class-use/Pubmed.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.datasets.Pubmed - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/package-summary.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/package-summary.html index 0e41f8ae..75943099 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/package-summary.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.adhoc.datasets - + @@ -42,7 +42,7 @@
      @@ -62,6 +62,15 @@

      Package mklab.JGNN.a


      package mklab.JGNN.adhoc.datasets
      +
      +
      This package contains datasets for out-of-the-box experimentation. When run + for the first time, the datasets also download their data in a + downloads/ directory in the running path.
      +
      +
      Author:
      +
      Emmanouil Krasanakis
      +
      +
      • @@ -71,9 +80,19 @@

        Package mklab.JGNN.a
        Package
        Description
        -
         
        +
        +
        Contains classes that simplify data loading, model building, and training.
        +
        -
         
        +
        +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
        +
        + +
        +
        Contains model training strategies that correspond to different predictive + tasks.
        +

      • diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/package-tree.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/package-tree.html index ebc49880..8f49448b 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/package-tree.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.adhoc.datasets Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/datasets/package-use.html b/docs/javadoc/mklab/JGNN/adhoc/datasets/package-use.html index 0a61e300..37719ce7 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/datasets/package-use.html +++ b/docs/javadoc/mklab/JGNN/adhoc/datasets/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.adhoc.datasets - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/package-summary.html b/docs/javadoc/mklab/JGNN/adhoc/package-summary.html index 8e41e246..42a56b7f 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/package-summary.html +++ b/docs/javadoc/mklab/JGNN/adhoc/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.adhoc - + @@ -42,7 +42,7 @@
        @@ -62,6 +62,14 @@

        Package mklab.JGNN.adhoc


        package mklab.JGNN.adhoc
        +
        +
        Contains classes that simplify data loading, model building, and training. + Top-level base classes are extended in sub-packages.
        +
        +
        Author:
        +
        Emmanouil Krasanakis
        +
        +
        • @@ -71,9 +79,19 @@

          Package mklab.JGNN.adhoc

          Package
          Description
          -
           
          +
          +
          This package contains datasets for out-of-the-box experimentation.
          +
          -
           
          +
          +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
          +
          + +
          +
          Contains model training strategies that correspond to different predictive + tasks.
          +
        • @@ -93,10 +111,17 @@

          Package mklab.JGNN.adhoc

          -
          This class and subclasses can be used to create Model instances - by automatically creating and managing NNOperation instances based on +
          This class and subclasses can be used to create Model instances by + automatically creating and managing NNOperation instances based on textual descriptions.
          + +
          +
          This is a helper class that automates the definition of training processes of + Model instances by defining the number of epochs, loss functions, + number of batches and the ability to use ThreadPool for parallelized + batch computations.
          +
          diff --git a/docs/javadoc/mklab/JGNN/adhoc/package-tree.html b/docs/javadoc/mklab/JGNN/adhoc/package-tree.html index 294a216e..f33e1e08 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/package-tree.html +++ b/docs/javadoc/mklab/JGNN/adhoc/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.adhoc Class Hierarchy - + @@ -64,6 +64,7 @@

          Class Hierarchy

        • mklab.JGNN.adhoc.Dataset
        • mklab.JGNN.adhoc.IdConverter
        • mklab.JGNN.adhoc.ModelBuilder
        • +
        • mklab.JGNN.adhoc.ModelTraining
      diff --git a/docs/javadoc/mklab/JGNN/adhoc/package-use.html b/docs/javadoc/mklab/JGNN/adhoc/package-use.html index 3c1498b4..77034cb6 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/package-use.html +++ b/docs/javadoc/mklab/JGNN/adhoc/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.adhoc - + @@ -57,13 +57,28 @@

      Uses of Package
      mk
      Package
      Description
      -
       
      +
      +
      Contains classes that simplify data loading, model building, and training.
      +
      -
       
      +
      +
      This package contains datasets for out-of-the-box experimentation.
      +
      -
       
      - -
       
      +
      +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
      +
      + +
      +
      Contains model training strategies that correspond to different predictive + tasks.
      +
      + +
      +
      Implements neural networks components that are combined to define GNNs or + other types of machine learning models.
      +
        @@ -79,10 +94,17 @@

        Uses of Package
        mk
        -
        This class and subclasses can be used to create Model instances - by automatically creating and managing NNOperation instances based on +
        This class and subclasses can be used to create Model instances by + automatically creating and managing NNOperation instances based on textual descriptions.
        + +
        +
        This is a helper class that automates the definition of training processes of + Model instances by defining the number of epochs, loss functions, + number of batches and the ability to use ThreadPool for parallelized + batch computations.
        +

      @@ -107,24 +129,41 @@

      Uses of Package
      mk
      Description
      -
      This class and subclasses can be used to create Model instances - by automatically creating and managing NNOperation instances based on +
      This class and subclasses can be used to create Model instances by + automatically creating and managing NNOperation instances based on textual descriptions.

    • +
      + +
      +
      Class
      +
      Description
      + +
      +
      This is a helper class that automates the definition of training processes of + Model instances by defining the number of epochs, loss functions, + number of batches and the ability to use ThreadPool for parallelized + batch computations.
      +
      +
      +
      +
    • +
    • Classes in mklab.JGNN.adhoc used by mklab.JGNN.nn
      Class
      Description
      - +
      -
      This class and subclasses can be used to create Model instances - by automatically creating and managing NNOperation instances based on - textual descriptions.
      +
      This is a helper class that automates the definition of training processes of + Model instances by defining the number of epochs, loss functions, + number of batches and the ability to use ThreadPool for parallelized + batch computations.
      diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/FastBuilder.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/FastBuilder.html index b3c7f47d..dcd87811 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/FastBuilder.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/FastBuilder.html @@ -1,11 +1,11 @@ - + FastBuilder - + @@ -82,10 +82,10 @@

      Class FastBuilder


      public class FastBuilder extends ModelBuilder
      -
      Extends the capabilities of LayeredBuilder to use - for node classification. It accepts the adjacency graph in the constructor, - to be used with the symbol A in operations or layer definitions, - and node features.
      +
Extends the capabilities of LayeredBuilder for use in node + classification. It accepts the adjacency graph in the constructor, to be used + with the symbol A in operations or layer definitions, and node + features.
      Author:
      Emmanouil Krasanakis
      @@ -116,8 +116,8 @@

      Constructor Summary

      FastBuilder(Matrix adjacency, Matrix features)
      -
      Creates a graph neural network builder from an - normalized adjacency matrix and a node feature matrix.
      +
Creates a graph neural network builder from a normalized adjacency matrix + and a node feature matrix.
    @@ -136,26 +136,29 @@

    Method Summary

    -
    Adds a classification layer that gather the number of inputs nodes - and applies softmax on all of them.
    +
Adds a classification layer that gathers the number of input nodes and + applies softmax on all of them.
    concat(int depth)
    -
    Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards.
    +
    Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards.
    config(String name, double value)
    -
    Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during ModelBuilder.operation(String) expressions.
    +
    Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during ModelBuilder.operation(String) expressions.
    config(String name, String value)
    -
     
    +
    +
Applies ModelBuilder.config(String, double) where the set value is obtained from + another configuration hyperparameter.
    +
    constant(String name, double value)
    @@ -177,21 +180,24 @@

    Method Summary

    Function<Integer,Double> func, int depth)
    -
    Defines a number of config(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
    +
    Defines a number of config(String, double) symbols involving a + {l} notation, for example so that they can be used during + layerRepeat(String, int).
    futureConstants(String constantName, Function<Integer,Double> func, int depth)
    -
    Defines a number of constant(String, double) symbols involving a {l} +
    Defines a number of constant(String, double) symbols involving a + {l} notation, for example so that they can be used during layerRepeat(String, int).
    layer(String expression)
    -
    Applies an operation(String) and increases the layer identifier count.
    +
    Applies an operation(String) and increases the layer identifier + count.
    layerRepeat(String expression, @@ -202,8 +208,8 @@

    Method Summary

    -
    Parses one or more operations split by new line characters or ; - to add to the execution graph.
    +
    Parses one or more operations split by new line characters or ; to add to the + execution graph.
    param(String name, @@ -217,8 +223,8 @@

    Method Summary

    param(String name, Tensor value)
    -
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
    +
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with + the given name, zero regularization, and initial value.
    rememberAs(String layerId)
    @@ -232,7 +238,7 @@

    Method Summary

    +assertBackwardValidity, autosize, autosize, createForwardValidity, describe, get, getConfig, getConfigOrDefault, getExecutionGraphDot, getModel, hasComponent, load, out, print, printState, runModel, runModel, save, var

    Methods inherited from class java.lang.Object

    equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    @@ -261,8 +267,8 @@

    FastBuilder

    FastBuilder

    public FastBuilder(Matrix adjacency, Matrix features)
    -
    Creates a graph neural network builder from an - normalized adjacency matrix and a node feature matrix.
    +
Creates a graph neural network builder from a normalized adjacency matrix + and a node feature matrix.
    Parameters:
    adjacency - The pre-normalized adjacency matrix.
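A hedged sketch of a two-layer architecture; A is the adjacency symbol mentioned above, matrix(...) and vector(...) create parameters from configuration values, and all names and sizes are illustrative:

    FastBuilder builder = new FastBuilder(adjacency, features) // adjacency pre-normalized
        .config("feats", 1433)
        .config("hidden", 64)
        .config("classes", 7)
        .layer("h{l+1}=relu(A@(h{l}@matrix(feats, hidden))+vector(hidden))")
        .layer("h{l+1}=A@(h{l}@matrix(hidden, classes))+vector(classes)")
        .classify();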
    @@ -297,7 +303,8 @@

    rememberAs

    layer

    public FastBuilder layer(String expression)
    -
    Applies an operation(String) and increases the layer identifier count.
    +
    Applies an operation(String) and increases the layer identifier + count.
    Parameters:
    expression - A parsable expression.
    @@ -316,8 +323,8 @@

    layer

    classify

    public FastBuilder classify()
    -
    Adds a classification layer that gather the number of inputs nodes - and applies softmax on all of them.
    +
Adds a classification layer that gathers the number of input nodes and + applies softmax on all of them.
    Returns:
    this builder.
    @@ -329,8 +336,8 @@

    classify

    layerRepeat

    public FastBuilder layerRepeat(String expression, int times)
    -
    Repeats a layer(String) definition a number of times. - Ideal for building deep architectures.
    +
    Repeats a layer(String) definition a number of times. Ideal for + building deep architectures.
    Parameters:
    expression - The expression to repeat for each layer.
    @@ -363,9 +370,24 @@

    function

    config

    public FastBuilder config(String name, String value)
    +
    Description copied from class: ModelBuilder
    +
Applies ModelBuilder.config(String, double) where the set value is obtained from + another configuration hyperparameter.
    Overrides:
    config in class ModelBuilder
    +
    Parameters:
    +
    name - The name of the configuration hyperparameter to set.
    +
    value - The name of the configuration hyperparameter whose value should + be copied.
    +
    Returns:
    +
    The builder's instance.
    +
    See Also:
    +
    + +
    @@ -375,16 +397,18 @@

    config

    public FastBuilder config(String name, double value)
    Description copied from class: ModelBuilder
    -
    Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during ModelBuilder.operation(String) expressions. - For in-expression use of hyperparameters, delcare them with ModelBuilder.constant(String, double).
    +
Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during ModelBuilder.operation(String) expressions. For + in-expression use of hyperparameters, declare them with + ModelBuilder.constant(String, double). In Neuralang terms, this implements the + broadcasting operation.
    Overrides:
    config in class ModelBuilder
    Parameters:
    name - The name of the configuration hyperparameter.
    -
    value - The value to be assigned to the hyperparameter. - Typically, provide a long number.
    +
    value - The value to be assigned to the hyperparameter. This may also be + a long number.
    Returns:
    The builder's instance.
    See Also:
    @@ -393,6 +417,7 @@

    config

  • ModelBuilder.operation(String)
  • ModelBuilder.param(String, Tensor)
  • ModelBuilder.param(String, double, Tensor)
  • +
  • ModelBuilder.config(String, String)
  • @@ -404,17 +429,17 @@

    param

    public FastBuilder param(String name, Tensor value)
    Description copied from class: ModelBuilder
    -
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
    +
Declares a learnable mklab.JGNN.nn.inputs.Parameter component with + the given name, zero regularization, and initial value.
    Overrides:
    param in class ModelBuilder
    Parameters:
    name - The name to be assigned to the new component.
    value - The initial value to be assigned to the parameter. Exact values - can be overridden by neural initialization strategies, but an initial value - should be declared nonetheless to determine the parameter type and allocate - any necessary memory.
    + can be overridden by neural initialization strategies, but an + initial value should be declared nonetheless to determine the + parameter type and allocate any necessary memory.
    Returns:
    The builder's instance.
    See Also:
    @@ -433,9 +458,9 @@

    constant

    public FastBuilder constant(String name, double value)
    Description copied from class: ModelBuilder
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use ModelBuilder.get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use ModelBuilder.get(String) + to retrieve the respective component.
    Overrides:
    constant in class ModelBuilder
    @@ -460,9 +485,9 @@

    constant

    public FastBuilder constant(String name, Tensor value)
    Description copied from class: ModelBuilder
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use ModelBuilder.get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use ModelBuilder.get(String) + to retrieve the respective component.
    Overrides:
    constant in class ModelBuilder
    @@ -494,12 +519,13 @@

    param

    param in class ModelBuilder
    Parameters:
    name - The name to be assigned to the new component.
    -
    regularization - The regularization value. Zero corresponds to no regularization. - Typically, this is non-negative.
    -
    value - The initial value to be assigned to the parameter. Exact values - can be overridden by neural initialization strategies, but an initial value - should be declared nonetheless to determine the parameter type and allocate - any necessary memory.
    +
    regularization - The regularization value. Zero corresponds to no + regularization. Typically, this is non-negative.
    +
    value - The initial value to be assigned to the parameter. + Exact values can be overridden by neural initialization + strategies, but an initial value should be declared + nonetheless to determine the parameter type and + allocate any necessary memory.
    Returns:
    The builder's instance.
    See Also:
    @@ -517,13 +543,12 @@

    param

    operation

    public FastBuilder operation(String desc)
    Description copied from class: ModelBuilder
    -
    Parses one or more operations split by new line characters or ; - to add to the execution graph. All operations should assign a - value to a new component name and comprise operators and functions. - For a detailed description of the domain-specific language this - method accepts, please refer to the library's - - online documentation.
    +
    Parses one or more operations split by new line characters or ; to add to the + execution graph. All operations should assign a value to a new component name + and comprise operators and functions. For a detailed description of the + domain-specific language this method accepts, please refer to the library's + online + documentation.
    Overrides:
    operation in class ModelBuilder
    @@ -549,14 +574,16 @@

    futureConfigs

    public FastBuilder futureConfigs(String config, Function<Integer,Double> func, int depth)
    -
    Defines a number of config(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
    +
    Defines a number of config(String, double) symbols involving a + {l} notation, for example so that they can be used during + layerRepeat(String, int).
    Parameters:
    -
    config - The configuration symbols (these should involve {l}).
    -
    func - A lambda Java function to calculate the configuration's value. This takes - as input an integer (starting from 0 for the current layer) and adds one for each - subsequently declared symbol.
    +
    config - The configuration symbols (these should involve + {l}).
    +
    func - A lambda Java function to calculate the configuration's value. + This takes as input an integer (starting from 0 for the current + layer) and adds one for each subsequently declared symbol.
    depth - The number of future layers expected to use the symbols.
    Returns:
    this builder.
    @@ -575,14 +602,16 @@

    futureConstants

    public FastBuilder futureConstants(String constantName, Function<Integer,Double> func, int depth)
    -
    Defines a number of constant(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
    +
Defines a number of constant(String, double) symbols involving a + {l} + notation, for example so that they can be used during layerRepeat(String, int).
    Parameters:
    -
    constantName - The configuration symbols (these should involve {l}).
    -
    func - A lambda Java function to calculate the constant's value. This takes - as input an integer (starting from 0 for the current layer) and adds one for each - subsequently declared symbol.
    +
constantName - The constant symbols (these should involve {l}).
+
func - A lambda Java function to calculate the constant's value. + This takes as input an integer (starting from 0 for the + current layer) and adds one for each subsequently + declared symbol.
    depth - The number of future layers expected to use the constant.
    Returns:
    this builder.
    @@ -599,9 +628,9 @@

    futureConstants

    concat

    public FastBuilder concat(int depth)
    -
    Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards. (For concatenation - of specific layers just use concat within normal operations.)
    +
    Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards. (For concatenation of specific layers + just use concat within normal operations.)
    Parameters:
    depth - The number of given layers to concatenate.
    diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/LayeredBuilder.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/LayeredBuilder.html index 5675607e..805d1701 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/LayeredBuilder.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/LayeredBuilder.html @@ -1,11 +1,11 @@ - + LayeredBuilder - + @@ -82,10 +82,10 @@

    Class LayeredBuilder


    public class LayeredBuilder extends ModelBuilder
    -
    Extends the capabilities of the ModelBuilder - with the ability to define multilayer (e.g. deep) neural architectures. - The symbols {l} and {l+1} are replaced in all expressions - with appropriate layer identifiers (these increase by one each time a new +
    Extends the capabilities of the ModelBuilder with the ability to + define multilayer (e.g. deep) neural architectures. The symbols + {l} and {l+1} are replaced in all expressions with + appropriate layer identifiers (these increase by one each time a new layer(String) is defined.
    See Also:
    @@ -135,15 +135,15 @@

    Method Summary

    concat(int depth)
    -
    Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards.
    +
    Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards.
    config(String name, double value)
    -
    Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during ModelBuilder.operation(String) expressions.
    +
    Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during ModelBuilder.operation(String) expressions.
    constant(String name, @@ -162,21 +162,24 @@

    Method Summary

    Function<Integer,Double> func, int depth)
    -
    Defines a number of config(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
    +
    Defines a number of config(String, double) symbols involving a + {l} notation, for example so that they can be used during + layerRepeat(String, int).
    futureConstants(String constantName, Function<Integer,Double> func, int depth)
    -
    Defines a number of constant(String, double) symbols involving a {l} +
    Defines a number of constant(String, double) symbols involving a + {l} notation, for example so that they can be used during layerRepeat(String, int).
    layer(String expression)
    -
    Applies an operation(String) and increases the layer identifier count.
    +
    Applies an operation(String) and increases the layer identifier + count.
    layerRepeat(String expression, @@ -187,14 +190,13 @@

    Method Summary

    -
    Parses one or more operations split by new line characters or ; - to add to the execution graph.
    +
    Parses one or more operations split by new line characters or ; to add to the + execution graph.
    out(String expression)
    -
    Declares the component with the given name an output of the - managed model.
    +
    Declares the component with the given name an output of the managed model.
    param(String name, @@ -208,28 +210,28 @@

    Method Summary

    param(String name, Tensor value)
    -
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
    +
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with + the given name, zero regularization, and initial value.
    rememberAs(String layerId)
    Sets the current layer identifier to a specific symbol layerId - so that future usage of {layerId} is automatically replaced with + so that future usage of {layerId} is automatically replaced with the identifier.
    var(String inputName)
    -
    Declares a component with the given name to be used as an input - of the managed model.
    +
    Declares a component with the given name to be used as an input of the + managed model.
    +assertBackwardValidity, autosize, autosize, config, createForwardValidity, describe, function, get, getConfig, getConfigOrDefault, getExecutionGraphDot, getModel, hasComponent, load, print, printState, runModel, runModel, save

    Methods inherited from class java.lang.Object

    equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    @@ -248,7 +250,7 @@

    Constructor Details

    LayeredBuilder

    public LayeredBuilder()
    -
    Instantiates a layered builder with input name h0. This can be +
    Instantiates a layered builder with input name h0. This can be used by future expressions involving h{l}. You can add more architecture inputs normally with var(String).
    @@ -265,11 +267,11 @@

    LayeredBuilder

    LayeredBuilder

    public LayeredBuilder(String inputName)
    -
    Instantiates a layered builder with the given symbol as an input name. - If you plan to immediately use a layer(String) expression - that involves X{l}, where X is some symbol, - set X0 as the architecture's input. You can add more - architecture inputs normally with var(String).
    +
    Instantiates a layered builder with the given symbol as an input name. If you + plan to immediately use a layer(String) expression that involves + X{l}, where X is some symbol, set X0 + as the architecture's input. You can add more architecture inputs normally + with var(String).
    Parameters:
    inputName - The symbol to use as the built architecture's input.
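A hedged sketch; h0 is the default input, {l} and {l+1} expand to layer identifiers, and all sizes are illustrative:

    LayeredBuilder builder = new LayeredBuilder()  // input component h0
        .config("hidden", 16)
        .layer("h{l+1}=relu(h{l}@matrix(64, hidden)+vector(hidden))")
        .layerRepeat("h{l+1}=relu(h{l}@matrix(hidden, hidden)+vector(hidden))", 2)
        .out("h{l}");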
    @@ -289,8 +291,8 @@

    Method Details

    var

    public LayeredBuilder var(String inputName)
    Description copied from class: ModelBuilder
    -
    Declares a component with the given name to be used as an input - of the managed model.
    +
    Declares a component with the given name to be used as an input of the + managed model.
    Overrides:
    var in class ModelBuilder
    @@ -306,7 +308,7 @@

    var

    rememberAs

    public LayeredBuilder rememberAs(String layerId)
    Sets the current layer identifier to a specific symbol layerId - so that future usage of {layerId} is automatically replaced with + so that future usage of {layerId} is automatically replaced with the identifier.
    Parameters:
    @@ -320,7 +322,8 @@

    rememberAs

    layer

    public LayeredBuilder layer(String expression)
    -
    Applies an operation(String) and increases the layer identifier count.
    +
    Applies an operation(String) and increases the layer identifier + count.
    Parameters:
    expression - A parsable expression.
    @@ -340,8 +343,8 @@

    layer

    layerRepeat

    public LayeredBuilder layerRepeat(String expression, int times)
    -
    Repeats a layer(String) definition a number of times. - Ideal for building deep architectures.
    +
    Repeats a layer(String) definition a number of times. Ideal for + building deep architectures.
    Parameters:
    expression - The expression to repeat for each layer.
    @@ -364,16 +367,18 @@

    config

    public LayeredBuilder config(String name, double value)
    Description copied from class: ModelBuilder
    -
    Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during ModelBuilder.operation(String) expressions. - For in-expression use of hyperparameters, delcare them with ModelBuilder.constant(String, double).
    +
Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during ModelBuilder.operation(String) expressions. For + in-expression use of hyperparameters, declare them with + ModelBuilder.constant(String, double). In Neuralang terms, this implements the + broadcasting operation.
    Overrides:
    config in class ModelBuilder
    Parameters:
    name - The name of the configuration hyperparameter.
    -
    value - The value to be assigned to the hyperparameter. - Typically, provide a long number.
    +
    value - The value to be assigned to the hyperparameter. This may also be + a long number.
    Returns:
    The builder's instance.
    See Also:
    @@ -382,6 +387,7 @@

    config

  • ModelBuilder.operation(String)
  • ModelBuilder.param(String, Tensor)
  • ModelBuilder.param(String, double, Tensor)
  • +
  • ModelBuilder.config(String, String)
  • @@ -393,17 +399,17 @@

    param

    public LayeredBuilder param(String name, Tensor value)
    Description copied from class: ModelBuilder
    -
    Declares a learnable mklab.JGNN.nn.inputs.Paramater component with the given name, - zero regularization, and initial value.
    +
Declares a learnable mklab.JGNN.nn.inputs.Parameter component with + the given name, zero regularization, and initial value.
    Overrides:
    param in class ModelBuilder
    Parameters:
    name - The name to be assigned to the new component.
    value - The initial value to be assigned to the parameter. Exact values - can be overridden by neural initialization strategies, but an initial value - should be declared nonetheless to determine the parameter type and allocate - any necessary memory.
    + can be overridden by neural initialization strategies, but an + initial value should be declared nonetheless to determine the + parameter type and allocate any necessary memory.
    Returns:
    The builder's instance.
    See Also:
    @@ -422,9 +428,9 @@

    constant

    public LayeredBuilder constant(String name, double value)
    Description copied from class: ModelBuilder
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use ModelBuilder.get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use ModelBuilder.get(String) + to retrieve the respective component.
    Overrides:
    constant in class ModelBuilder
    @@ -449,9 +455,9 @@

    constant

    public LayeredBuilder constant(String name, Tensor value)
    Description copied from class: ModelBuilder
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use ModelBuilder.get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use ModelBuilder.get(String) + to retrieve the respective component.
    Overrides:
    constant in class ModelBuilder
    @@ -483,12 +489,13 @@

    param

    param in class ModelBuilder
    Parameters:
    name - The name to be assigned to the new component.
    -
    regularization - The regularization value. Zero corresponds to no regularization. - Typically, this is non-negative.
    -
    value - The initial value to be assigned to the parameter. Exact values - can be overridden by neural initialization strategies, but an initial value - should be declared nonetheless to determine the parameter type and allocate - any necessary memory.
    +
    regularization - The regularization value. Zero corresponds to no + regularization. Typically, this is non-negative.
    +
    value - The initial value to be assigned to the parameter. + Exact values can be overridden by neural initialization + strategies, but an initial value should be declared + nonetheless to determine the parameter type and + allocate any necessary memory.
    Returns:
    The builder's instance.
    See Also:
    @@ -506,13 +513,12 @@

    param

    operation

    public LayeredBuilder operation(String desc)
    Description copied from class: ModelBuilder
    -
    Parses one or more operations split by new line characters or ; - to add to the execution graph. All operations should assign a - value to a new component name and comprise operators and functions. - For a detailed description of the domain-specific language this - method accepts, please refer to the library's - - online documentation.
    +
    Parses one or more operations split by new line characters or ; to add to the + execution graph. All operations should assign a value to a new component name + and comprise operators and functions. For a detailed description of the + domain-specific language this method accepts, please refer to the library's + online + documentation.
    Overrides:
    operation in class ModelBuilder
    @@ -537,10 +543,9 @@

    operation

    out

    public LayeredBuilder out(String expression)
    Description copied from class: ModelBuilder
    -
    Declares the component with the given name an output of the - managed model. The component should have already been assigned a value. - To output complex expressions use ModelBuilder.operation(String) - to define them first.
    +
    Declares the component with the given name an output of the managed model. + The component should have already been assigned a value. To output complex + expressions use ModelBuilder.operation(String) to define them first.
    Overrides:
    out in class ModelBuilder
    @@ -555,9 +560,9 @@

    out

    concat

    public LayeredBuilder concat(int depth)
    -
    Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards. (For concatenation - of specific layers just use concat within normal operations.)
    +
    Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards. (For concatenation of specific layers + just use concat within normal operations.)
    Parameters:
    depth - The number of given layers to concatenate.
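A minimal sketch of concat, assuming that the no-argument LayeredBuilder constructor declares the input layer h0 and that the trivial layer expressions below are valid; {l} and {l+1} denote the current and next layer identifiers:

    import mklab.JGNN.adhoc.parsers.LayeredBuilder;

    static LayeredBuilder concatDemo() {
        return new LayeredBuilder()
                .layer("h{l+1} = h{l} + h{l}")
                .layer("h{l+1} = h{l} + h{l}")
                .concat(2)     // horizontally concatenates the last two layer outputs
                .out("h{l}");  // the concatenation becomes the current layer
    }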
    @@ -572,14 +577,16 @@

    futureConfigs

    public LayeredBuilder futureConfigs(String config, Function<Integer,Double> func, int depth)
    -
    Defines a number of config(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
    +
    Defines a number of config(String, double) symbols involving a + {l} notation, for example so that they can be used during + layerRepeat(String, int).
    Parameters:
    -
    config - The configuration symbols (these should involve {l}).
    -
    func - A lambda Java function to calculate the configuration's value. This takes - as input an integer (starting from 0 for the current layer) and adds one for each - subsequently declared symbol.
    +
    config - The configuration symbols (these should involve + {l}).
    +
    func - A lambda Java function to calculate the configuration's value. + This takes as input an integer (starting from 0 for the current + layer) and adds one for each subsequently declared symbol.
    depth - The number of future layers expected to use the symbols.
    Returns:
    this layer builder.
    @@ -598,14 +605,16 @@

    futureConstants

    public LayeredBuilder futureConstants(String constantName, Function<Integer,Double> func, int depth)
    -
    Defines a number of constant(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
    +
Defines a number of constant(String, double) symbols involving a + {l} + notation, for example so that they can be used during layerRepeat(String, int).
    Parameters:
constantName - The configuration symbols (these should involve {l}).
    -
    func - A lambda Java function to calculate the constant's value. This takes - as input an integer (starting from 0 for the current layer) and adds one for each - subsequently declared symbol.
    +
    func - A lambda Java function to calculate the constant's value. + This takes as input an integer (starting from 0 for the + current layer) and adds one for each subsequently + declared symbol.
    depth - The number of future layers expected to use the constant.
    Returns:
    this layer builder.
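A minimal sketch of {l}-parameterized symbols feeding layerRepeat(String, int), covering both futureConfigs and futureConstants; the layer expression is an illustrative assumption:

    import mklab.JGNN.adhoc.parsers.LayeredBuilder;

    // Declares alpha0=0.9 and alpha1=0.8 for the next two layers; the lambda
    // receives 0 for the current layer and one more per declared symbol.
    static LayeredBuilder repeatedLayers() {
        return new LayeredBuilder()
                .futureConstants("alpha{l}", l -> 0.9 - 0.1 * l, 2)
                .layerRepeat("h{l+1} = alpha{l} * h{l}", 2);
    }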
    diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/Neuralang.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/Neuralang.html index 32b8e0f7..28284dd5 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/Neuralang.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/Neuralang.html @@ -1,11 +1,11 @@ - + Neuralang - + @@ -82,6 +82,19 @@

    Class Neuralang


    public class Neuralang extends ModelBuilder
    +
    Extends the base ModelBuilder with the full capabilities of the + Neuralang scripting language.
    +
    +
    Author:
    +
    Emmanouil Krasanakis
    +
    See Also:
    +
    + +
    +
      @@ -113,8 +126,8 @@

      Method Summary

      config(String name, double value)
      -
      Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during ModelBuilder.operation(String) expressions.
      +
      Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during ModelBuilder.operation(String) expressions.
      constant(String name, @@ -130,22 +143,27 @@

      Method Summary

      parse(String text)
      -
       
      +
      +
      Parses Neuralang source code by handling function declarations in addition to + other expressions.
      +
      parse(Path path)
      -
       
      +
      +
      Parses a Neuralang source code file.
      +
      var(String var)
      -
      Declares a component with the given name to be used as an input - of the managed model.
      +
      Declares a component with the given name to be used as an input of the + managed model.
    +assertBackwardValidity, autosize, autosize, config, createForwardValidity, describe, function, get, getConfig, getConfigOrDefault, getExecutionGraphDot, getModel, hasComponent, load, operation, out, param, param, print, printState, runModel, runModel, save

    Methods inherited from class java.lang.Object

    equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    @@ -180,16 +198,18 @@

    config

    public Neuralang config(String name, double value)
    Description copied from class: ModelBuilder
    -
    Declares a configuration hyperparameter, which can be used to declare - matrix and vector parameters during ModelBuilder.operation(String) expressions. - For in-expression use of hyperparameters, delcare them with ModelBuilder.constant(String, double).
    +
Declares a configuration hyperparameter, which can be used to declare matrix + and vector parameters during ModelBuilder.operation(String) expressions. For + in-expression use of hyperparameters, declare them with + ModelBuilder.constant(String, double). In Neuralang terms, this implements the + broadcasting operation.
    Overrides:
    config in class ModelBuilder
    Parameters:
    name - The name of the configuration hyperparameter.
    -
    value - The value to be assigned to the hyperparameter. - Typically, provide a long number.
    +
    value - The value to be assigned to the hyperparameter. This may also be + a long number.
    Returns:
    The builder's instance.
    See Also:
    @@ -198,6 +218,7 @@

    config

  • ModelBuilder.operation(String)
  • ModelBuilder.param(String, Tensor)
  • ModelBuilder.param(String, double, Tensor)
  • +
  • ModelBuilder.config(String, String)
  • @@ -207,12 +228,35 @@

    config

    parse

    public Neuralang parse(Path path)
    +
    Parses a Neuralang source code file. Reads a file like + Paths.get("models.nn") from disk with + Files.readAllLines(Path), and parses the loaded String.
    +
    +
    Parameters:
    +
    path - The source code file.
    +
    Returns:
    +
    The Neuralang builder's instance.
    +
    See Also:
    +
    + +
    +
  • parse

    public Neuralang parse(String text)
    +
    Parses Neuralang source code by handling function declarations in addition to + other expressions.
    +
    +
    Parameters:
    +
    text - The source code to parse.
    +
    Returns:
    +
    The Neuralang builder's instance.
    +
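A minimal sketch tying the two parse overloads to the rest of the builder; it follows the javadoc's own Paths.get("models.nn") example and assumes that file holds Neuralang function declarations:

    import java.nio.file.Paths;
    import mklab.JGNN.adhoc.parsers.Neuralang;

    static Neuralang loadScript() {
        return new Neuralang()
                .config("hidden", 64)            // hyperparameter visible to the script
                .parse(Paths.get("models.nn"));  // parse(String) accepts inline source instead
    }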
  • @@ -221,9 +265,9 @@

    constant

    public Neuralang constant(String name, Tensor value)
    Description copied from class: ModelBuilder
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use ModelBuilder.get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use ModelBuilder.get(String) + to retrieve the respective component.
    Overrides:
    constant in class ModelBuilder
    @@ -247,9 +291,9 @@

    constant

    public Neuralang constant(String name, double value)
    Description copied from class: ModelBuilder
    -
    Declares a non-learnable constant component with the given name. - This can be used in computations. To edit the constant's values, - use ModelBuilder.get(String) to retrieve the respective component.
    +
    Declares a non-learnable constant component with the given name. This can be + used in computations. To edit the constant's values, use ModelBuilder.get(String) + to retrieve the respective component.
    Overrides:
    constant in class ModelBuilder
    @@ -273,8 +317,8 @@

    constant

    var

    public Neuralang var(String var)
    Description copied from class: ModelBuilder
    -
    Declares a component with the given name to be used as an input - of the managed model.
    +
    Declares a component with the given name to be used as an input of the + managed model.
    Overrides:
    var in class ModelBuilder
    diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/FastBuilder.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/FastBuilder.html index 1fe41530..7399024d 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/FastBuilder.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/FastBuilder.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.parsers.FastBuilder - + @@ -57,7 +57,10 @@

    Use
    Package
    Description
    -
     
    +
    +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
    +
      @@ -72,14 +75,14 @@

      Uses of FastBuilder
      FastBuilder.classify()
      -
      Adds a classification layer that gather the number of inputs nodes - and applies softmax on all of them.
      +
Adds a classification layer that gathers the number of input nodes and + applies softmax on all of them.
      FastBuilder.concat(int depth)
      -
      Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards.
      +
      Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards.
      FastBuilder.config(String name, @@ -106,21 +109,24 @@

      Uses of Function<Integer,Double> func, int depth)

      -
      Defines a number of config(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
      +
      Defines a number of config(String, double) symbols involving a + {l} notation, for example so that they can be used during + layerRepeat(String, int).
      FastBuilder.futureConstants(String constantName, Function<Integer,Double> func, int depth)
      -
Defines a number of constant(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
+
Defines a number of constant(String, double) symbols involving a + {l} + notation, for example so that they can be used during layerRepeat(String, int).
      FastBuilder.layer(String expression)
      -
      Applies an operation(String) and increases the layer identifier count.
      +
      Applies an operation(String) and increases the layer identifier + count.
      FastBuilder.layerRepeat(String expression, diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/LayeredBuilder.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/LayeredBuilder.html index 4628d123..636cb323 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/LayeredBuilder.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/LayeredBuilder.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.parsers.LayeredBuilder - + @@ -57,7 +57,10 @@

      Package
      Description
      -
       
      +
      +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
      +

      LayeredBuilder.concat(int depth)
      -
      Concatenates horizontally the output of a number of given layers, - starting from the last one and going backwards.
      +
      Concatenates horizontally the output of a number of given layers, starting + from the last one and going backwards.
      LayeredBuilder.config(String name, @@ -92,21 +95,24 @@

      Uses of Function<Integer,Double> func, int depth)

      -
      Defines a number of config(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
      +
      Defines a number of config(String, double) symbols involving a + {l} notation, for example so that they can be used during + layerRepeat(String, int).
      LayeredBuilder.futureConstants(String constantName, Function<Integer,Double> func, int depth)
      -
Defines a number of constant(String, double) symbols involving a {l} - notation, for example so that they can be used during layerRepeat(String, int).
+
Defines a number of constant(String, double) symbols involving a + {l} + notation, for example so that they can be used during layerRepeat(String, int).
      LayeredBuilder.layer(String expression)
      -
      Applies an operation(String) and increases the layer identifier count.
      +
      Applies an operation(String) and increases the layer identifier + count.
      LayeredBuilder.layerRepeat(String expression, @@ -133,7 +139,7 @@

      Uses of LayeredBuilder.rememberAs(String layerId)

      Sets the current layer identifier to a specific symbol layerId - so that future usage of {layerId} is automatically replaced with + so that future usage of {layerId} is automatically replaced with the identifier.
      diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/Neuralang.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/Neuralang.html index 1bac48d0..8b166f04 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/Neuralang.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/class-use/Neuralang.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.adhoc.parsers.Neuralang - + @@ -57,7 +57,10 @@

      Uses
      Package
      Description
      -
       
      +
      +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
      +

        @@ -83,10 +86,15 @@

        Uses of  
        Neuralang.parse(String text)
        -
         
        +
        +
        Parses Neuralang source code by handling function declarations in addition to + other expressions.
        +
        Neuralang.parse(Path path)
        -
         
        +
        +
        Parses a Neuralang source code file.
        +
        Neuralang.var(String var)
         
        diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/package-summary.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/package-summary.html index be8d702a..e386764b 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/package-summary.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.adhoc.parsers - + @@ -42,7 +42,7 @@
        @@ -62,6 +62,17 @@

        Package mklab.JGNN.ad


        package mklab.JGNN.adhoc.parsers
        +
        +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions. This avoids + hand-wiring data transfers and makes for concise model creation. Different + builders correspond to different practical needs for architecture + definitions.
        +
        +
        Author:
        +
        Emmanouil Krasanakis
        +
        +
        • @@ -71,9 +82,18 @@

          Package mklab.JGNN.ad
          Package
          Description
          -
           
          +
          +
          Contains classes that simplify data loading, model building, and training.
          +
          -
           
          +
          +
          This package contains datasets for out-of-the-box experimentation.
          +
          + +
          +
          Contains model training strategies that correspond to different predictive + tasks.
          +

        • @@ -85,16 +105,19 @@

          Package mklab.JGNN.ad
          Description
          -
          Extends the capabilities of LayeredBuilder to use - for node classification.
          +
          Extends the capabilities of LayeredBuilder to use for node + classification.
          -
          Extends the capabilities of the ModelBuilder - with the ability to define multilayer (e.g.
          +
          Extends the capabilities of the ModelBuilder with the ability to + define multilayer (e.g.
          -
           
          +
          +
          Extends the base ModelBuilder with the full capabilities of the + Neuralang scripting language.
          +
          diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/package-tree.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/package-tree.html index 5e228318..5b89ed2f 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/package-tree.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.adhoc.parsers Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/adhoc/parsers/package-use.html b/docs/javadoc/mklab/JGNN/adhoc/parsers/package-use.html index c2a3314f..1a9c8b5a 100644 --- a/docs/javadoc/mklab/JGNN/adhoc/parsers/package-use.html +++ b/docs/javadoc/mklab/JGNN/adhoc/parsers/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.adhoc.parsers - + @@ -57,7 +57,10 @@

          Uses of Packa
          Package
          Description
          -
           
          +
          +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
          +
            @@ -69,16 +72,19 @@

            Uses of Packa
            Description
            -
            Extends the capabilities of LayeredBuilder to use - for node classification.
            +
            Extends the capabilities of LayeredBuilder to use for node + classification.
            -
            Extends the capabilities of the ModelBuilder - with the ability to define multilayer (e.g.
            +
            Extends the capabilities of the ModelBuilder with the ability to + define multilayer (e.g.
            -
             
            +
            +
            Extends the base ModelBuilder with the full capabilities of the + Neuralang scripting language.
            +

diff --git a/docs/javadoc/mklab/JGNN/adhoc/train/NodeClassification.html b/docs/javadoc/mklab/JGNN/adhoc/train/NodeClassification.html new file mode 100644 index 00000000..22d31681 --- /dev/null +++ b/docs/javadoc/mklab/JGNN/adhoc/train/NodeClassification.html @@ -0,0 +1,202 @@
+NodeClassification

          Class NodeClassification

+java.lang.Object
+  mklab.JGNN.adhoc.ModelTraining
+    mklab.JGNN.adhoc.train.NodeClassification
+
+public class NodeClassification
+extends ModelTraining
+
+Extends the ModelTraining class with a method to explicitly train a
+model from feature and label matrices.
+
+Author:
+  Emmanouil Krasanakis
+See Also:

            Constructor Details


              NodeClassification

+public NodeClassification()

            Method Details


              train

+public Model train(Model model,
+                   Matrix features,
+                   Matrix labels,
+                   Slice trainingSamples,
+                   Slice validationSamples)
+
+Trains a Model instance based on current settings. The graph is
+assumed to be known and declared as an architecture constant.
+
+Overrides:
+  train in class ModelTraining
+Parameters:
+  model - The model instance to train.
+  features - A matrix whose columns correspond to sample features.
+  labels - A matrix whose columns correspond to sample (one hot) labels.
+  trainingSamples - Which columns to select for training.
+  validationSamples - Which columns to select for validation.
+Returns:
+  The trained model (the same instance as the first argument).
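A minimal sketch of this training entry point; model construction and the feature/label matrices are omitted, the 60/20 slice split is illustrative, and the Adam import is assumed to be mklab.JGNN.nn.optimizers.Adam. The setOptimizer/setEpochs/setPatience configuration follows the library's example code:

    import mklab.JGNN.adhoc.ModelTraining;
    import mklab.JGNN.adhoc.train.NodeClassification;
    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.Slice;
    import mklab.JGNN.nn.Model;
    import mklab.JGNN.nn.optimizers.Adam;

    static Model trainNodeClassifier(Model model, Matrix features,
                                     Matrix labels, Slice nodes) {
        ModelTraining trainer = new NodeClassification()
                .setOptimizer(new Adam(0.01))
                .setEpochs(300)
                .setPatience(100);
        return trainer.train(model, features, labels,
                nodes.range(0, 0.6),     // training samples
                nodes.range(0.6, 0.8));  // validation samples
    }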
diff --git a/docs/javadoc/mklab/JGNN/adhoc/train/class-use/NodeClassification.html b/docs/javadoc/mklab/JGNN/adhoc/train/class-use/NodeClassification.html new file mode 100644 index 00000000..cd2e5200 --- /dev/null +++ b/docs/javadoc/mklab/JGNN/adhoc/train/class-use/NodeClassification.html @@ -0,0 +1,59 @@

          Uses of Class
          mklab.JGNN.adhoc.train.NodeClassification

+No usage of mklab.JGNN.adhoc.train.NodeClassification

diff --git a/docs/javadoc/mklab/JGNN/adhoc/train/package-summary.html b/docs/javadoc/mklab/JGNN/adhoc/train/package-summary.html new file mode 100644 index 00000000..7ac0f654 --- /dev/null +++ b/docs/javadoc/mklab/JGNN/adhoc/train/package-summary.html @@ -0,0 +1,117 @@
+mklab.JGNN.adhoc.train

          Package mklab.JGNN.adhoc.train

+package mklab.JGNN.adhoc.train
+
+Contains model training strategies that correspond to different predictive
+tasks. Example tasks are node and graph classification.
+
+Author:
+  Emmanouil Krasanakis
+
+Classes
+  Class
+  Description
+  NodeClassification
+    Extends the ModelTraining class with a method to explicitly train a
+    model from feature and label matrices.
diff --git a/docs/javadoc/mklab/JGNN/adhoc/train/package-tree.html b/docs/javadoc/mklab/JGNN/adhoc/train/package-tree.html new file mode 100644 index 00000000..501cefb5 --- /dev/null +++ b/docs/javadoc/mklab/JGNN/adhoc/train/package-tree.html @@ -0,0 +1,77 @@
+mklab.JGNN.adhoc.train Class Hierarchy

          Hierarchy For Package mklab.JGNN.adhoc.train

+Package Hierarchies:

          Class Hierarchy

+java.lang.Object
+  mklab.JGNN.adhoc.ModelTraining
+    mklab.JGNN.adhoc.train.NodeClassification

diff --git a/docs/javadoc/mklab/JGNN/adhoc/train/package-use.html b/docs/javadoc/mklab/JGNN/adhoc/train/package-use.html new file mode 100644 index 00000000..88e6ceab --- /dev/null +++ b/docs/javadoc/mklab/JGNN/adhoc/train/package-use.html @@ -0,0 +1,59 @@

          Uses of Package
          mklab.JGNN.adhoc.train

+No usage of mklab.JGNN.adhoc.train

diff --git a/docs/javadoc/mklab/JGNN/core/Distribution.html b/docs/javadoc/mklab/JGNN/core/Distribution.html index ac45784d..d7c85826 100644 --- a/docs/javadoc/mklab/JGNN/core/Distribution.html +++ b/docs/javadoc/mklab/JGNN/core/Distribution.html @@ -80,9 +80,8 @@

          Interface Distribution


    public interface Distribution
    -
    This interface abstracts a probability distribution - that can be passed to Tensor.setToRandom(Distribution) - for random tensor initialization.
    +
    This interface abstracts a probability distribution that can be passed to + Tensor.setToRandom(Distribution) for random tensor initialization.
    Author:
    Emmanouil Krasanakis
    diff --git a/docs/javadoc/mklab/JGNN/core/Matrix.html b/docs/javadoc/mklab/JGNN/core/Matrix.html index fcd3e288..3f01c874 100644 --- a/docs/javadoc/mklab/JGNN/core/Matrix.html +++ b/docs/javadoc/mklab/JGNN/core/Matrix.html @@ -1,11 +1,11 @@ - + Matrix - + @@ -91,10 +91,10 @@

    Class Matrix

    public abstract class Matrix extends Tensor
    This class provides an abstract implementation of Matrix functionalities. - Matrices inherit Tensor operations, such as addition, - element-by-element multiplication, randomizing them and producing zero copies. - Additionally, matrix multiplication, transposition and access operations are - provided.
    + Matrices inherit Tensor operations, such as addition, + element-by-element multiplication, randomizing them and producing zero + copies. Additionally, matrix multiplication, transposition and access + operations are provided.
    Author:
    Emmanouil Krasanakis
    @@ -181,8 +181,8 @@

    Method Summary

    -
    Creates a transposed version of the matrix that accesses the same elements (thus, editing one - edits the other) without allocating additional memory.
    +
    Creates a transposed version of the matrix that accesses the same elements + (thus, editing one edits the other) without allocating additional memory.
    @@ -227,8 +227,8 @@

    Method Summary

    -
    Retrieves an iterable that traverses (row, col) entry pairs - of non zero entries.
    +
    Retrieves an iterable that traverses (row, col) entry pairs of non zero + entries.
    @@ -241,20 +241,20 @@

    Method Summary

    matmul(Matrix with)
    -
    Performs the matrix multiplication of this*with and the recipient.
    +
    Performs the matrix multiplication of this*with and the + recipient.
    matmul(Matrix with, boolean transposeSelf, boolean transposeWith)
    -
    Can be used to perform fast computation of the matrix multiplications -
    this*with, -
    this.transposed()*with -
    this*with.transposed(), -
    this.transposed()*with.transposed() -
    while avoiding the overhead of calling - transposed().
    +
    Can be used to perform fast computation of the matrix multiplications
    + this*with,
    + this.transposed()*with
    + this*with.transposed(),
    + this.transposed()*with.transposed()
    + while avoiding the overhead of calling transposed().
    @@ -303,14 +303,14 @@

    Method Summary

    -
    Sets the Matrix to its asymmetrically normalized transformation - by appropriately adjusting its element values.
    +
    Sets the Matrix to its asymmetrically normalized transformation by + appropriately adjusting its element values.
    -
    Sets the Matrix to its symmetrically normalized transformation - by appropriately adjusting its element values.
    +
    Sets the Matrix to its symmetrically normalized transformation by + appropriately adjusting its element values.
    @@ -333,12 +333,14 @@

    Method Summary

    -
    A string serialization of the tensor that can be used by the constructor DenseTensor(String) to create an identical copy.
    +
    A string serialization of the tensor that can be used by the constructor + DenseTensor(String) to create an identical copy.
    -
    Performs the linear algebra transformation A*x where A is this matrix and x a vector
    +
    Performs the linear algebra transformation A*x where A is this matrix and x a + vector
    @@ -348,25 +350,27 @@

    Method Summary

    -
    Creates a Matrix with the same class and dimensions and all element set to zero.
    +
Creates a Matrix with the same class and dimensions and all elements set to + zero.
    zeroCopy(long size)
    -
    Creates a Matrix with the same class and dimensions and all element set to zero.
    +
Creates a Matrix with the same class and dimensions and all elements set to + zero.
    abstract Matrix
    zeroCopy(long rows, long cols)
    -
    Creates a matrix of the same class and all element set to zero, but with - a given number of rows and columns.
    +
Creates a matrix of the same class and all elements set to zero, but with a + given number of rows and columns.
    zeroCopy(Tensor prototype)
    -
    Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
    +
    Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
    @@ -408,12 +412,13 @@

    getColName

    setDimensionName

    public Matrix setDimensionName(String rowName, String colName)
    -
    Sets a name for the matrix's row and column dimensions. If set, names are checked for - compatibility during matrix operations.
    +
    Sets a name for the matrix's row and column dimensions. If set, names are + checked for compatibility during matrix operations.
    Parameters:
    rowName - The new row name or null to remove current name.
    -
    colName - The new column name or null to remove current name.
    +
    colName - The new column name or null to remove current + name.
    Returns:
    this Matrix instance.
    See Also:
    @@ -460,7 +465,8 @@

    setColName

    compatibility during matrix operations.
    Parameters:
    -
    colName - The new column name or null to remove current name.
    +
    colName - The new column name or null to remove current + name.
    Returns:
    this Matrix instance.
    See Also:
    @@ -478,8 +484,8 @@

    setColName

    getNonZeroEntries

    public abstract Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
    -
    Retrieves an iterable that traverses (row, col) entry pairs - of non zero entries.
    +
    Retrieves an iterable that traverses (row, col) entry pairs of non zero + entries.
    Returns:
    An Entry iterable.
    @@ -497,9 +503,10 @@

    getNonZeroEntries

    setDimensionName

    public Matrix setDimensionName(Tensor other)
    Description copied from class: Tensor
    -
    Fills in dimension names per an example Tensor.isMatching(mklab.JGNN.core.Tensor) tensor. This appropriately fills in dimension - names of inherited classes too, such as matrices. Effectively, this method automatically infers - dimension names during operations.
    +
    Fills in dimension names per an example Tensor.isMatching(mklab.JGNN.core.Tensor) tensor. This + appropriately fills in dimension names of inherited classes too, such as + matrices. Effectively, this method automatically infers dimension names + during operations.
    Overrides:
    setDimensionName in class Tensor
    @@ -514,7 +521,8 @@

    setDimensionName

    zeroCopy

    public Matrix zeroCopy()
    -
    Creates a Matrix with the same class and dimensions and all element set to zero.
    +
Creates a Matrix with the same class and dimensions and all elements set to + zero.
    Overrides:
    zeroCopy in class Tensor
    @@ -533,8 +541,9 @@

    zeroCopy

    zeroCopy

    public Tensor zeroCopy(long size)
    -
    Creates a Matrix with the same class and dimensions and all element set to zero. This - checks that the copy has a total number of elements equal to the given size.
    +
Creates a Matrix with the same class and dimensions and all elements set to + zero. This checks that the copy has a total number of elements equal to the + given size.
    Specified by:
    zeroCopy in class Tensor
    @@ -557,8 +566,8 @@

    zeroCopy

    zeroCopy

    public Tensor zeroCopy(Tensor prototype)
    -
    Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
    +
    Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
    Overrides:
    zeroCopy in class Tensor
    @@ -570,8 +579,8 @@

    zeroCopy

    zeroCopy

    public abstract Matrix zeroCopy(long rows, long cols)
    -
    Creates a matrix of the same class and all element set to zero, but with - a given number of rows and columns.
    +
Creates a matrix of the same class and all elements set to zero, but with a + given number of rows and columns.
    Parameters:
    rows - The number of rows of the matrix.
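A short sketch of why this overload helps write subclass-agnostic code; the getRows()/getCols() accessors are assumed:

    import mklab.JGNN.core.Matrix;

    // Allocates an all-zero matrix of the same runtime class as the input.
    static Matrix zerosLike(Matrix m) {
        return m.zeroCopy(m.getRows(), m.getCols());
    }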
    @@ -632,11 +641,11 @@

    getDimensionSize

    Parameters:
    name - The given name.
    Returns:
    -
    Either the number of rows or the number of cols, depending on which dimension - the given name matches,
    +
    Either the number of rows or the number of cols, depending on which + dimension the given name matches,
    Throws:
    -
    RuntimeException - if both matrix dimensions have the same name or if the given - name is not a matrix dimension.
    +
    RuntimeException - if both matrix dimensions have the same name or if + the given name is not a matrix dimension.
    See Also:
      @@ -686,10 +695,9 @@

      put

      transposed

      public Matrix transposed()
      -
      Creates a transposed copy of the matrix. - Note: Contrary to typical tensor operations, in-place transposition is not supported. - However, related methods can help avoid explicit transposition without allocating more - memory.
      +
      Creates a transposed copy of the matrix. Note: Contrary to typical tensor + operations, in-place transposition is not supported. However, related methods + can help avoid explicit transposition without allocating more memory.
      Returns:
      A transposed copy of the matrix.
      @@ -707,8 +715,8 @@

      transposed

      asTransposed

      public Matrix asTransposed()
      -
      Creates a transposed version of the matrix that accesses the same elements (thus, editing one - edits the other) without allocating additional memory.
      +
      Creates a transposed version of the matrix that accesses the same elements + (thus, editing one edits the other) without allocating additional memory.
      Returns:
      A TransposedMatrix.
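A sketch contrasting the two transposition flavors; the put(row, col, value) element setter is assumed:

    import mklab.JGNN.core.Matrix;

    static void transposeDemo(Matrix m) {
        Matrix copy = m.transposed();    // fresh memory
        copy.put(0, 1, 5.0);             // m is unaffected
        Matrix view = m.asTransposed();  // shared memory
        view.put(0, 1, 5.0);             // also sets element (1, 0) of m
    }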
      @@ -719,7 +727,8 @@

      asTransposed

      transform

      public Tensor transform(Tensor x)
      -
      Performs the linear algebra transformation A*x where A is this matrix and x a vector
      +
      Performs the linear algebra transformation A*x where A is this matrix and x a + vector
      Parameters:
      x - The one-dimensional tensor which is the vector being transformed.
      @@ -732,7 +741,8 @@

      transform

      matmul

      public Matrix matmul(Matrix with)
      -
      Performs the matrix multiplication of this*with and the recipient.
      +
      Performs the matrix multiplication of this*with and the + recipient.
      Parameters:
      with - The matrix to multiply with.
      @@ -753,19 +763,20 @@

      matmul

      public Matrix matmul(Matrix with, boolean transposeSelf, boolean transposeWith)
      -
      Can be used to perform fast computation of the matrix multiplications -
      this*with, -
      this.transposed()*with -
      this*with.transposed(), -
      this.transposed()*with.transposed() -
      while avoiding the overhead of calling - transposed(). In this first of those cases, this operation - becomes equivalent to matmul(Matrix).
      +
      Can be used to perform fast computation of the matrix multiplications
      + this*with,
      + this.transposed()*with
      + this*with.transposed(),
      + this.transposed()*with.transposed()
+ while avoiding the overhead of calling transposed(). In the first + of those cases, this operation becomes equivalent to matmul(Matrix).
      Parameters:
      with - The matrix to multiply with.
      -
      transposeSelf - Whether this matrix should be transposed before multiplication.
      -
      transposeWith - Whether the multiplied with matrix should be transposed before multiplication.
      +
      transposeSelf - Whether this matrix should be transposed + before multiplication.
      +
      transposeWith - Whether the multiplied with matrix should + be transposed before multiplication.
      Returns:
      A matrix that stores the outcome of the multiplication.
      See Also:
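For instance, the transposition flags make Gram-style products cheap; a minimal sketch:

    import mklab.JGNN.core.Matrix;

    // Computes X^T * X without materializing the transpose.
    static Matrix gram(Matrix x) {
        return x.matmul(x, true, false);  // this.transposed() * with
    }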
      @@ -783,8 +794,8 @@

      matmul

      external

      public static Matrix external(Tensor horizontal, Tensor vertical)
      -
      Produces the external product of two tensors. - This is equivalent but faster to calling matmul(horizontal.asColum(), vertical.asRow()).
      +
      Produces the external product of two tensors. This is equivalent but faster + to calling matmul(horizontal.asColum(), vertical.asRow()).
      Parameters:
      horizontal - The first tensor.
      @@ -812,8 +823,8 @@

      describe

      onesMask

      public Matrix onesMask()
      -
      Produces a mask that indicates the non-zero elements of the matrix. - Element's correspond to the matrix's whose non-zero ones are set to 1.
      +
Produces a mask that indicates the non-zero elements of the matrix. Elements + correspond to the matrix's elements, with the non-zero ones set to 1.
      Returns:
      A matrix of the same dimensions.
      @@ -881,8 +892,8 @@

      setDiagonal

      setToSymmetricNormalization

      public Matrix setToSymmetricNormalization()
      -
      Sets the Matrix to its symmetrically normalized transformation - by appropriately adjusting its element values.
      +
      Sets the Matrix to its symmetrically normalized transformation by + appropriately adjusting its element values.
      Returns:
      this Matrix instance.
      @@ -899,8 +910,8 @@

      setToSymmetricNormalization

      setToASymmetricNormalization

      public Matrix setToASymmetricNormalization()
      -
      Sets the Matrix to its asymmetrically normalized transformation - by appropriately adjusting its element values.
      +
      Sets the Matrix to its asymmetrically normalized transformation by + appropriately adjusting its element values.
      Returns:
      this Matrix instance.
      @@ -939,9 +950,8 @@

      accessDim

      accessRow

      public Tensor accessRow(long row)
      -
      Retrieves the given row as a tensor. Editing the result - also edits the original matrix. - No new memory is allocated for matrix values.
      +
      Retrieves the given row as a tensor. Editing the result also edits the + original matrix. No new memory is allocated for matrix values.
      Parameters:
      row - The given row.
      @@ -964,9 +974,8 @@

      accessRow

      accessCol

      public Tensor accessCol(long col)
      -
      Retrieves the given column as a tensor. Editing the result - also edits the original matrix. - No new memory is allocated for matrix values.
      +
      Retrieves the given column as a tensor. Editing the result also edits the + original matrix. No new memory is allocated for matrix values.
      Parameters:
      col - The given column.
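A sketch of the write-through behavior shared by accessRow and accessCol; selfMultiply is the in-memory multiplication documented for Tensor:

    import mklab.JGNN.core.Matrix;

    // Scales one row of m in place through the shared-memory row view.
    static void scaleRow(Matrix m, long row, double factor) {
        m.accessRow(row).selfMultiply(factor);
    }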
      @@ -990,7 +999,8 @@

      accessCol

      toString

      public String toString()
      Description copied from class: Tensor
      -
      A string serialization of the tensor that can be used by the constructor DenseTensor(String) to create an identical copy.
      +
      A string serialization of the tensor that can be used by the constructor + DenseTensor(String) to create an identical copy.
      Overrides:
      toString in class Tensor
      @@ -1009,9 +1019,9 @@

      toNonZeroString

      accessRows

      public List<Tensor> accessRows()
      -
      Organizes matrix rows to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing - tensor elements edits the original matrix's elements.
      +
      Organizes matrix rows to a list of tensors that share entries. This operation + does not allocate memory for matrix elements and editing tensor elements + edits the original matrix's elements.
      Returns:
      A list of AccessRow instances.
      @@ -1033,7 +1043,7 @@

      accessRows

      accessColumns

      public List<Tensor> accessColumns()
      Organizes specific matrix columns to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing + This operation does not allocate memory for matrix elements and editing tensor elements edits the original matrix's elements.
      Returns:
      @@ -1055,9 +1065,9 @@

      accessColumns

      accessRows

      public Matrix accessRows(long... rows)
      -
      Organizes specific matrix rows to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing - tensor elements edits the original matrix's elements.
      +
      Organizes specific matrix rows to a list of tensors that share entries. This + operation does not allocate memory for matrix elements and editing tensor + elements edits the original matrix's elements.
      Parameters:
      rows - An array of rows to access.
      @@ -1081,7 +1091,7 @@

      accessRows

      accessColumns

      public Matrix accessColumns(long... cols)
      Organizes specific matrix columns to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing + This operation does not allocate memory for matrix elements and editing tensor elements edits the original matrix's elements.
      Parameters:
      @@ -1105,9 +1115,9 @@

      accessColumns

      accessRows

      public Matrix accessRows(Tensor rows)
      -
      Organizes specific matrix rows to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing - tensor elements edits the original matrix's elements.
      +
      Organizes specific matrix rows to a list of tensors that share entries. This + operation does not allocate memory for matrix elements and editing tensor + elements edits the original matrix's elements.
      Parameters:
      rows - A tensor whose values hold the rows to access.
      @@ -1130,9 +1140,9 @@

      accessRows

      accessColumns

      public Matrix accessColumns(Tensor cols)
      -
      Organizes matrix columns to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing - tensor elements edits the original matrix's elements.
      +
      Organizes matrix columns to a list of tensors that share entries. This + operation does not allocate memory for matrix elements and editing tensor + elements edits the original matrix's elements.
      Parameters:
      cols - A tensor whose values hold the columns to access.
      @@ -1155,9 +1165,9 @@

      accessColumns

      accessRows

      public List<Tensor> accessRows(Iterable<Long> rowIds)
      -
      Organizes some matrix rows to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing - tensor elements edits the original matrix's elements.
      +
      Organizes some matrix rows to a list of tensors that share entries. This + operation does not allocate memory for matrix elements and editing tensor + elements edits the original matrix's elements.
      Parameters:
      rowIds - The rows to access.
      @@ -1180,9 +1190,9 @@

      accessRows

      accessColumns

      public List<Tensor> accessColumns(Iterable<Long> colIds)
      -
      Organizes some matrix columns to a list of tensors that share entries. - This operation does not allocate memory for matrix elements and editing - tensor elements edits the original matrix's elements.
      +
      Organizes some matrix columns to a list of tensors that share entries. This + operation does not allocate memory for matrix elements and editing tensor + elements edits the original matrix's elements.
      Parameters:
      colIds - The columns to access.
      diff --git a/docs/javadoc/mklab/JGNN/core/Memory.Scope.html b/docs/javadoc/mklab/JGNN/core/Memory.Scope.html index 2b8ac064..df040c33 100644 --- a/docs/javadoc/mklab/JGNN/core/Memory.Scope.html +++ b/docs/javadoc/mklab/JGNN/core/Memory.Scope.html @@ -1,11 +1,11 @@ - + Memory.Scope - + diff --git a/docs/javadoc/mklab/JGNN/core/Memory.html b/docs/javadoc/mklab/JGNN/core/Memory.html index 76526f9c..e4dbab32 100644 --- a/docs/javadoc/mklab/JGNN/core/Memory.html +++ b/docs/javadoc/mklab/JGNN/core/Memory.html @@ -1,11 +1,11 @@ - + Memory - + @@ -80,9 +80,10 @@

      Class Memory


      public class Memory extends Object
      -
      A memory management system for thread-safe allocation and release of arrays of doubles. - Soft references to allocated arrays kept so that released ones can be reused by future - allocation calls without explicitly initializing memory.
      +
      A memory management system for thread-safe allocation and release of arrays + of doubles. Soft references to allocated arrays kept so that released ones + can be reused by future allocation calls without explicitly initializing + memory.
      Author:
      Emmanouil Krasanakis
      diff --git a/docs/javadoc/mklab/JGNN/core/Slice.html b/docs/javadoc/mklab/JGNN/core/Slice.html index e6cd6b28..ca015586 100644 --- a/docs/javadoc/mklab/JGNN/core/Slice.html +++ b/docs/javadoc/mklab/JGNN/core/Slice.html @@ -1,11 +1,11 @@ - + Slice - + @@ -85,8 +85,8 @@

      Class Slice

      public class Slice extends Object implements Iterable<Long>
      -
      This class provices an interface with which to define data slices, - for instance to sample labels.
      +
This class provides an interface with which to define data slices, for + instance to sample labels.
      Author:
      Emmanouil Krasanakis
      @@ -132,10 +132,9 @@

      Method Summary

      range(double from, double end)
      -
      Performs the range(int, int) operation - while replacing values of from and end - with (int)(from*size()) and (int)(end*size()) - so that fractional ranges can be obtained.
      +
      Performs the range(int, int) operation while replacing values of + from and end with (int)(from*size()) + and (int)(end*size()) so that fractional ranges can be obtained.
      range(int from, @@ -146,10 +145,10 @@

      Method Summary

      -
      Constructs a column matrix holding identifiers in - the range 0,1,..size()-1 so that the pattern +
      Constructs a column matrix holding identifiers in the range + 0,1,..size()-1 so that the pattern slice.samplesAsFeatures().accessRows(slice.range(from, end)) - retrieves one-element tensors holding + retrieves one-element tensors holding slice[from], slice[from+1], ...
      @@ -251,9 +250,11 @@

      range

      Parameters:
      from - The beginning of the identifiers' position in the slice.
      -
      end - The end (non-inclusive) of the identifiers' position in the slice.
      +
      end - The end (non-inclusive) of the identifiers' position in the + slice.
      Returns:
      -
      A new Slice instance holding the position identifiers in this one's given range.
      +
      A new Slice instance holding the position identifiers in this one's + given range.
      See Also:
        @@ -267,12 +268,12 @@

        range

        samplesAsFeatures

        public Matrix samplesAsFeatures()
        -
        Constructs a column matrix holding identifiers in - the range 0,1,..size()-1 so that the pattern +
        Constructs a column matrix holding identifiers in the range + 0,1,..size()-1 so that the pattern slice.samplesAsFeatures().accessRows(slice.range(from, end)) - retrieves one-element tensors holding - slice[from], slice[from+1], ... slice[end]. - The constructed matrix is typically used as node identifier data. + retrieves one-element tensors holding + slice[from], slice[from+1], ... slice[end]. The constructed + matrix is typically used as node identifier data. This is different than asTensor().
        @@ -286,18 +287,18 @@

        samplesAsFeatures

        range

        public Slice range(double from, double end)
        -
        Performs the range(int, int) operation - while replacing values of from and end - with (int)(from*size()) and (int)(end*size()) - so that fractional ranges can be obtained. For example, - you can call slice.shuffle().range(0.5, 1) to obtain a - random subset of the slice's identifiers.
        +
        Performs the range(int, int) operation while replacing values of + from and end with (int)(from*size()) + and (int)(end*size()) so that fractional ranges can be obtained. + For example, you can call slice.shuffle().range(0.5, 1) to + obtain a random subset of the slice's identifiers.
        Parameters:
        from - An integer at least 1 or a double in the range [0,1).
        end - An integer greater than 1 or a double in the range [0,1].
        Returns:
        -
        A new Slice instance holding the position identifiers in this one's given range.
        +
        A new Slice instance holding the position identifiers in this one's + given range.
        See Also:
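A sketch of a shuffled 60/20/20 split built from the fractional overload; shuffle() is referenced by the javadoc's own example above:

    import mklab.JGNN.core.Slice;

    static Slice[] split(Slice nodes) {
        Slice train = nodes.shuffle().range(0.0, 0.6);
        Slice valid = nodes.range(0.6, 0.8);
        Slice test = nodes.range(0.8, 1.0);
        return new Slice[] { train, valid, test };
    }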
          diff --git a/docs/javadoc/mklab/JGNN/core/Tensor.html b/docs/javadoc/mklab/JGNN/core/Tensor.html index 64292c7b..6a9682fb 100644 --- a/docs/javadoc/mklab/JGNN/core/Tensor.html +++ b/docs/javadoc/mklab/JGNN/core/Tensor.html @@ -1,11 +1,11 @@ - + Tensor - + @@ -146,15 +146,13 @@

          Method Summary

          accessSubtensor(long from)
          -
          Wraps a range of elements within a tensor - without allocating memory anew.
          +
          Wraps a range of elements within a tensor without allocating memory anew.
          accessSubtensor(long from, long to)
          -
          Wraps a range of elements within a tensor - without allocating memory anew.
          +
          Wraps a range of elements within a tensor without allocating memory anew.
          add(double value)
          @@ -175,12 +173,14 @@

          Method Summary

          -
          Accesses the tensor through a single-column matrix with the tensor as the only row.
          +
          Accesses the tensor through a single-column matrix with the tensor as the + only row.
          -
          Accesses the tensor through a single-row matrix with the tensor as the only column.
          +
          Accesses the tensor through a single-row matrix with the tensor as the only + column.
          void
          @@ -205,13 +205,14 @@

          Method Summary

          <Type> Type
          cast(Class<Type> type)
          -
          Performs the equivalent of Java's typecasting that fits - in functional interfaces.
          +
          Performs the equivalent of Java's typecasting that fits in functional + interfaces.
          -
          Creates a zeroCopy() and transfers to it all potentially non-zero element values.
          +
          Creates a zeroCopy() and transfers to it all potentially non-zero + element values.
          double
          @@ -238,9 +239,9 @@

          Method Summary

          long
          -
          Provides an estimation for the non-zero number of elements stored in the tensor, - where this number is equal to the size for dense tensors, but equal to the actual - number of non-zero elements for sparse tensors.
          +
          Provides an estimation for the non-zero number of elements stored in the + tensor, where this number is equal to the size for dense tensors, but equal + to the actual number of non-zero elements for sparse tensors.
          @@ -261,7 +262,8 @@

          Method Summary

          fromRange(long start, long end)
          -
          Creates a dense tensor holding the desired range [start, start+1, ..., end-1].
          +
          Creates a dense tensor holding the desired range [start, start+1, ..., + end-1].
          abstract double
          get(long pos)
          @@ -318,8 +320,8 @@

          Method Summary

          Deprecated. -
          This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
          +
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
          abstract Tensor
          @@ -338,8 +340,8 @@

          Method Summary

          Deprecated. -
          This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
          +
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
          @@ -361,7 +363,8 @@

          Method Summary

          selfAdd(Tensor tensor, double weight)
          -
          Performs in-memory weighted addition to the Tensor, storing the result in itself.
          +
          Performs in-memory weighted addition to the Tensor, storing the result in + itself.
          @@ -376,17 +379,20 @@

          Method Summary

          -
          Performs in-memory set of each element to the logarithm of its absolute value.
          +
          Performs in-memory set of each element to the logarithm of its absolute + value.
          selfMultiply(double value)
          -
          Performs in-memory multiplication on the Tensor, storing the result to itself.
          +
          Performs in-memory multiplication on the Tensor, storing the result to + itself.
          -
          Performs in-memory multiplication on the Tensor, storing the result in itself .
          +
          Performs in-memory multiplication on the Tensor, storing the result in itself + .
          @@ -396,7 +402,8 @@

          Method Summary

          -
          Performs in-memory set of each element to the square root of its absolute value.
          +
          Performs in-memory set of each element to the square root of its absolute + value.
          @@ -436,8 +443,8 @@

          Method Summary

          setToRandom(Distribution distribution)
          -
          Set tensor elements to random values by sampling them from a given Distribution - instance.
          +
          Set tensor elements to random values by sampling them from a given + Distribution instance.
          @@ -479,7 +486,8 @@

          Method Summary

          -
          A string serialization of the tensor that can be used by the constructor DenseTensor(String) to create an identical copy.
          +
          A string serialization of the tensor that can be used by the constructor + DenseTensor(String) to create an identical copy.
          abstract Iterator<Long>
          @@ -489,18 +497,20 @@

          Method Summary

          -
          Creates a tensor of the same class with the same size and all element set to zero.
          +
Creates a tensor of the same class with the same size and all elements set to + zero.
          abstract Tensor
          zeroCopy(long size)
          -
          Creates a tensor of the same class with a given size and all element set to zero.
          +
Creates a tensor of the same class with a given size and all elements set to + zero.
          zeroCopy(Tensor prototype)
          -
          Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
          +
          Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
      @@ -560,11 +570,12 @@

      Method Details

      setDimensionName

      public Tensor setDimensionName(String dimensionName)
      Sets a name for the tensor's one dimension. If set, names are checked for - compatibility during operations, so that tensors laying across different dimensions - do not match. Removed dimension names are matched to anything.
+ compatibility during operations, so that tensors lying across different + dimensions do not match. Removed dimension names are matched to anything.
      Parameters:
      -
      dimensionName - The new row name or null to remove current name.
      +
      dimensionName - The new row name or null to remove current + name.
      Returns:
      this Tensor instance.
      See Also:
      @@ -597,8 +608,8 @@

      setToRandom

      setToRandom

      public Tensor setToRandom(Distribution distribution)
      -
      Set tensor elements to random values by sampling them from a given Distribution - instance.
      +
      Set tensor elements to random values by sampling them from a given + Distribution instance.
      Parameters:
      distribution - The distribution instance to sample from.
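A sketch; the Normal distribution implementation is assumed to live at mklab.JGNN.core.distribution.Normal with a (mean, deviation) constructor:

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.distribution.Normal;

    // Fills a fresh tensor with samples from a standard normal distribution.
    static Tensor randomWeights(long size) {
        return Tensor.fromRange(0, size).setToRandom(new Normal(0, 1));
    }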
      @@ -611,10 +622,9 @@

      setToRandom

      assertFinite

      public void assertFinite()
      -
      Asserts that the tensor holds only finite values. Helps catch errors - early on and avoid misidentifying models as high quality by comparing - desired outcomes with NaN when in reality they pass through infinity and hence - don't converge.
      +
      Asserts that the tensor holds only finite values. Helps catch errors early on + and avoid misidentifying models as high quality by comparing desired outcomes + with NaN when in reality they pass through infinity and hence don't converge.
      Throws:
      RuntimeException - if one or more tensor elements are NaN or Inf.
      @@ -626,13 +636,12 @@

      assertFinite

      release

      public abstract void release()
      Deprecated. -
      This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
      +
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
      -
      If the subclassed tensor allows it, release all memory it takes up - so that the garbage collector will eventually clean it up. This - memory will be released anyway by Java once there are no more - references to the object.
      +
If the subclassed tensor allows it, releases all memory it takes up so that + the garbage collector will eventually clean it up. This memory will be + released anyway by Java once there are no more references to the object.
      See Also:
      @@ -648,12 +657,12 @@

      release

      persist

      public abstract void persist()
      Deprecated. -
      This method may not be present in future versions - of the library, depending on whether memory reuse proves useful or nor.
      +
This method may not be present in future versions of the library, + depending on whether memory reuse proves useful or not.
      If supported by the subclassed tensor, invalidates calls to - release() so that memory is a de-allocated only when - object references expire.
+ release() so that memory is de-allocated only when object + references expire.
      See Also:
      @@ -669,8 +678,8 @@

      persist

      put

      public abstract Tensor put(long pos, double value)
      -
      Assign a value to a tensor element. All tensor operations use this function to wrap - element assignments.
      +
Assigns a value to a tensor element. All tensor operations use this function + to wrap element assignments.
      Parameters:
      pos - The position of the tensor element
      @@ -678,7 +687,8 @@

      put

      Returns:
      this Tensor instance.
      Throws:
      -
      RuntimeException - If the value is NaN or the element position is less than 0 or greater than size()-1.
      +
      RuntimeException - If the value is NaN or the element position is less + than 0 or greater than size()-1.
      @@ -686,15 +696,16 @@

      put

      get

      public abstract double get(long pos)
      -
      Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap - element retrieval.
      +
      Retrieves the value of a tensor element at a given position. All tensor + operations use this function to wrap element retrieval.
      Parameters:
      pos - The position of the tensor element
      Returns:
      The value of the tensor element
      Throws:
      -
      RuntimeException - If the element position is less than 0 or greater than size()-1.
      +
      RuntimeException - If the element position is less than 0 or greater + than size()-1.
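A short sketch of these element accessors (assuming the DenseTensor(long) size constructor):

    Tensor t = new DenseTensor(3);
    t.put(0, 2.5);            // assign position 0
    double value = t.get(0);  // retrieve it back: 2.5
    // t.put(0, Double.NaN) would throw a RuntimeException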
      @@ -746,10 +757,11 @@

      assertSize

      assertMatching

      public Tensor assertMatching(Tensor other)
      -
      Asserts that the tensor's dimensions match with another tensor. This check can be made - more complex by derived classes, but for a base Tensor instance it is equivalent assertSize(long). - This method calls isMatching(Tensor) to compare the tensors and throws an exception - if it returns false.
      +
Asserts that the tensor's dimensions match with another tensor. This check + can be made more complex by derived classes, but for a base Tensor instance + it is equivalent to assertSize(long). This method calls + isMatching(Tensor) to compare the tensors and throws an exception if + it returns false.
      Parameters:
      other - The other tensor to compare with.
      @@ -762,7 +774,8 @@

      assertMatching

      zeroCopy

      public Tensor zeroCopy()
      -
      Creates a tensor of the same class with the same size and all element set to zero.
      +
Creates a tensor of the same class with the same size and all elements set to + zero.
      Returns:
      A tensor with the same size.
      @@ -779,17 +792,18 @@

      zeroCopy

      zeroCopy

      public Tensor zeroCopy(Tensor prototype)
      -
      Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
      +
      Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
    • setDimensionName

      public Tensor setDimensionName(Tensor other)
      -
      Fills in dimension names per an example isMatching(mklab.JGNN.core.Tensor) tensor. This appropriately fills in dimension - names of inherited classes too, such as matrices. Effectively, this method automatically infers - dimension names during operations.
      +
      Fills in dimension names per an example isMatching(mklab.JGNN.core.Tensor) tensor. This + appropriately fills in dimension names of inherited classes too, such as + matrices. Effectively, this method automatically infers dimension names + during operations.
      Parameters:
      other - The tensor from which to retrieve dimension names.
      @@ -802,7 +816,8 @@

      setDimensionName

      zeroCopy

      public abstract Tensor zeroCopy(long size)
      -
      Creates a tensor of the same class with a given size and all element set to zero.
      +
Creates a tensor of the same class with a given size and all elements set to + zero.
      Parameters:
      size - The size of the new tensor.
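Because all zeroCopy variants preserve the runtime class, sparse tensors yield sparse copies; a hedged sketch of the three overloads:

    Tensor sameSize = tensor.zeroCopy();       // same class, same size, all zeros
    Tensor resized  = tensor.zeroCopy(8);      // same class, size 8
    Tensor like     = tensor.zeroCopy(other);  // size and dimension names taken from the prototype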
      @@ -831,10 +846,10 @@

      iterator

      getNonZeroElements

      public Iterable<Long> getNonZeroElements()
      -
      Retrieves an iterable that wraps traverseNonZeroElements(). - For the time being, this is returned by implementing Iterable, - but this only serves the practical purpose of avoiding to instantiate - a new object in case many tensors are used.
      +
Retrieves an iterable that wraps traverseNonZeroElements(). For the + time being, this is returned by implementing Iterable, but this + only serves the practical purpose of avoiding the instantiation of a new + object when many tensors are used.
      Returns:
      An iterable of tensor positions.
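A sketch of traversing the (possibly overestimated) set of non-zero positions:

    for (long pos : tensor.getNonZeroElements())
        System.out.println(pos + " -> " + tensor.get(pos));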
      @@ -845,10 +860,10 @@

      getNonZeroElements

      estimateNumNonZeroElements

      public long estimateNumNonZeroElements()
      -
      Provides an estimation for the non-zero number of elements stored in the tensor, - where this number is equal to the size for dense tensors, but equal to the actual - number of non-zero elements for sparse tensors. - Basically, this quantity is proportional to the allocated memory.
      +
Provides an estimate of the number of non-zero elements stored in the + tensor, where this number is equal to the size for dense tensors, but equal + to the actual number of non-zero elements for sparse tensors. Basically, this + quantity is proportional to the allocated memory.
      Returns:
A long number equal to or less than the tensor size.
      @@ -866,8 +881,8 @@

      estimateNumNonZeroElements

      density

      public double density()
      Provides the memory allocation density of getNonZeroElements() - compare to the size of the tensor. 1 indicates fully dense tensors, - and lower values sparser data.
+ compared to the size of the tensor. 1 indicates fully dense tensors, and lower + values indicate sparser data.
      Returns:
      A double in the range [0,1].
      @@ -878,12 +893,12 @@

      density

      traverseNonZeroElements

      public abstract Iterator<Long> traverseNonZeroElements()
      -
      Retrieves positions within the tensor that may hold non-zero elements. - This guarantees that all non-zero elements positions are traversed - but some of the returned positions could hold zero elements. - For example, DenseTensor traverses all - of its elements this way, whereas SparseTensor - indeed traverses only non-zero elements.
      +
Retrieves positions within the tensor that may hold non-zero elements. This + guarantees that all non-zero element positions are traversed but + some of the returned positions could hold zero elements. For example, + DenseTensor traverses all of its elements this + way, whereas SparseTensor indeed traverses + only non-zero elements.
      Returns:
      An iterator that traverses positions within the tensor.
      @@ -894,7 +909,8 @@

      traverseNonZeroElements

      copy

      public Tensor copy()
      -
      Creates a zeroCopy() and transfers to it all potentially non-zero element values.
      +
      Creates a zeroCopy() and transfers to it all potentially non-zero + element values.
      Returns:
      a copy of the Tensor with the same size and contents
      @@ -969,11 +985,13 @@

      selfAdd

      selfAdd

      public Tensor selfAdd(Tensor tensor, double weight)
      -
      Performs in-memory weighted addition to the Tensor, storing the result in itself.
      +
      Performs in-memory weighted addition to the Tensor, storing the result in + itself.
      Parameters:
      tensor - The tensor to add (it's not affected).
      -
      weight - The weight to multiply the added tensor's elements with during addition.
      +
      weight - The weight to multiply the added tensor's elements with during + addition.
      Returns:
      this Tensor instance.
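A one-line sketch of the weighted in-place addition:

    a.selfAdd(b, 0.5); // in place: a += 0.5*b, leaving b unaffected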
      @@ -1033,10 +1051,12 @@

      multiply

      selfMultiply

      public Tensor selfMultiply(Tensor tensor)
      -
      Performs in-memory multiplication on the Tensor, storing the result in itself .
      +
Performs in-memory multiplication on the Tensor, storing the result in + itself.
      Parameters:
      -
      tensor - The tensor to perform element-wise multiplication with (it's not affected).
      +
      tensor - The tensor to perform element-wise multiplication with (it's + not affected).
      Returns:
      this Tensor instance.
      @@ -1058,7 +1078,8 @@

      multiply

      selfMultiply

      public Tensor selfMultiply(double value)
      -
      Performs in-memory multiplication on the Tensor, storing the result to itself.
      +
      Performs in-memory multiplication on the Tensor, storing the result to + itself.
      Parameters:
      value - A number to multiply all tensor elements with.
      @@ -1074,7 +1095,8 @@

      sqrt

      Computes the square root of tensor elements.
      Returns:
      -
      A new Tensor that stores the outcome of finding the absolute square root of each element.
      +
      A new Tensor that stores the outcome of finding the absolute square + root of each element.
    • @@ -1082,7 +1104,8 @@

      sqrt

      selfSqrt

      public Tensor selfSqrt()
      -
      Performs in-memory set of each element to the square root of its absolute value.
      +
      Performs in-memory set of each element to the square root of its absolute + value.
      Returns:
      this Tensor instance.
      @@ -1096,7 +1119,8 @@

      expMinusOne

      Computes the exponential minus 1 of tensor elements.
      Returns:
      -
      A new Tensor that stores the outcome of finding the operation on each element.
      +
      A new Tensor that stores the outcome of finding the operation on each + element.
      @@ -1118,7 +1142,8 @@

      log

      Computes the logarithm of tensor elements.
      Returns:
      -
      A new Tensor that stores the outcome of finding the logarithm of the absolute of each element.
      +
      A new Tensor that stores the outcome of finding the logarithm of the + absolute of each element.
      @@ -1126,7 +1151,8 @@

      log

      selfLog

      public Tensor selfLog()
      -
      Performs in-memory set of each element to the logarithm of its absolute value.
      +
      Performs in-memory set of each element to the logarithm of its absolute + value.
      Returns:
      this Tensor instance.
      @@ -1140,7 +1166,8 @@

      negative

      Computes the negative of tensor elements.
      Returns:
      -
      A new Tensor that stores the outcome of finding the negative of each element.
      +
      A new Tensor that stores the outcome of finding the negative of each + element.
      @@ -1162,7 +1189,8 @@

      abs

      Computes the absolute value of tensor elements.
      Returns:
      -
      A new Tensor that stores the outcome of finding the absolute value of each element.
      +
      A new Tensor that stores the outcome of finding the absolute value of + each element.
      @@ -1250,11 +1278,10 @@

      sum

      accessSubtensor

      public Tensor accessSubtensor(long from)
      -
      Wraps a range of elements within a tensor - without allocating memory anew. Editing the returned - tensor also affects the original one and conversely. - The elements are accessed so that the starting position - is accessed at position 0 of the starting tensor.
      +
      Wraps a range of elements within a tensor without allocating memory anew. + Editing the returned tensor also affects the original one and conversely. The + elements are accessed so that the starting position is accessed at position 0 + of the starting tensor.
      Parameters:
      from - The starting position of the subtensor till its end.
      @@ -1274,14 +1301,12 @@

      accessSubtensor

      accessSubtensor

      public Tensor accessSubtensor(long from, long to)
      -
      Wraps a range of elements within a tensor - without allocating memory anew. Editing the returned - tensor also affects the original one and conversely. - The elements are accessed so that the starting position - is accessed at position 0 of the starting tensor. Accessing - stops up to but not including the end poisition, - so that accessSubtensor(0, size()) is - a see-through copy of the original tensor.
      +
Wraps a range of elements within a tensor without allocating memory anew. + Editing the returned tensor also affects the original one and conversely. The + elements are accessed so that the starting position is accessed at position 0 + of the starting tensor. Accessing stops up to but not including the end + position, so that accessSubtensor(0, size()) is a see-through + copy of the original tensor.
      Parameters:
      from - The starting position of the subtensor.
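A sketch of the see-through semantics described above:

    Tensor view = tensor.accessSubtensor(2, 5);               // wraps positions 2, 3, 4
    view.put(0, 1.0);                                         // also writes position 2 of the original
    Tensor whole = tensor.accessSubtensor(0, tensor.size());  // see-through copy of everything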
      @@ -1301,7 +1326,7 @@

      accessSubtensor

      max

      public double max()
      -
      Computes the maximum tensor element. If the tensor has zero size(), +
      Computes the maximum tensor element. If the tensor has zero size(), this returns Double.NEGATIVE_INFINITY.
      Returns:
      @@ -1320,8 +1345,8 @@

      max

      argmax

      public long argmax()
      -
      Computes the position of the maximum tensor element. If the tensor has zero size(), - this returns -1.
      +
      Computes the position of the maximum tensor element. If the tensor has zero + size(), this returns -1.
      Returns:
      The position of the maximum tensor element
      @@ -1339,7 +1364,7 @@

      argmax

      min

      public double min()
      -
      Computes the minimum tensor element. If the tensor has zero size(), +
      Computes the minimum tensor element. If the tensor has zero size(), this returns Double.POSITIVE_INFINITY.
      Returns:
      @@ -1358,8 +1383,8 @@

      min

      argmin

      public long argmin()
      -
      Computes the position of the minimum tensor element. If the tensor has zero size(), - this returns -1.
      +
      Computes the position of the minimum tensor element. If the tensor has zero + size(), this returns -1.
      Returns:
      The position of the minimum tensor element
      @@ -1377,7 +1402,8 @@

      argmin

      toString

      public String toString()
      -
      A string serialization of the tensor that can be used by the constructor DenseTensor(String) to create an identical copy.
      +
      A string serialization of the tensor that can be used by the constructor + DenseTensor(String) to create an identical copy.
      Overrides:
      toString in class Object
      @@ -1408,8 +1434,9 @@

      toProbability

      public Tensor toProbability()
      Returns:
      -
      A copy of the tensor on which division with the sum has been performed - (if the tensor contains no negative elements, this is equivalent to L1 normalization)
      +
      A copy of the tensor on which division with the sum has been + performed (if the tensor contains no negative elements, this is + equivalent to L1 normalization)
      See Also:
        @@ -1423,7 +1450,8 @@

        toProbability

        setToNormalized

        public Tensor setToNormalized()
        -
        L2-normalizes the tensor's elements. Does nothing if the norm() is zero.
        +
        L2-normalizes the tensor's elements. Does nothing if the norm() is + zero.
        Returns:
        this Tensor instance.
        @@ -1440,7 +1468,8 @@

        setToNormalized

        setToProbability

        public Tensor setToProbability()
        -
        Divides the tensor's elements with their sum. Does nothing if the sum() is zero.
        +
        Divides the tensor's elements with their sum. Does nothing if the + sum() is zero.
        Returns:
        this Tensor instance.
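A sketch contrasting the copying and in-place normalizations:

    Tensor probs = counts.toProbability(); // new tensor, divided by the sum
    counts.setToProbability();             // same normalization, in place
    counts.setToNormalized();              // L2 normalization, in place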
        @@ -1522,8 +1551,8 @@

        fromDouble

        fromRange

        public static Tensor fromRange(long start, long end)
        -
        Creates a dense tensor holding the desired range [start, start+1, ..., end-1]. - This allocates a new tensor.
        +
        Creates a dense tensor holding the desired range [start, start+1, ..., + end-1]. This allocates a new tensor.
        Parameters:
        start - The start of the range.
        @@ -1543,8 +1572,8 @@

        fromRange

        fromRange

        public static Tensor fromRange(long end)
        -
        Creates a dense tensor holding the desired range [0, 1, ..., end-1]. - This allocates a new tensor.
        +
        Creates a dense tensor holding the desired range [0, 1, ..., end-1]. This + allocates a new tensor.
        Parameters:
        end - The end of the range.
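A sketch of the two fromRange overloads:

    Tensor idx  = Tensor.fromRange(4);    // [0, 1, 2, 3]
    Tensor part = Tensor.fromRange(2, 5); // [2, 3, 4]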
        @@ -1563,7 +1592,8 @@

        fromRange

        toDouble

        public double toDouble()
        -
        Converts a tensor of size()==1 to double. Throws an exception otherwise.
        +
        Converts a tensor of size()==1 to double. Throws an exception + otherwise.
        Returns:
        A double.
        @@ -1582,9 +1612,9 @@

        toDouble

        asColumn

        public WrapCols asColumn()
        -
        Accesses the tensor through a single-column matrix with the tensor as the only row. - Editing the returned matrix also edits the original tensor. - No new memory is allocated for tensor values.
        +
Accesses the tensor through a single-column matrix with the tensor as the + only column. Editing the returned matrix also edits the original tensor. No new + memory is allocated for tensor values.
        Returns:
        A WrapCols instance.
        @@ -1601,9 +1631,9 @@

        asColumn

        asRow

        public WrapRows asRow()
        -
        Accesses the tensor through a single-row matrix with the tensor as the only column. - Editing the returned matrix also edits the original tensor. - No new memory is allocated for tensor values.
        +
Accesses the tensor through a single-row matrix with the tensor as the only + row. Editing the returned matrix also edits the original tensor. No new + memory is allocated for tensor values.
        Returns:
        A WrapRows instance.
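A hedged sketch of the two zero-copy matrix views; since values are shared, edits propagate to the wrapped tensor:

    Matrix column = tensor.asColumn(); // size() x 1 view
    Matrix row    = tensor.asRow();    // 1 x size() view
    column.put(0, 9.0);                // also writes tensor position 0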
        @@ -1631,8 +1661,8 @@

        describe

        cast

        public <Type> Type cast(Class<Type> type)
        -
        Performs the equivalent of Java's typecasting that fits - in functional interfaces.
        +
        Performs the equivalent of Java's typecasting that fits in functional + interfaces.
        Type Parameters:
        Type - The automatically inferred type of the class.
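This enables fluent chains that narrow the static type without a standalone cast expression; a hedged sketch using the Model.predict signature that appears later in this diff:

    Matrix output = model.predict(inputs).get(0).cast(Matrix.class);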
        diff --git a/docs/javadoc/mklab/JGNN/core/ThreadPool.html b/docs/javadoc/mklab/JGNN/core/ThreadPool.html index 6a574b06..4365ecdc 100644 --- a/docs/javadoc/mklab/JGNN/core/ThreadPool.html +++ b/docs/javadoc/mklab/JGNN/core/ThreadPool.html @@ -1,11 +1,11 @@ - + ThreadPool - + @@ -80,9 +80,10 @@

        Class ThreadPool


        public class ThreadPool extends Object
        -
        This class provides thread execution pool utilities while keeping track of thread - identifiers for use by thread-specific NNOperation. - Threads scheduling relies on Java's ThreadPoolExecutor.
        +
This class provides thread execution pool utilities while keeping track of + thread identifiers for use by thread-specific + NNOperation. Thread scheduling relies on Java's + ThreadPoolExecutor.
        Author:
        Emmanouil Krasanakis
        @@ -114,8 +115,9 @@

        Method Summary

        void
        submit(Runnable runnable)
        -
        Submits a runnable to be executed at some future point by a thread, - for example via ThreadPool.getInstance().submit(new Runnable(){public void run(){...}});.
        +
        Submits a runnable to be executed at some future point by a thread, for + example via + ThreadPool.getInstance().submit(new Runnable(){public void run(){...}});.
        void
        @@ -154,8 +156,9 @@

        getInstance

        submit

        public void submit(Runnable runnable)
        -
        Submits a runnable to be executed at some future point by a thread, - for example via ThreadPool.getInstance().submit(new Runnable(){public void run(){...}});.
        +
        Submits a runnable to be executed at some future point by a thread, for + example via + ThreadPool.getInstance().submit(new Runnable(){public void run(){...}});.
        Parameters:
        runnable - A Java Runnable.
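A lambda serves equally well as the anonymous Runnable shown above; a hedged sketch that pairs submission with waitForConclusion():

    ThreadPool.getInstance().submit(() -> {
        // work executed on a pool thread; getCurrentThreadId() identifies it
    });
    ThreadPool.getInstance().waitForConclusion(); // blocks until all submissions conclude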
        @@ -183,8 +186,8 @@

        getCurrentThreadId

        waitForConclusion

        public void waitForConclusion()
        -
        Waits until all threads in the pool have finished. This concludes only - if all submitted runnable conclude.
        +
Waits until all threads in the pool have finished. This concludes only if all + submitted runnables conclude.
        See Also:
        diff --git a/docs/javadoc/mklab/JGNN/core/class-use/Distribution.html b/docs/javadoc/mklab/JGNN/core/class-use/Distribution.html index d9eb2028..f01f0a31 100644 --- a/docs/javadoc/mklab/JGNN/core/class-use/Distribution.html +++ b/docs/javadoc/mklab/JGNN/core/class-use/Distribution.html @@ -1,11 +1,11 @@ - + Uses of Interface mklab.JGNN.core.Distribution - + @@ -57,9 +57,14 @@

        Uses of
        Package
        Description
        -
         
        +
        +
        Contains base numerical data classes, as well as supporting abstract classes.
        +
        -
         
        +
        +
        Contains data distributions that produce one numerical value and can be used + for tensor value initialization.
        +

      @@ -112,7 +117,8 @@

      Uses of
      class 
      -
      Implements a Normal Distribution of given mean and standard deviation.
      +
      Implements a Normal Distribution of given mean and standard + deviation.
      class 
      diff --git a/docs/javadoc/mklab/JGNN/core/class-use/Matrix.html b/docs/javadoc/mklab/JGNN/core/class-use/Matrix.html index 5ab2171b..52d350b0 100644 --- a/docs/javadoc/mklab/JGNN/core/class-use/Matrix.html +++ b/docs/javadoc/mklab/JGNN/core/class-use/Matrix.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.Matrix - + @@ -57,17 +57,39 @@

      Uses of Class
      Package
      Description
      -
       
      +
      +
      Contains classes that simplify data loading, model building, and training.
      +
      -
       
      - -
       
      - -
       
      - -
       
      - -
       
      +
      +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
      +
      + +
      +
      Contains model training strategies that correspond to different predictive + tasks.
      +
      + +
      +
      Contains base numerical data classes, as well as supporting abstract classes.
      +
      + +
      +
Contains empty extensions of datatypes that hold only dimension names and + sizes but no data.
      +
      + +
      +
      Contains implementations of matrix classes, of transparent access to parts of + these classes, and of column/row repetitions that broadcast vectors into + matrices.
      +
      + +
      +
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
      +
    • @@ -109,8 +149,28 @@

      Uses of Matrix
      FastBuilder(Matrix adjacency, Matrix features)
      -
      Creates a graph neural network builder from an - normalized adjacency matrix and a node feature matrix.
      +
Creates a graph neural network builder from a normalized adjacency matrix + and a node feature matrix.
      +
      + +

    • + +
    • +
      +

      Uses of Matrix in mklab.JGNN.adhoc.train

      +
      Methods in mklab.JGNN.adhoc.train with parameters of type Matrix
      +
      +
      Modifier and Type
      +
      Method
      +
      Description
      + +
      NodeClassification.train(Model model, + Matrix features, + Matrix labels, + Slice trainingSamples, + Slice validationSamples)
      +
      +
      Trains a Model instance based on current settings.
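Following this relocation, node classification training is configured through the new strategy class; a hedged sketch of the call shape documented above (the model, data matrices, and slices are assumed to be prepared elsewhere):

    ModelTraining trainer = new NodeClassification(); // configure optimizer, epochs, etc. as needed
    trainer.train(model, features, labels, trainingSamples, validationSamples);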
      @@ -146,8 +206,8 @@

      Uses of Matrix
      Matrix.asTransposed()
      -
      Creates a transposed version of the matrix that accesses the same elements (thus, editing one - edits the other) without allocating additional memory.
      +
      Creates a transposed version of the matrix that accesses the same elements + (thus, editing one edits the other) without allocating additional memory.
      static Matrix
      Matrix.external(Tensor horizontal, @@ -163,20 +223,20 @@

      Uses of Matrix
      Matrix.matmul(Matrix with)
      -
      Performs the matrix multiplication of this*with and the recipient.
      +
      Performs the matrix multiplication of this*with and the + recipient.
      Matrix.matmul(Matrix with, boolean transposeSelf, boolean transposeWith)
      -
      Can be used to perform fast computation of the matrix multiplications -
      this*with, -
      this.transposed()*with -
      this*with.transposed(), -
      this.transposed()*with.transposed() -
      while avoiding the overhead of calling - transposed().
      +
      Can be used to perform fast computation of the matrix multiplications
      + this*with,
      + this.transposed()*with
      + this*with.transposed(),
      + this.transposed()*with.transposed()
      + while avoiding the overhead of calling transposed().
      Matrix.onesMask()
      @@ -193,10 +253,10 @@

      Uses of Matrix
      -
      Constructs a column matrix holding identifiers in - the range 0,1,..Slice.size()-1 so that the pattern +
      Constructs a column matrix holding identifiers in the range + 0,1,..Slice.size()-1 so that the pattern slice.samplesAsFeatures().accessRows(slice.range(from, end)) - retrieves one-element tensors holding + retrieves one-element tensors holding slice[from], slice[from+1], ...
      @@ -232,14 +292,14 @@

      Uses of Matrix
      -
      Sets the Matrix to its asymmetrically normalized transformation - by appropriately adjusting its element values.
      +
      Sets the Matrix to its asymmetrically normalized transformation by + appropriately adjusting its element values.
      -
      Sets the Matrix to its symmetrically normalized transformation - by appropriately adjusting its element values.
      +
      Sets the Matrix to its symmetrically normalized transformation by + appropriately adjusting its element values.
      @@ -264,14 +324,15 @@

      Uses of Matrix
      Matrix.zeroCopy()
      -
      Creates a Matrix with the same class and dimensions and all element set to zero.
      +
Creates a Matrix with the same class and dimensions and all elements set to + zero.
      abstract Matrix
      Matrix.zeroCopy(long rows, long cols)
      -
      Creates a matrix of the same class and all element set to zero, but with - a given number of rows and columns.
      +
Creates a matrix of the same class and all elements set to zero, but with a + given number of rows and columns.

      Methods in mklab.JGNN.core with parameters of type Matrix
      @@ -282,20 +343,20 @@

      Uses of Matrix
      Matrix.matmul(Matrix with)
      -
      Performs the matrix multiplication of this*with and the recipient.
      +
      Performs the matrix multiplication of this*with and the + recipient.
      Matrix.matmul(Matrix with, boolean transposeSelf, boolean transposeWith)
      -
      Can be used to perform fast computation of the matrix multiplications -
      this*with, -
      this.transposed()*with -
      this*with.transposed(), -
      this.transposed()*with.transposed() -
      while avoiding the overhead of calling - transposed().
      +
      Can be used to perform fast computation of the matrix multiplications
      + this*with,
      + this.transposed()*with
      + this*with.transposed(),
      + this.transposed()*with.transposed()
      + while avoiding the overhead of calling transposed().

    • @@ -345,8 +406,8 @@

      Uses of Matrix
      class 
      -
      Implements a square matrix whose diagonal elements are determined by the correspond values of - an underlying tensor and off-diagonal elements are zero.
      +
Implements a square matrix whose diagonal elements are determined by the + corresponding values of an underlying tensor and off-diagonal elements are zero.
      class 
      @@ -373,7 +434,8 @@

      Uses of Matrix
      class 
      -
      Generates a transposed version of a base matrix, with which it shares elements.
      +
      Generates a transposed version of a base matrix, with which it shares + elements.
      class 
      @@ -526,25 +588,16 @@

      Uses of Matrix
      Method
      Description
      -
      Model.train(ModelTraining trainer, +
      Model.train(ModelTraining trainer, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples)
      Trains the model by appropriately calling - ModelTraining.train(Model, Matrix, Matrix, Slice, Slice) + ModelTraining.train(Model, Matrix, Matrix, Slice, Slice) with the provided parameters.
      - -
      ModelTraining.train(Model model, - Matrix features, - Matrix labels, - Slice trainingSamples, - Slice validationSamples)
      -
      -
      Trains a Model instance based on current settings.
      -

      diff --git a/docs/javadoc/mklab/JGNN/core/class-use/Memory.Scope.html b/docs/javadoc/mklab/JGNN/core/class-use/Memory.Scope.html index a1e7875d..54f2ac68 100644 --- a/docs/javadoc/mklab/JGNN/core/class-use/Memory.Scope.html +++ b/docs/javadoc/mklab/JGNN/core/class-use/Memory.Scope.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.Memory.Scope - + @@ -57,7 +57,9 @@

      Uses of Cla
      Package
      Description
      -
       
      +
      +
      Contains base numerical data classes, as well as supporting abstract classes.
      +
        diff --git a/docs/javadoc/mklab/JGNN/core/class-use/Memory.html b/docs/javadoc/mklab/JGNN/core/class-use/Memory.html index 015e7767..f0002e24 100644 --- a/docs/javadoc/mklab/JGNN/core/class-use/Memory.html +++ b/docs/javadoc/mklab/JGNN/core/class-use/Memory.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.Memory - + diff --git a/docs/javadoc/mklab/JGNN/core/class-use/Slice.html b/docs/javadoc/mklab/JGNN/core/class-use/Slice.html index b64353a0..dad60015 100644 --- a/docs/javadoc/mklab/JGNN/core/class-use/Slice.html +++ b/docs/javadoc/mklab/JGNN/core/class-use/Slice.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.Slice - + @@ -57,11 +57,23 @@

        Uses of Class
        m
        Package
        Description
        -
         
        - -
         
        - -
         
        +
        +
        Contains classes that simplify data loading, model building, and training.
        +
        + +
        +
        Contains model training strategies that correspond to different predictive + tasks.
        +
        + +
        +
        Contains base numerical data classes, as well as supporting abstract classes.
        +
        + +
        +
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
        +
        + +
      • +
        +

        Uses of Slice in mklab.JGNN.adhoc.train

        +
        Methods in mklab.JGNN.adhoc.train with parameters of type Slice
        +
        +
        Modifier and Type
        +
        Method
        +
        Description
        + +
        NodeClassification.train(Model model, + Matrix features, + Matrix labels, + Slice trainingSamples, + Slice validationSamples)
        +
        +
        Trains a Model instance based on current settings.
        +
        +
      • @@ -93,10 +143,9 @@

        Uses of Slice i
        Slice.range(double from, double end)
        -
        Performs the range(int, int) operation - while replacing values of from and end - with (int)(from*size()) and (int)(end*size()) - so that fractional ranges can be obtained.
        +
        Performs the range(int, int) operation while replacing values of + from and end with (int)(from*size()) + and (int)(end*size()) so that fractional ranges can be obtained.
        Slice.range(int from, @@ -126,25 +175,16 @@

        Uses of Slice i
        Method
        Description
        -
        Model.train(ModelTraining trainer, +
        Model.train(ModelTraining trainer, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples)
        Trains the model by appropriately calling - ModelTraining.train(Model, Matrix, Matrix, Slice, Slice) + ModelTraining.train(Model, Matrix, Matrix, Slice, Slice) with the provided parameters.
        - -
        ModelTraining.train(Model model, - Matrix features, - Matrix labels, - Slice trainingSamples, - Slice validationSamples)
        -
        -
        Trains a Model instance based on current settings.
        -

      diff --git a/docs/javadoc/mklab/JGNN/core/class-use/Tensor.html b/docs/javadoc/mklab/JGNN/core/class-use/Tensor.html index a321a400..d60f5b24 100644 --- a/docs/javadoc/mklab/JGNN/core/class-use/Tensor.html +++ b/docs/javadoc/mklab/JGNN/core/class-use/Tensor.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.Tensor - + @@ -57,35 +57,73 @@

      Uses of Class
      Package
      Description
      -
       
      +
      +
      Contains classes that simplify data loading, model building, and training.
      +
      -
       
      +
      +
Contains model builders that parse expressions of the Neuralang scripting + language to simplify mathematical parts of the definitions.
      +
      -
       
      +
      +
      Contains base numerical data classes, as well as supporting abstract classes.
      +
      -
       
      +
      +
Contains empty extensions of datatypes that hold only dimension names and + sizes but no data.
      +
      -
       
      +
      +
      Contains implementations of matrix classes, of transparent access to parts of + these classes, and of column/row repetitions that broadcast vectors into + matrices.
      +
      -
       
      +
      +
      Contains implementations of tensor classes, as well as transparent access to + parts of these classes.
      +
      -
       
      +
      +
      Contains utility functions that are employed internally, mainly optimized 1D + and 2D iterators.
      +
      -
       
      +
      +
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
      +
      -
       
      +
      +
Implements activation functions to be used as model operations.
      +
      -
       
      +
      +
      Contains various types of neural architecture inputs.
      +
      -
       
      +
      +
Contains classes for instantiating loss functions.
      +
      -
       
      +
      +
      Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status.
      +
      -
       
      +
      +
      Contains popular neural network and GNN operations.
      +
      -
       
      +
      +
Contains optimizers that can be used to minimize training losses.
      +
      -
       
      +
      +
      Contains pooling/reduction operations that reduce the dimensions of inputs.
      +
      @@ -238,15 +283,13 @@

      Uses of Tensor
      Tensor.accessSubtensor(long from)
      -
      Wraps a range of elements within a tensor - without allocating memory anew.
      +
      Wraps a range of elements within a tensor without allocating memory anew.
      Tensor.accessSubtensor(long from, long to)
      -
      Wraps a range of elements within a tensor - without allocating memory anew.
      +
      Wraps a range of elements within a tensor without allocating memory anew.
      Tensor.add(double value)
      @@ -272,7 +315,8 @@

      Uses of Tensor
      Tensor.copy()
      -
      Creates a zeroCopy() and transfers to it all potentially non-zero element values.
      +
      Creates a zeroCopy() and transfers to it all potentially non-zero + element values.
      Tensor.expMinusOne()
      @@ -293,7 +337,8 @@

      Uses of Tensor
      Tensor.fromRange(long start, long end)
      -
      Creates a dense tensor holding the desired range [start, start+1, ..., end-1].
      +
      Creates a dense tensor holding the desired range [start, start+1, ..., + end-1].
      Tensor.inverse()
      @@ -348,7 +393,8 @@

      Uses of Tensor
      Tensor.selfAdd(Tensor tensor, double weight)
      -
      Performs in-memory weighted addition to the Tensor, storing the result in itself.
      +
      Performs in-memory weighted addition to the Tensor, storing the result in + itself.
      @@ -363,17 +409,20 @@

      Uses of Tensor
      Tensor.selfLog()
      -
      Performs in-memory set of each element to the logarithm of its absolute value.
      +
      Performs in-memory set of each element to the logarithm of its absolute + value.
      Tensor.selfMultiply(double value)
      -
      Performs in-memory multiplication on the Tensor, storing the result to itself.
      +
      Performs in-memory multiplication on the Tensor, storing the result to + itself.
      Tensor.selfMultiply(Tensor tensor)
      -
      Performs in-memory multiplication on the Tensor, storing the result in itself .
      +
Performs in-memory multiplication on the Tensor, storing the result in + itself.
      Tensor.selfNegative()
      @@ -383,7 +432,8 @@

      Uses of Tensor
      Tensor.selfSqrt()
      -
      Performs in-memory set of each element to the square root of its absolute value.
      +
      Performs in-memory set of each element to the square root of its absolute + value.
      Tensor.selfSubtract(Tensor tensor)
      @@ -423,8 +473,8 @@

      Uses of Tensor
      Tensor.setToRandom(Distribution distribution)
      -
      Set tensor elements to random values by sampling them from a given Distribution - instance.
      +
Sets tensor elements to random values by sampling them from a given + Distribution instance.
      Tensor.setToUniform()
      @@ -450,34 +500,38 @@

      Uses of Tensor
      Matrix.transform(Tensor x)
      -
      Performs the linear algebra transformation A*x where A is this matrix and x a vector
      +
Performs the linear algebra transformation A*x, where A is this matrix and x is a + vector
      Matrix.zeroCopy(long size)
      -
      Creates a Matrix with the same class and dimensions and all element set to zero.
      +
Creates a Matrix with the same class and dimensions and all elements set to + zero.
      Matrix.zeroCopy(Tensor prototype)
      -
      Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
      +
      Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
      Tensor.zeroCopy()
      -
      Creates a tensor of the same class with the same size and all element set to zero.
      +
Creates a tensor of the same class with the same size and all elements set to + zero.
      abstract Tensor
      Tensor.zeroCopy(long size)
      -
      Creates a tensor of the same class with a given size and all element set to zero.
      +
Creates a tensor of the same class with a given size and all elements set to + zero.
      Tensor.zeroCopy(Tensor prototype)
      -
      Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
      +
      Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
      Methods in mklab.JGNN.core that return types with arguments of type Tensor
      @@ -563,12 +617,14 @@

      Uses of Tensor
      Tensor.selfAdd(Tensor tensor, double weight)
      -
      Performs in-memory weighted addition to the Tensor, storing the result in itself.
      +
      Performs in-memory weighted addition to the Tensor, storing the result in + itself.
      Tensor.selfMultiply(Tensor tensor)
      -
      Performs in-memory multiplication on the Tensor, storing the result in itself .
      +
Performs in-memory multiplication on the Tensor, storing the result in + itself.
      Tensor.selfSubtract(Tensor tensor)
      @@ -589,19 +645,20 @@

      Uses of Tensor
      Matrix.transform(Tensor x)
      -
      Performs the linear algebra transformation A*x where A is this matrix and x a vector
      +
Performs the linear algebra transformation A*x, where A is this matrix and x is a + vector
      Matrix.zeroCopy(Tensor prototype)
      -
      Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
      +
      Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.
      Tensor.zeroCopy(Tensor prototype)
      -
      Creates a tensor of the same class and all elements set to zero, - but size and dimension names are obtained from a prototype tensor.
      +
      Creates a tensor of the same class and all elements set to zero, but size and + dimension names are obtained from a prototype tensor.

      @@ -671,8 +728,8 @@

      Uses of Tensor
      class 
      -
      Implements a square matrix whose diagonal elements are determined by the correspond values of - an underlying tensor and off-diagonal elements are zero.
      +
Implements a square matrix whose diagonal elements are determined by the + corresponding values of an underlying tensor and off-diagonal elements are zero.
      class 
      @@ -699,7 +756,8 @@

      Uses of Tensor
      class 
      -
      Generates a transposed version of a base matrix, with which it shares elements.
      +
      Generates a transposed version of a base matrix, with which it shares + elements.
      class 
      @@ -1232,8 +1290,8 @@

      Uses of Tensor
      Loss.evaluate(Tensor output, Tensor desired)
      -
      Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
      +
      Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
      Model.predict(Tensor... inputs)
      diff --git a/docs/javadoc/mklab/JGNN/core/class-use/ThreadPool.html b/docs/javadoc/mklab/JGNN/core/class-use/ThreadPool.html index 1f62ace0..180e1f5e 100644 --- a/docs/javadoc/mklab/JGNN/core/class-use/ThreadPool.html +++ b/docs/javadoc/mklab/JGNN/core/class-use/ThreadPool.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.ThreadPool - + @@ -57,7 +57,9 @@

      Uses of Class
      Package
      Description
      -
       
      +
      +
      Contains base numerical data classes, as well as supporting abstract classes.
      +
        diff --git a/docs/javadoc/mklab/JGNN/core/distribution/Normal.html b/docs/javadoc/mklab/JGNN/core/distribution/Normal.html index 00292d1c..7841076e 100644 --- a/docs/javadoc/mklab/JGNN/core/distribution/Normal.html +++ b/docs/javadoc/mklab/JGNN/core/distribution/Normal.html @@ -1,11 +1,11 @@ - + Normal - + @@ -85,7 +85,8 @@

        Class Normal

        public class Normal extends Object implements Distribution
        -
        Implements a Normal Distribution of given mean and standard deviation.
        +
        Implements a Normal Distribution of given mean and standard + deviation.
        Author:
        Emmanouil Krasanakis
        @@ -103,7 +104,8 @@

        Constructor Summary

        Description
        -
        Instantiates a normal distribution with zero mean and standard deviation equal to 1.
        +
        Instantiates a normal distribution with zero mean and standard deviation + equal to 1.
        Normal(double mean, double std)
        @@ -175,7 +177,8 @@

        Constructor Details

        Normal

        public Normal()
        -
        Instantiates a normal distribution with zero mean and standard deviation equal to 1.
        +
        Instantiates a normal distribution with zero mean and standard deviation + equal to 1.
      • diff --git a/docs/javadoc/mklab/JGNN/core/distribution/Uniform.html b/docs/javadoc/mklab/JGNN/core/distribution/Uniform.html index 7c1922b5..e4a829ae 100644 --- a/docs/javadoc/mklab/JGNN/core/distribution/Uniform.html +++ b/docs/javadoc/mklab/JGNN/core/distribution/Uniform.html @@ -1,11 +1,11 @@ - + Uniform - + @@ -108,7 +108,8 @@

        Constructor Summary

        Uniform(double from, double to)
        -
        Instantiates a uniform distribution that samples values from the given range [from, to].
        +
        Instantiates a uniform distribution that samples values from the given range + [from, to].
      @@ -189,7 +190,8 @@

      Uniform

      Uniform

      public Uniform(double from, double to)
      -
      Instantiates a uniform distribution that samples values from the given range [from, to].
      +
      Instantiates a uniform distribution that samples values from the given range + [from, to].
      Parameters:
      from - The minimum value of the distribution.
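A hedged sketch instantiating both documented distributions for tensor initialization (the DenseTensor(long) size constructor is assumed):

    Distribution uniform = new Uniform(-1, 1); // samples from [-1, 1]
    Distribution normal  = new Normal(0, 1);   // zero mean, unit standard deviation
    Tensor t = new DenseTensor(10).setToRandom(uniform);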
      diff --git a/docs/javadoc/mklab/JGNN/core/distribution/class-use/Normal.html b/docs/javadoc/mklab/JGNN/core/distribution/class-use/Normal.html index 8feaee9d..17b75042 100644 --- a/docs/javadoc/mklab/JGNN/core/distribution/class-use/Normal.html +++ b/docs/javadoc/mklab/JGNN/core/distribution/class-use/Normal.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.distribution.Normal - + @@ -57,7 +57,10 @@

      Uses
      Package
      Description
      -
       
      +
      +
      Contains data distributions that produce one numerical value and can be used + for tensor value initialization.
      +
        diff --git a/docs/javadoc/mklab/JGNN/core/distribution/class-use/Uniform.html b/docs/javadoc/mklab/JGNN/core/distribution/class-use/Uniform.html index 1f703ad3..cb080234 100644 --- a/docs/javadoc/mklab/JGNN/core/distribution/class-use/Uniform.html +++ b/docs/javadoc/mklab/JGNN/core/distribution/class-use/Uniform.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.distribution.Uniform - + @@ -57,7 +57,10 @@

        Use
        Package
        Description
        -
         
        +
        +
        Contains data distributions that produce one numerical value and can be used + for tensor value initialization.
        +
          diff --git a/docs/javadoc/mklab/JGNN/core/distribution/package-summary.html b/docs/javadoc/mklab/JGNN/core/distribution/package-summary.html index 189f1c61..293859c6 100644 --- a/docs/javadoc/mklab/JGNN/core/distribution/package-summary.html +++ b/docs/javadoc/mklab/JGNN/core/distribution/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.core.distribution - + @@ -42,7 +42,7 @@
          @@ -62,6 +62,15 @@

          Package mklab.JGN


          package mklab.JGNN.core.distribution
          +
          +
          Contains data distributions that produce one numerical value and can be used + for tensor value initialization. The mklab.JGNN.nn.initializers + package uses these distributions to initialize trainable parameters.
          +
          +
          Author:
          +
          Emmanouil Krasanakis
          +
          +
          • @@ -71,15 +80,30 @@

            Package mklab.JGN
            Package
            Description
            -
             
            +
            +
            Contains base numerical data classes, as well as supporting abstract classes.
            +
            -
             
            +
            +
Contains empty extensions of datatypes that hold only dimension names and + sizes but no data.
            +
            -
             
            +
            +
            Contains implementations of matrix classes, of transparent access to parts of + these classes, and of column/row repetitions that broadcast vectors into + matrices.
            +
            -
             
            +
            +
            Contains implementations of tensor classes, as well as transparent access to + parts of these classes.
            +
            -
             
            +
            +
            Contains utility functions that are employed internally, mainly optimized 1D + and 2D iterators.
            +

          • @@ -91,7 +115,8 @@

            Package mklab.JGN
            Description
            -
            Implements a Normal Distribution of given mean and standard deviation.
            +
            Implements a Normal Distribution of given mean and standard + deviation.
            diff --git a/docs/javadoc/mklab/JGNN/core/distribution/package-tree.html b/docs/javadoc/mklab/JGNN/core/distribution/package-tree.html index 165fe733..4d2dcec0 100644 --- a/docs/javadoc/mklab/JGNN/core/distribution/package-tree.html +++ b/docs/javadoc/mklab/JGNN/core/distribution/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.core.distribution Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/core/distribution/package-use.html b/docs/javadoc/mklab/JGNN/core/distribution/package-use.html index 0e4af0ce..67f468bc 100644 --- a/docs/javadoc/mklab/JGNN/core/distribution/package-use.html +++ b/docs/javadoc/mklab/JGNN/core/distribution/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.core.distribution - + @@ -57,7 +57,10 @@

            Uses of P
            Package
            Description
            -
             
            +
            +
            Contains data distributions that produce one numerical value and can be used + for tensor value initialization.
            +

              @@ -69,7 +72,8 @@

              Uses of P
              Description
              -
              Implements a Normal Distribution of given mean and standard deviation.
              +
              Implements a Normal Distribution of given mean and standard + deviation.
              diff --git a/docs/javadoc/mklab/JGNN/core/empy/EmptyMatrix.html b/docs/javadoc/mklab/JGNN/core/empy/EmptyMatrix.html index 03b916be..c9ed943a 100644 --- a/docs/javadoc/mklab/JGNN/core/empy/EmptyMatrix.html +++ b/docs/javadoc/mklab/JGNN/core/empy/EmptyMatrix.html @@ -1,11 +1,11 @@ - + EmptyMatrix - + @@ -133,15 +133,15 @@

              Method Summary

              -
              Retrieves an iterable that traverses (row, col) entry pairs - of non zero entries.
              +
Retrieves an iterable that traverses (row, col) entry pairs of non-zero + entries.
              void
              If supported by the subclassed tensor, invalidates calls to - Tensor.release() so that memory is a de-allocated only when - object references expire.
+ Tensor.release() so that memory is de-allocated only when object + references expire.
              put(long pos, @@ -152,8 +152,8 @@

              Method Summary

              void
              -
              If the subclassed tensor allows it, release all memory it takes up - so that the garbage collector will eventually clean it up.
              +
If the subclassed tensor allows it, releases all memory it takes up so that + the garbage collector will eventually clean it up.
              @@ -164,8 +164,8 @@

              Method Summary

              zeroCopy(long rows, long cols)
              -
              Creates a matrix of the same class and all element set to zero, but with - a given number of rows and columns.
              +
Creates a matrix of the same class and all elements set to zero, but with a + given number of rows and columns.
              @@ -213,8 +213,8 @@

              Method Details

              getNonZeroEntries

              public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
              Description copied from class: Matrix
              -
              Retrieves an iterable that traverses (row, col) entry pairs - of non zero entries.
              +
Retrieves an iterable that traverses (row, col) entry pairs of non-zero + entries.
              Specified by:
              getNonZeroEntries in class Matrix
              @@ -235,8 +235,8 @@

              zeroCopy

              public Matrix zeroCopy(long rows, long cols)
              Description copied from class: Matrix
              -
              Creates a matrix of the same class and all element set to zero, but with - a given number of rows and columns.
              +
Creates a matrix of the same class and all elements set to zero, but with a + given number of rows and columns.
              Specified by:
              zeroCopy in class Matrix
              @@ -259,10 +259,9 @@

              zeroCopy

              release

              public void release()
              Description copied from class: Tensor
              -
              If the subclassed tensor allows it, release all memory it takes up - so that the garbage collector will eventually clean it up. This - memory will be released anyway by Java once there are no more - references to the object.
              +
If the subclassed tensor allows it, releases all memory it takes up so that + the garbage collector will eventually clean it up. This memory will be + released anyway by Java once there are no more references to the object.
              Specified by:
              release in class Tensor
              @@ -281,8 +280,8 @@

              persist

              public void persist()
              Description copied from class: Tensor
              If supported by the subclassed tensor, invalidates calls to - Tensor.release() so that memory is a de-allocated only when - object references expire.
+ Tensor.release() so that memory is de-allocated only when object + references expire.
              Specified by:
              persist in class Tensor
              @@ -301,8 +300,8 @@

              put

              public Tensor put(long pos, double value)
              Description copied from class: Tensor
              -
              Assign a value to a tensor element. All tensor operations use this function to wrap - element assignments.
              +
Assigns a value to a tensor element. All tensor operations use this function + to wrap element assignments.
              Specified by:
              put in class Tensor
              @@ -319,8 +318,8 @@

              put

              get

              public double get(long pos)
              Description copied from class: Tensor
              -
              Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap - element retrieval.
              +
              Retrieves the value of a tensor element at a given position. All tensor + operations use this function to wrap element retrieval.
              Specified by:
              get in class Tensor
              @@ -336,12 +335,12 @@

              get

              traverseNonZeroElements

              public Iterator<Long> traverseNonZeroElements()
              Description copied from class: Tensor
              -
              Retrieves positions within the tensor that may hold non-zero elements. - This guarantees that all non-zero elements positions are traversed - but some of the returned positions could hold zero elements. - For example, DenseTensor traverses all - of its elements this way, whereas SparseTensor - indeed traverses only non-zero elements.
              +
              Retrieves positions within the tensor that may hold non-zero elements. This + guarantees that all non-zero elements positions are traversed but + some of the returned positions could hold zero elements. For example, + DenseTensor traverses all of its elements this + way, whereas SparseTensor indeed traverses + only non-zero elements.
              Specified by:
              traverseNonZeroElements in class Tensor
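Because traverseNonZeroElements is the member that most distinguishes these implementations, a short sketch may help. It assumes size-based constructors for mklab.JGNN.core.tensor.DenseTensor and SparseTensor, which this changeset does not itself confirm:

import java.util.Iterator;

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;
import mklab.JGNN.core.tensor.SparseTensor;

public class TraversalSketch {
    public static void main(String[] args) {
        Tensor dense = new DenseTensor(4);   // assumed size constructor
        Tensor sparse = new SparseTensor(4); // assumed size constructor
        dense.put(1, 2.0);
        sparse.put(1, 2.0);
        print(dense);  // prints 0 1 2 3: every position may hold a non-zero
        print(sparse); // prints 1: only stored entries are traversed
    }

    static void print(Tensor tensor) {
        Iterator<Long> positions = tensor.traverseNonZeroElements();
        while (positions.hasNext())
            System.out.print(positions.next() + " ");
        System.out.println();
    }
}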
diff --git a/docs/javadoc/mklab/JGNN/core/empy/EmptyTensor.html b/docs/javadoc/mklab/JGNN/core/empy/EmptyTensor.html
index f07a0315..62bbca4f 100644

Class EmptyTensor

Method Summary

EmptyTensor overrides persist(), put(long pos, double value), release(), and zeroCopy(long size); the summaries repeat the inherited descriptions listed above.

Method Details

The release, persist, put, get, and traverseNonZeroElements details repeat the Tensor documentation reproduced for EmptyMatrix above.

zeroCopy

public Tensor zeroCopy(long size)
Description copied from class: Tensor
Creates a tensor of the same class with a given size and all elements set to zero.
Specified by:
zeroCopy in class Tensor
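The release/persist pair above forms a small lifecycle protocol: persist invalidates later release calls so that views handed out to other code cannot pull memory away. A minimal sketch, again assuming the size-based DenseTensor constructor:

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;

public class LifecycleSketch {
    public static void main(String[] args) {
        Tensor shared = new DenseTensor(1_000_000); // assumed size constructor
        shared.persist();  // from here on, release() calls are invalidated
        shared.release();  // safe no-op: memory now expires only with the last reference
    }
}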
diff --git a/docs/javadoc/mklab/JGNN/core/empy/class-use/EmptyMatrix.html b/docs/javadoc/mklab/JGNN/core/empy/class-use/EmptyMatrix.html
index f5abf933..47bb7747 100644
diff --git a/docs/javadoc/mklab/JGNN/core/empy/class-use/EmptyTensor.html b/docs/javadoc/mklab/JGNN/core/empy/class-use/EmptyTensor.html
index 813b981d..8e1a1a99 100644
diff --git a/docs/javadoc/mklab/JGNN/core/empy/package-summary.html b/docs/javadoc/mklab/JGNN/core/empy/package-summary.html
index 013f4f41..dbe4a99f 100644
Package mklab.JGNN.core.empy

package mklab.JGNN.core.empy
Contains empty extensions of datatypes that hold only dimension names and sizes but no data. These types are pervasive in that any operation they are involved in has an empty outcome too. The main usage of empty data types is to verify the integrity of created models in terms of operations without actually performing any computations. For example, empty inputs are preferred for ModelBuilder.autosize(mklab.JGNN.core.Tensor...).
Author:
Emmanouil Krasanakis
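To make the package description concrete, here is a sketch of the autosize workflow it mentions. The builder expressions, the ? dimension placeholder, and the EmptyTensor size constructor follow our reading of JGNN's conventions and are illustrative assumptions, not taken from this changeset:

import mklab.JGNN.adhoc.ModelBuilder;
import mklab.JGNN.core.empy.EmptyTensor;

public class AutosizeSketch {
    public static void main(String[] args) {
        ModelBuilder builder = new ModelBuilder()
                .var("x")
                .operation("h = relu(x@matrix(?, 16) + vector(16))")
                .out("h");
        // An EmptyTensor carries only dimension information, so passing it
        // through the model verifies operation compatibility and fills in
        // the ? dimensions without performing any numerical work.
        builder.autosize(new EmptyTensor(100));
    }
}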
Related Packages

Package
Description
mklab.JGNN.core
Contains base numerical data classes, as well as supporting abstract classes.
mklab.JGNN.core.distribution
Contains data distributions that produce one numerical value and can be used for tensor value initialization.
mklab.JGNN.core.matrix
Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
mklab.JGNN.core.tensor
Contains implementations of tensor classes, as well as transparent access to parts of these classes.
mklab.JGNN.core.util
Contains utility functions that are employed internally, mainly optimized 1D and 2D iterators.

diff --git a/docs/javadoc/mklab/JGNN/core/empy/package-tree.html b/docs/javadoc/mklab/JGNN/core/empy/package-tree.html
index 955d7126..bd16bcb9 100644
diff --git a/docs/javadoc/mklab/JGNN/core/empy/package-use.html b/docs/javadoc/mklab/JGNN/core/empy/package-use.html
index 25c0b7e8..fde0b697 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/AccessCol.html b/docs/javadoc/mklab/JGNN/core/matrix/AccessCol.html
index c3248977..60ea1e29 100644

Class AccessCol

public class AccessCol extends Tensor
Accesses a column of a Matrix as if it were a dense Tensor. Prefer using Matrix.accessCol(long), which wraps usage of this class. Instances of this class share elements with the matrix which they access and do not allocate new memory.
Author:
Emmanouil Krasanakis
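A sketch of the preferred access pattern, assuming the usual new DenseMatrix(rows, cols) constructor; Matrix.accessRow(long), documented further below, behaves symmetrically:

import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.matrix.DenseMatrix;

public class ColumnViewSketch {
    public static void main(String[] args) {
        Matrix matrix = new DenseMatrix(3, 2);          // assumed (rows, cols) constructor
        Tensor column = matrix.accessCol(0);            // a view, not a copy: no new memory
        column.put(2, 5.0);                             // the write lands in the matrix itself
        System.out.println(matrix.accessCol(0).get(2)); // prints 5.0
    }
}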

Method Summary

long estimateNumNonZeroElements()
    Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors.
The remaining overridden members (get, persist, put, release, zeroCopy) repeat the inherited descriptions listed above.

Method Details

estimateNumNonZeroElements

public long estimateNumNonZeroElements()
Description copied from class: Tensor
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors. Basically, this quantity is proportional to the allocated memory.
Overrides:
estimateNumNonZeroElements in class Tensor

The put, get, zeroCopy, traverseNonZeroElements, release, and persist details repeat the Tensor documentation reproduced for EmptyMatrix and EmptyTensor above.
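The dense/sparse distinction that estimateNumNonZeroElements draws is easy to see side by side; constructors are assumed as before:

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;
import mklab.JGNN.core.tensor.SparseTensor;

public class EstimateSketch {
    public static void main(String[] args) {
        Tensor dense = new DenseTensor(1000);
        Tensor sparse = new SparseTensor(1000);
        sparse.put(7, 1.0);
        System.out.println(dense.estimateNumNonZeroElements());  // 1000: the size, proportional to allocated memory
        System.out.println(sparse.estimateNumNonZeroElements()); // 1: the actually stored entries
    }
}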
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/AccessRow.html b/docs/javadoc/mklab/JGNN/core/matrix/AccessRow.html
index 6eeaedb2..40403fed 100644

Class AccessRow

public class AccessRow extends Tensor
Accesses a row of a Matrix as if it were a dense Tensor. Prefer using Matrix.accessRow(long), which wraps usage of this class. Instances of this class share elements with the matrix which they access and do not allocate new memory.
Author:
Emmanouil Krasanakis

Method Summary

AccessRow overrides the same members as AccessCol: estimateNumNonZeroElements(), get(long pos), persist(), put(long pos, double value), release(), and zeroCopy(long size); the summaries repeat the inherited descriptions listed above.

Method Details

The estimateNumNonZeroElements, put, get, zeroCopy, traverseNonZeroElements, release, and persist details repeat the Tensor documentation reproduced for the classes above.
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/ColumnRepetition.html b/docs/javadoc/mklab/JGNN/core/matrix/ColumnRepetition.html
index 50d8be60..c51a64da 100644

Class ColumnRepetition

public class ColumnRepetition extends Matrix
Defines a matrix whose columns are all a copy of a Tensor. To avoid potential confusion, setting element values (and any operation that would do so) throws an exception.
Author:
Emmanouil Krasanakis
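A sketch of the read-only broadcasting this class provides; the (times, column) constructor argument order is an assumption to be checked against the actual signature:

import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.matrix.ColumnRepetition;
import mklab.JGNN.core.tensor.DenseTensor;

public class ColumnRepetitionSketch {
    public static void main(String[] args) {
        Tensor column = new DenseTensor(3);                // assumed size constructor
        column.put(0, 1.0).put(1, 2.0).put(2, 3.0);        // assumed put chaining via its Tensor return value
        Matrix repeated = new ColumnRepetition(3, column); // assumed (times, column) order
        System.out.println(repeated.get(1));               // reads share the tensor's elements
        repeated.put(0, 9.0);                              // throws: the matrix is read-only by design
    }
}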

Method Summary

ColumnRepetition overrides getNonZeroEntries(), persist(), put(long pos, double value), release(), and zeroCopy(long rows, long cols); the summaries repeat the inherited descriptions listed above.

Method Details

The zeroCopy, put, get, traverseNonZeroElements, getNonZeroEntries, release, and persist details repeat the Matrix and Tensor documentation reproduced for EmptyMatrix above.
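zeroCopy is the library's way of asking for a fresh container of the same class; a minimal sketch, assuming the DenseMatrix constructor:

import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.matrix.DenseMatrix;

public class ZeroCopySketch {
    public static void main(String[] args) {
        Matrix prototype = new DenseMatrix(2, 3); // assumed (rows, cols) constructor
        // Same class as the prototype, new dimensions, all elements zero.
        Matrix zeroed = prototype.zeroCopy(4, 5);
        System.out.println(zeroed.estimateNumNonZeroElements()); // 20 here: dense storage counts every element
    }
}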
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/DenseMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/DenseMatrix.html
index af2c773c..85d3a427 100644

Class DenseMatrix

public class DenseMatrix extends Matrix
Implements a dense Matrix where all elements are stored in memory. For matrices with more than MAXINT elements or with many zeros, use the SparseMatrix structure instead.
Author:
Emmanouil Krasanakis

Method Summary

matmul(Matrix with): Performs the matrix multiplication of this*with, where this is the recipient matrix.
matmul(Matrix with, boolean transposeThis, boolean transposeWith): Can be used to perform fast computation of the matrix multiplications this*with, this.transposed()*with, this*with.transposed(), and this.transposed()*with.transposed() while avoiding the overhead of calling Matrix.transposed().
The remaining overridden members (getNonZeroEntries, persist, put, release, zeroCopy) repeat the inherited descriptions listed above.

Method Details

The zeroCopy, put, get, traverseNonZeroElements, getNonZeroEntries, release, and persist details repeat the Matrix and Tensor documentation reproduced for EmptyMatrix above.

matmul

public Matrix matmul(Matrix with)
Description copied from class: Matrix
Performs the matrix multiplication of this*with, where this is the recipient matrix.
Overrides:
matmul in class Matrix

matmul

public Matrix matmul(Matrix with, boolean transposeThis, boolean transposeWith)
Description copied from class: Matrix
Can be used to perform fast computation of the matrix multiplications this*with, this.transposed()*with, this*with.transposed(), and this.transposed()*with.transposed() while avoiding the overhead of calling Matrix.transposed(). In the first of those cases, this operation becomes equivalent to Matrix.matmul(Matrix).
Overrides:
matmul in class Matrix
Parameters:
with - The matrix to multiply with.
transposeThis - Whether this matrix should be transposed before multiplication.
transposeWith - Whether the multiplied with matrix should be transposed before multiplication.
Returns:
A matrix that stores the outcome of the multiplication.
See Also:
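The three-argument overload exists purely to skip materializing transposes. A sketch with the assumed DenseMatrix constructor:

import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.matrix.DenseMatrix;

public class MatmulSketch {
    public static void main(String[] args) {
        Matrix a = new DenseMatrix(2, 3); // assumed (rows, cols) constructor
        Matrix b = new DenseMatrix(2, 4);
        // Equivalent in outcome to a.transposed().matmul(b), but the flags
        // fuse the transposition into the multiplication loop instead of
        // allocating a transposed copy first.
        Matrix product = a.matmul(b, true, false); // a^T * b, a 3x4 result
    }
}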
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/Diagonal.html b/docs/javadoc/mklab/JGNN/core/matrix/Diagonal.html
index 26c583fb..6a530985 100644

Class Diagonal

public class Diagonal extends Matrix
Implements a square matrix whose diagonal elements are determined by the corresponding values of an underlying tensor and whose off-diagonal elements are zero. Elements are shared between the matrix and its diagonal tensor. This structure is similar to a sparse matrix.
Author:
Emmanouil Krasanakis
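Since elements are shared with the underlying tensor, a Diagonal matrix behaves like a live sparse view of it. The construction below uses a hypothetical fromTensor factory; the actual entry point may be a constructor instead:

import java.util.Map;

import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.matrix.Diagonal;
import mklab.JGNN.core.tensor.DenseTensor;

public class DiagonalSketch {
    public static void main(String[] args) {
        Tensor values = new DenseTensor(3);            // assumed size constructor
        values.put(0, 1.0).put(1, 2.0).put(2, 3.0);    // assumed put chaining
        Matrix diagonal = Diagonal.fromTensor(values); // hypothetical factory
        // Like a sparse matrix, only the (i, i) pairs are traversed.
        for (Map.Entry<Long, Long> entry : diagonal.getNonZeroEntries())
            System.out.println(entry.getKey() + ", " + entry.getValue());
    }
}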

Method Summary

Diagonal overrides estimateNumNonZeroElements(), get(long pos), getNonZeroEntries(), persist(), put(long pos, double value), release(), and zeroCopy(long rows, long cols); the summaries repeat the inherited descriptions listed above.

Method Details

The estimateNumNonZeroElements, getNonZeroEntries, zeroCopy, put, get, traverseNonZeroElements, release, and persist details repeat the Matrix and Tensor documentation reproduced for the classes above.
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/RepeatMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/RepeatMatrix.html
index 11bfefa6..7d4e721a 100644

Method Summary

RepeatMatrix overrides getNonZeroEntries(), persist(), put(long pos, double value), release(), and zeroCopy(long rows, long cols); the summaries repeat the inherited descriptions listed above.

Method Details

The zeroCopy, put, get, traverseNonZeroElements, getNonZeroEntries, release, and persist details repeat the Matrix and Tensor documentation reproduced for EmptyMatrix above.
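The put/get contract quoted repeatedly above is what makes all of these views possible: since every operation funnels writes through put and reads through get, a subclass can re-route element traffic without copying. A minimal sketch:

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;

public class PutGetSketch {
    public static void main(String[] args) {
        Tensor tensor = new DenseTensor(3); // assumed size constructor
        tensor.put(0, 1.0);                 // all writes, even from library operations, pass through put
        System.out.println(tensor.get(0));  // and all reads pass through get
    }
}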
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/RowRepetition.html b/docs/javadoc/mklab/JGNN/core/matrix/RowRepetition.html
index 53b96015..eadd99d0 100644

                Class RowRepetition


                public class RowRepetition extends Matrix
                -
                Defines a matrix whose rows are all a copy of a Tensor. - To avoid potential confusion, setting element values (and all supported operations) throws - an exception.
                +
                Defines a matrix whose rows are all a copy of a Tensor. To avoid + potential confusion, setting element values (and all supported operations) + throws an exception.
                Author:
                Emmanouil Krasanakis
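A hedged usage sketch: the view below repeats one tensor as every row, so reads are cheap and writes are rejected. The (tensor, times) constructor argument order and the two-index accessors are assumptions for illustration.

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.matrix.RowRepetition;

    public class RowRepetitionSketch {
        public static void main(String[] args) {
            Tensor row = Tensor.fromRange(0, 5);          // [0, 1, 2, 3, 4]
            Matrix repeated = new RowRepetition(row, 3);  // 3 identical rows, no copying
            double value = repeated.get(1, 2);            // reads delegate to the shared row tensor
            // repeated.put(1, 2, 1.0);                   // would throw: the view rejects writes
        }
    }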
Method Summary

Iterable<Map.Entry<Long,Long>>
getNonZeroEntries()
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
void
persist()
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
Tensor
put(long pos, double value)
Assign a value to a tensor element.
void
release()
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
Matrix
zeroCopy(long rows, long cols)
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.

zeroCopy

public Matrix zeroCopy(long rows, long cols)
Description copied from class: Matrix
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.
Specified by:
zeroCopy in class Matrix

put

public Tensor put(long pos, double value)
Description copied from class: Tensor
Assign a value to a tensor element. All tensor operations use this function to wrap element assignments.
Specified by:
put in class Tensor

get

public double get(long pos)
Description copied from class: Tensor
Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
Specified by:
get in class Tensor

traverseNonZeroElements

public Iterator<Long> traverseNonZeroElements()
Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
Specified by:
traverseNonZeroElements in class Tensor

getNonZeroEntries

public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
Description copied from class: Matrix
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
Specified by:
getNonZeroEntries in class Matrix

release

public void release()
Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
Specified by:
release in class Tensor

persist

public void persist()
Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                Specified by:
                persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/SparseMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/SparseMatrix.html
index b488b2ec..c858dcb2 100644

                Class SparseMatrix


                public class SparseMatrix extends Matrix
A sparse Matrix that allocates memory only for non-zero elements. Operations that involve all matrix elements are slower compared to a DenseMatrix.
                Author:
                Emmanouil Krasanakis
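A hedged construction sketch; the (rows, cols) constructor and the two-index put overload are assumptions, matching the class contract that memory is allocated only for entries that are explicitly set.

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.matrix.SparseMatrix;

    public class SparseSketch {
        public static void main(String[] args) {
            Matrix adjacency = new SparseMatrix(100000, 100000); // no dense allocation up front
            adjacency.put(0, 1, 1.0);  // only explicitly set entries consume memory
            adjacency.put(1, 0, 1.0);
        }
    }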
Method Summary

long
estimateNumNonZeroElements()
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors.
double
get(long pos)
Retrieves the value of a tensor element at a given position.
Iterable<Map.Entry<Long,Long>>
getNonZeroEntries()
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
void
persist()
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
Tensor
put(long pos, double value)
Assign a value to a tensor element.
void
release()
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
Matrix
zeroCopy(long rows, long cols)
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.

zeroCopy

public Matrix zeroCopy(long rows, long cols)
Description copied from class: Matrix
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.
Specified by:
zeroCopy in class Matrix

put

public Tensor put(long pos, double value)
Description copied from class: Tensor
Assign a value to a tensor element. All tensor operations use this function to wrap element assignments.
Specified by:
put in class Tensor

get

public double get(long pos)
Description copied from class: Tensor
Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
Specified by:
get in class Tensor

traverseNonZeroElements

public Iterator<Long> traverseNonZeroElements()
Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
Specified by:
traverseNonZeroElements in class Tensor

estimateNumNonZeroElements

public long estimateNumNonZeroElements()
Description copied from class: Tensor
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors. Basically, this quantity is proportional to the allocated memory.
Overrides:
estimateNumNonZeroElements in class Tensor
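A hedged sketch contrasting what the estimate means for dense and sparse storage; the (rows, cols) constructors are assumptions, and the single put uses the one-dimensional position overload documented above.

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.matrix.DenseMatrix;
    import mklab.JGNN.core.matrix.SparseMatrix;

    public class EstimateSketch {
        public static void main(String[] args) {
            Matrix dense = new DenseMatrix(1000, 1000);
            Matrix sparse = new SparseMatrix(1000, 1000);
            sparse.put(0, 1.0);
            System.out.println(dense.estimateNumNonZeroElements());  // 1000000: the full size
            System.out.println(sparse.estimateNumNonZeroElements()); // 1: stored entries only
        }
    }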

getNonZeroEntries

public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
Description copied from class: Matrix
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
Specified by:
getNonZeroEntries in class Matrix

release

public void release()
Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
Specified by:
release in class Tensor

persist

public void persist()
Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                Specified by:
                persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/SparseSymmetric.html b/docs/javadoc/mklab/JGNN/core/matrix/SparseSymmetric.html
index 8f4ff2a5..c510627a 100644

                Class SparseSymmetric

                Deprecated.
                Under development.
Defines a SparseMatrix that is constrained to be symmetric in that it returns the sum of values put on elements (i, j) and (j, i).
                Author:
                Emmanouil Krasanakis
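The symmetry constraint means a value stored under one orientation is visible from both; a hedged sketch follows, noting that the class is deprecated and under development, and that the (rows, cols) constructor and two-index accessors are assumptions.

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.matrix.SparseSymmetric;

    public class SymmetricSketch {
        public static void main(String[] args) {
            Matrix symmetric = new SparseSymmetric(10, 10);  // assumed (rows, cols) constructor
            symmetric.put(2, 3, 1.5);
            // Both orientations reflect the sum of values put on (2, 3) and (3, 2).
            System.out.println(symmetric.get(2, 3) == symmetric.get(3, 2));
        }
    }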
Method Summary

Iterable<Map.Entry<Long,Long>>
getNonZeroEntries()
Deprecated.
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
void
persist()
Deprecated.
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
Tensor
put(long pos, double value)
Deprecated.
Assign a value to a tensor element.
void
release()
Deprecated.
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
Matrix
zeroCopy(long rows, long cols)
Deprecated.
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.

zeroCopy

public Matrix zeroCopy(long rows, long cols)
Deprecated.
Description copied from class: Matrix
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.
Specified by:
zeroCopy in class Matrix

put

public Tensor put(long pos, double value)
Deprecated.
Description copied from class: Tensor
Assign a value to a tensor element. All tensor operations use this function to wrap element assignments.
Specified by:
put in class Tensor

get

public double get(long pos)
Deprecated.
Description copied from class: Tensor
Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
Specified by:
get in class Tensor

traverseNonZeroElements

public Iterator<Long> traverseNonZeroElements()
Deprecated.
Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
Specified by:
traverseNonZeroElements in class Tensor

getNonZeroEntries

public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
Deprecated.
Description copied from class: Matrix
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
Specified by:
getNonZeroEntries in class Matrix

release

public void release()
Deprecated.
Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
Specified by:
release in class Tensor

persist

public void persist()
Deprecated.
Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                Specified by:
                persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/TransposedMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/TransposedMatrix.html
index 374f143a..992336e5 100644

                Class TransposedMatrix


                public class TransposedMatrix extends Matrix
Generates a transposed version of a base matrix, with which it shares elements. This avoids excessive memory allocation and can be used to quickly perform operations with a transposed version of a matrix. Prefer using Matrix.asTransposed(), which wraps usage of this class.
                Author:
                Emmanouil Krasanakis
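Since the transposed view shares elements with its base, edits propagate both ways; a hedged sketch (the DenseMatrix constructor and two-index accessors are assumptions):

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.matrix.DenseMatrix;

    public class TransposeViewSketch {
        public static void main(String[] args) {
            Matrix base = new DenseMatrix(2, 3);
            Matrix view = base.asTransposed();  // 3x2 view, no extra allocation
            view.put(2, 0, 5.0);                // also visible as base position (0, 2)
            System.out.println(base.get(0, 2)); // 5.0
        }
    }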
Method Summary

Matrix
asTransposed()
Creates a transposed version of the matrix that accesses the same elements (thus, editing one edits the other) without allocating additional memory.
long
estimateNumNonZeroElements()
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors.
double
get(long pos)
Retrieves the value of a tensor element at a given position.
Iterable<Map.Entry<Long,Long>>
getNonZeroEntries()
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
void
persist()
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
Tensor
put(long pos, double value)
Assign a value to a tensor element.
void
release()
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
Matrix
zeroCopy(long rows, long cols)
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.

Method Details

estimateNumNonZeroElements

public long estimateNumNonZeroElements()
Description copied from class: Tensor
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors. Basically, this quantity is proportional to the allocated memory.
Overrides:
estimateNumNonZeroElements in class Tensor

getNonZeroEntries

public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
Description copied from class: Matrix
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
Specified by:
getNonZeroEntries in class Matrix

zeroCopy

public Matrix zeroCopy(long rows, long cols)
Description copied from class: Matrix
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.
Specified by:
zeroCopy in class Matrix

put

public Tensor put(long pos, double value)
Description copied from class: Tensor
Assign a value to a tensor element. All tensor operations use this function to wrap element assignments.
Specified by:
put in class Tensor

get

public double get(long pos)
Description copied from class: Tensor
Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
Specified by:
get in class Tensor

traverseNonZeroElements

public Iterator<Long> traverseNonZeroElements()
Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
Specified by:
traverseNonZeroElements in class Tensor

asTransposed

public Matrix asTransposed()
Description copied from class: Matrix
Creates a transposed version of the matrix that accesses the same elements (thus, editing one edits the other) without allocating additional memory.
Overrides:
asTransposed in class Matrix

release

public void release()
Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
Specified by:
release in class Tensor

persist

public void persist()
Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                Specified by:
                persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/VectorizedMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/VectorizedMatrix.html
index 7c7913e9..1bc444da 100644

                Class VectorizedMatrix

                public class VectorizedMatrix extends Matrix
Implements a dense Matrix where all elements are stored in memory. For matrices with more than MAXINT number of elements or many zeros, use the SparseMatrix structure.
                Author:
                Emmanouil Krasanakis
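A hedged sketch of the storage choice this description implies; the (rows, cols) constructors are assumptions:

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.matrix.SparseMatrix;
    import mklab.JGNN.core.matrix.VectorizedMatrix;

    public class StorageChoiceSketch {
        public static void main(String[] args) {
            Matrix weights = new VectorizedMatrix(256, 256);       // small, dense parameters
            Matrix adjacency = new SparseMatrix(1000000, 1000000); // huge, mostly zeros
        }
    }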
Method Summary

Iterable<Map.Entry<Long,Long>>
getNonZeroEntries()
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
Matrix
matmul(Matrix with)
Performs the matrix multiplication of this*with and the recipient.
Matrix
matmul(Matrix with, boolean transposeThis, boolean transposeWith)
Can be used to perform fast computation of the matrix multiplications
this*with,
this.transposed()*with,
this*with.transposed(),
this.transposed()*with.transposed()
while avoiding the overhead of calling Matrix.transposed().
void
persist()
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
Tensor
put(long pos, double value)
Assign a value to a tensor element.
void
release()
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
Matrix
zeroCopy(long rows, long cols)
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.

zeroCopy

public Matrix zeroCopy(long rows, long cols)
Description copied from class: Matrix
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.
Specified by:
zeroCopy in class Matrix

put

public Tensor put(long pos, double value)
Description copied from class: Tensor
Assign a value to a tensor element. All tensor operations use this function to wrap element assignments.
Specified by:
put in class Tensor

get

public double get(long pos)
Description copied from class: Tensor
Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
Specified by:
get in class Tensor

traverseNonZeroElements

public Iterator<Long> traverseNonZeroElements()
Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
Specified by:
traverseNonZeroElements in class Tensor

getNonZeroEntries

public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
Description copied from class: Matrix
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
Specified by:
getNonZeroEntries in class Matrix

release

public void release()
Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
Specified by:
release in class Tensor

persist

public void persist()
Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                Specified by:
                persist in class Tensor
matmul

public Matrix matmul(Matrix with)
Description copied from class: Matrix
Performs the matrix multiplication of this*with and the recipient.
                Overrides:
                matmul in class Matrix
matmul

public Matrix matmul(Matrix with, boolean transposeThis, boolean transposeWith)
Description copied from class: Matrix
Can be used to perform fast computation of the matrix multiplications
this*with,
this.transposed()*with,
this*with.transposed(),
this.transposed()*with.transposed()
while avoiding the overhead of calling Matrix.transposed(). In the first of those cases, this operation becomes equivalent to Matrix.matmul(Matrix).
                Overrides:
                matmul in class Matrix
                Parameters:
                with - The matrix to multiply with.
transposeThis - Whether this matrix should be transposed before multiplication.
transposeWith - Whether the multiplied with matrix should be transposed before multiplication.
                Returns:
                A matrix that stores the outcome of the multiplication.
                See Also:
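The transposition flags make the common pattern this.transposed()*with cheap; a hedged sketch (matrix shapes chosen purely for illustration, constructors assumed):

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.matrix.VectorizedMatrix;

    public class MatmulSketch {
        public static void main(String[] args) {
            Matrix x = new VectorizedMatrix(100, 16);
            Matrix y = new VectorizedMatrix(100, 16);
            // Computes x.transposed()*y (a 16x16 result) without materializing the transpose.
            Matrix gram = x.matmul(y, true, false);
        }
    }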
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/WrapCols.html b/docs/javadoc/mklab/JGNN/core/matrix/WrapCols.html
index 4d59bf91..2feb6cb8 100644

                Class WrapCols


                public class WrapCols extends Matrix
Wraps a list of tensors into a matrix with the tensors as columns. Does not allocate additional elements. Editing the matrix edits the original tensors and conversely.
                Author:
                Emmanouil Krasanakis
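A hedged sketch of the wrapping behavior; the varargs constructor form, the DenseTensor constructor, and the two-index put are assumptions:

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.matrix.WrapCols;
    import mklab.JGNN.core.tensor.DenseTensor;

    public class WrapColsSketch {
        public static void main(String[] args) {
            Tensor col0 = new DenseTensor(3);
            Tensor col1 = new DenseTensor(3);
            Matrix matrix = new WrapCols(col0, col1);  // 3x2 view over the two tensors
            matrix.put(1, 1, 0.5);                     // writes flow through to col1
        }
    }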
Method Summary

long
estimateNumNonZeroElements()
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors.
double
get(long pos)
Retrieves the value of a tensor element at a given position.
Iterable<Map.Entry<Long,Long>>
getNonZeroEntries()
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
void
persist()
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
Tensor
put(long pos, double value)
Assign a value to a tensor element.
void
release()
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
setZeroCopyType(Matrix zeroCopyType)
Sets a prototype matrix from which to borrow copying operations.
Matrix
zeroCopy(long rows, long cols)
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.

Method Details

estimateNumNonZeroElements

public long estimateNumNonZeroElements()
Description copied from class: Tensor
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors. Basically, this quantity is proportional to the allocated memory.
Overrides:
estimateNumNonZeroElements in class Tensor

setZeroCopyType

Sets a prototype matrix from which to borrow copying operations.
Parameters:
zeroCopyType - A Matrix instance from which to borrow zeroCopy(long, long).
Returns:
this object
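A hedged usage note: the prototype only affects which matrix class zeroCopy(rows, cols) instantiates, as sketched below (continuing the WrapCols sketch above; the prototype's dimensions are assumed irrelevant here, and SparseMatrix is imported from mklab.JGNN.core.matrix).

    // Inside the previous sketch's main method, with the wrapper typed explicitly:
    WrapCols wrapped = new WrapCols(col0, col1);
    wrapped.setZeroCopyType(new SparseMatrix(1, 1)); // prototype instance
    Matrix copy = wrapped.zeroCopy(5, 5);            // borrows the prototype's class: a SparseMatrix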
zeroCopy

public Matrix zeroCopy(long rows, long cols)
Description copied from class: Matrix
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.
Specified by:
zeroCopy in class Matrix

put

public Tensor put(long pos, double value)
Description copied from class: Tensor
Assign a value to a tensor element. All tensor operations use this function to wrap element assignments.
Specified by:
put in class Tensor

get

public double get(long pos)
Description copied from class: Tensor
Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
Specified by:
get in class Tensor

traverseNonZeroElements

public Iterator<Long> traverseNonZeroElements()
Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
Specified by:
traverseNonZeroElements in class Tensor

getNonZeroEntries

public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
Description copied from class: Matrix
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
Specified by:
getNonZeroEntries in class Matrix

accessCol

public Tensor accessCol(long col)
Description copied from class: Matrix
Retrieves the given column as a tensor. Editing the result also edits the original matrix. No new memory is allocated for matrix values.
                Overrides:
                accessCol in class Matrix
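A hedged sketch of the zero-copy column access described above, again continuing the WrapCols sketch:

    // Inside the previous sketch's main method:
    Tensor column = matrix.accessCol(0);  // live view of col0, no allocation
    column.put(2, 9.0);                   // visible through the matrix and through col0 itself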
release

public void release()
Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
Specified by:
release in class Tensor

persist

public void persist()
Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                Specified by:
                persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/WrapRows.html b/docs/javadoc/mklab/JGNN/core/matrix/WrapRows.html
index 8eaad533..558d7805 100644

                Method Summary

long
estimateNumNonZeroElements()
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors.
double
get(long pos)
Retrieves the value of a tensor element at a given position.
Iterable<Map.Entry<Long,Long>>
getNonZeroEntries()
Retrieves an iterable that traverses (row, col) entry pairs of non-zero entries.
void
persist()
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
Tensor
put(long pos, double value)
Assign a value to a tensor element.
void
release()
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
setZeroCopyType(Matrix zeroCopyType)
Sets a prototype matrix from which to borrow copying operations.
Matrix
zeroCopy(long rows, long cols)
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.

                Method Details

                estimateNumNonZeroElements

                public long estimateNumNonZeroElements()
                Description copied from class: Tensor
Provides an estimation of the number of non-zero elements stored in the tensor, where this number is equal to the size for dense tensors, but equal to the actual number of non-zero elements for sparse tensors. Basically, this quantity is proportional to the allocated memory.
Overrides:
estimateNumNonZeroElements in class Tensor

zeroCopy

public Matrix zeroCopy(long rows, long cols)
Description copied from class: Matrix
Creates a matrix of the same class with all elements set to zero, but with a given number of rows and columns.
Specified by:
zeroCopy in class Matrix

                put

                public Tensor put(long pos, double value)
                Description copied from class: Tensor
                -
                Assign a value to a tensor element. All tensor operations use this function to wrap - element assignments.
                +
                Assign a value to a tensor element. All tensor operations use this function + to wrap element assignments.
                Specified by:
                put in class Tensor
                @@ -324,8 +324,8 @@

                put

                get

                public double get(long pos)
                Description copied from class: Tensor
                -
                Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap - element retrieval.
                +
                Retrieves the value of a tensor element at a given position. All tensor + operations use this function to wrap element retrieval.
                Specified by:
                get in class Tensor
                @@ -341,12 +341,12 @@

                get

                traverseNonZeroElements

                public Iterator<Long> traverseNonZeroElements()
                Description copied from class: Tensor
                -
                Retrieves positions within the tensor that may hold non-zero elements. - This guarantees that all non-zero elements positions are traversed - but some of the returned positions could hold zero elements. - For example, DenseTensor traverses all - of its elements this way, whereas SparseTensor - indeed traverses only non-zero elements.
                +
                Retrieves positions within the tensor that may hold non-zero elements. This + guarantees that all non-zero elements positions are traversed but + some of the returned positions could hold zero elements. For example, + DenseTensor traverses all of its elements this + way, whereas SparseTensor indeed traverses + only non-zero elements.
                Specified by:
                traverseNonZeroElements in class Tensor
                @@ -360,8 +360,8 @@

                traverseNonZeroElements

                getNonZeroEntries

                public Iterable<Map.Entry<Long,Long>> getNonZeroEntries()
                Description copied from class: Matrix
Retrieves an iterable that traverses (row, col) pairs of non-zero entries.
                Specified by:
                getNonZeroEntries in class Matrix
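A minimal sketch of consuming those (row, col) pairs; the Matrix put(row, col, value) and get(row, col) overloads are assumptions here:

    import java.util.Map;
    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.matrix.SparseMatrix;

    public class EntriesSketch {
        public static void main(String[] args) {
            Matrix matrix = new SparseMatrix(3, 3);
            matrix.put(0, 2, 1.0); // assumed (row, col, value) overload
            for (Map.Entry<Long, Long> entry : matrix.getNonZeroEntries())
                System.out.println(entry.getKey() + "," + entry.getValue()
                        + " -> " + matrix.get(entry.getKey(), entry.getValue()));
        }
    }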
                @@ -381,9 +381,8 @@

                getNonZeroEntries

                accessRow

                public Tensor accessRow(long row)
                Description copied from class: Matrix
Retrieves the given row as a tensor. Editing the result also edits the original matrix. No new memory is allocated for matrix values.
                Overrides:
                accessRow in class Matrix
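Because the result is a view rather than a copy, writes propagate back; a sketch (the DenseMatrix(rows, cols) constructor and two-argument get are assumptions):

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.matrix.DenseMatrix;

    public class AccessRowSketch {
        public static void main(String[] args) {
            Matrix matrix = new DenseMatrix(2, 3);
            Tensor row = matrix.accessRow(0); // a view, not a copy
            row.put(1, 5.0);                  // writes through to the matrix
            System.out.println(matrix.get(0, 1)); // 5.0
        }
    }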
                @@ -409,10 +408,9 @@

                accessRow

                release

                public void release()
                Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
                Specified by:
                release in class Tensor
                @@ -431,8 +429,8 @@

                persist

                public void persist()
                Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                Specified by:
                persist in class Tensor
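A sketch of the intended lifecycle, as described by the two methods above:

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.tensor.DenseTensor;

    public class LifecycleSketch {
        public static void main(String[] args) {
            Tensor features = new DenseTensor(1024);
            features.persist(); // later release() calls are invalidated
            features.release(); // no effect after persist(); Java reclaims memory when references expire
        }
    }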
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/AccessCol.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/AccessCol.html
index 9a90d33d..b5f96786 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/AccessRow.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/AccessRow.html
index c0713668..ec7acae7 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/ColumnRepetition.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/ColumnRepetition.html
index ae15dfe9..29e5601a 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/DenseMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/DenseMatrix.html
index 21ffef49..d7292f22 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/Diagonal.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/Diagonal.html
index f848dfd8..b3589bea 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/RepeatMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/RepeatMatrix.html
index 4da2565c..c9bf669e 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/RowRepetition.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/RowRepetition.html
index 441bbda0..5dc2c5f1 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/SparseMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/SparseMatrix.html
index 3caf74f9..372e6331 100644
@@ -57,7 +57,9 @@

                Uses
                Package
                Description
Contains base numerical data classes, as well as supporting abstract classes.
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/SparseSymmetric.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/SparseSymmetric.html
index fab4171d..43fc852c 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/TransposedMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/TransposedMatrix.html
index 70400140..ed9401fa 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/VectorizedMatrix.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/VectorizedMatrix.html
index df7cf8cf..5a909684 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/WrapCols.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/WrapCols.html
index 43dc5477..4cdb1562 100644
@@ -57,9 +57,15 @@

                  Uses of
                  Package
                  Description
Contains base numerical data classes, as well as supporting abstract classes.
Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
                    @@ -74,7 +80,8 @@

                    Uses of W
                    Tensor.asColumn()
Accesses the tensor through a single-column matrix with the tensor as its only column.

diff --git a/docs/javadoc/mklab/JGNN/core/matrix/class-use/WrapRows.html b/docs/javadoc/mklab/JGNN/core/matrix/class-use/WrapRows.html
index dffa483f..a97434ff 100644
@@ -57,9 +57,15 @@

                  Uses of
                  Package
                  Description
Contains base numerical data classes, as well as supporting abstract classes.
Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
                    @@ -74,7 +80,8 @@

                    Uses of W
                    Tensor.asRow()
Accesses the tensor through a single-row matrix with the tensor as its only row.
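A sketch contrasting the two views; the getRows()/getCols() accessors are assumed here:

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.Tensor;

    public class WrapSketch {
        public static void main(String[] args) {
            Tensor tensor = Tensor.fromRange(0, 4); // [0, 1, 2, 3]
            Matrix column = tensor.asColumn();      // 4x1 view over the same values
            Matrix row = tensor.asRow();            // 1x4 view over the same values
            System.out.println(column.getRows() + "x" + column.getCols()); // 4x1
        }
    }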

diff --git a/docs/javadoc/mklab/JGNN/core/matrix/package-summary.html b/docs/javadoc/mklab/JGNN/core/matrix/package-summary.html
index d1906a46..6ba6fe54 100644
@@ -42,7 +42,7 @@
                  @@ -62,6 +62,15 @@

                  Package mklab.JGNN.core


                  package mklab.JGNN.core.matrix
Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices. Matrices rely on tensors for density/sparsity.
Author: Emmanouil Krasanakis
                  • @@ -71,15 +80,29 @@

                    Package mklab.JGNN.core
                    Package
                    Description
Contains base numerical data classes, as well as supporting abstract classes.
Contains data distributions that produce one numerical value and can be used for tensor value initialization.
Contains empty extensions of datatypes that hold only dimension names and sizes but no data.
Contains implementations of tensor classes, as well as transparent access to parts of these classes.
Contains utility functions that are employed internally, mainly optimized 1D and 2D iterators.

                  • @@ -107,8 +130,8 @@

                    Package mklab.JGNN.core
Implements a square matrix whose diagonal elements are determined by the corresponding values of an underlying tensor and whose off-diagonal elements are zero.
                    @@ -128,7 +151,8 @@

                    Package mklab.JGNN.core

Generates a transposed version of a base matrix, with which it shares elements.
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/package-tree.html b/docs/javadoc/mklab/JGNN/core/matrix/package-tree.html
index 7ab9201b..8bbaa6c1 100644
diff --git a/docs/javadoc/mklab/JGNN/core/matrix/package-use.html b/docs/javadoc/mklab/JGNN/core/matrix/package-use.html
index 7b16a886..46d61d27 100644
@@ -57,9 +57,15 @@

                    Uses of Package
                    Package
                    Description
Contains base numerical data classes, as well as supporting abstract classes.
Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.

diff --git a/docs/javadoc/mklab/JGNN/core/package-summary.html b/docs/javadoc/mklab/JGNN/core/package-summary.html
index 08db3450..70f4e4f1 100644
@@ -46,7 +46,7 @@
                      @@ -66,6 +66,19 @@

                      Package mklab.JGNN.core


                      package mklab.JGNN.core
Contains base numerical data classes, as well as supporting abstract classes. Provided classes are endowed with binary, unary, and in-place editing operations. Sub-packages are responsible for implementing variations, for example organizing data in sparse, dense, or SIMD dense tensors, or providing views to segments of larger structures. This package and its sub-packages can be used as a standalone product for vector/tensor and matrix operations using only native Java.
Author: Emmanouil Krasanakis
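A hedged sketch of that standalone use; the Matrix (rows, cols) constructor, the (row, col, value) put overload, and transform as the matrix-vector product are assumptions here:

    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.matrix.DenseMatrix;
    import mklab.JGNN.core.tensor.DenseTensor;

    public class StandaloneSketch {
        public static void main(String[] args) {
            Matrix weights = new DenseMatrix(2, 3);   // assumed (rows, cols) constructor
            weights.put(0, 0, 1.0);                   // assumed (row, col, value) overload
            Tensor input = new DenseTensor(3).put(0, 2.0);
            Tensor output = weights.transform(input); // assumed matrix-vector product
            System.out.println(output.get(0));        // 2.0
        }
    }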
                      • @@ -75,15 +88,31 @@

                        Package mklab.JGNN.core

                        Package
                        Description
Contains data distributions that produce one numerical value and can be used for tensor value initialization.
Contains empty extensions of datatypes that hold only dimension names and sizes but no data.
Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
Contains implementations of tensor classes, as well as transparent access to parts of these classes.
Contains utility functions that are employed internally, mainly optimized 1D and 2D iterators.
                      • @@ -96,9 +125,8 @@

                        Package mklab.JGNN.core

                        Description
This interface abstracts a probability distribution that can be passed to Tensor.setToRandom(Distribution) for random tensor initialization.
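For instance, a sketch of random initialization; the Uniform(from, to) constructor in mklab.JGNN.core.distribution is an assumption here:

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.distribution.Uniform;
    import mklab.JGNN.core.tensor.DenseTensor;

    public class RandomInitSketch {
        public static void main(String[] args) {
            Tensor tensor = new DenseTensor(5);
            tensor.setToRandom(new Uniform(0, 1)); // each element drawn independently
            System.out.println(tensor.get(0));
        }
    }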
                        @@ -106,14 +134,15 @@

                        Package mklab.JGNN.core

A memory management system for thread-safe allocation and release of arrays of doubles.
                         
This class provides an interface with which to define data slices, for instance to sample labels.
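A sketch of slicing samples into the train/validation/test fractions used throughout the examples; the helper method itself is hypothetical:

    import mklab.JGNN.core.Slice;

    public class SplitSketch {
        // Hypothetical helper: splits an existing sample slice into train/validation/test.
        static void split(Slice samples) {
            Slice train = samples.range(0, 0.6);   // first 60% of samples
            Slice valid = samples.range(0.6, 0.8); // next 20%
            Slice test  = samples.range(0.8, 1.0); // final 20%
            System.out.println(train.size() + " / " + valid.size() + " / " + test.size());
        }
    }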
                        @@ -121,8 +150,9 @@

                        Package mklab.JGNN.core

This class provides thread execution pool utilities while keeping track of thread identifiers for use by thread-specific NNOperation.
diff --git a/docs/javadoc/mklab/JGNN/core/package-tree.html b/docs/javadoc/mklab/JGNN/core/package-tree.html
index 54b214e7..97282853 100644
diff --git a/docs/javadoc/mklab/JGNN/core/package-use.html b/docs/javadoc/mklab/JGNN/core/package-use.html
index caed193e..917a4d97 100644
@@ -57,37 +57,83 @@

                        Uses of Package
                        mkl
                        Package
                        Description
Contains classes that simplify data loading, model building, and training.
Contains model builders that parse expressions of the Neuralang scripting language to simplify mathematical parts of the definitions.
Contains model training strategies that correspond to different predictive tasks.
Contains base numerical data classes, as well as supporting abstract classes.
Contains data distributions that produce one numerical value and can be used for tensor value initialization.
Contains empty extensions of datatypes that hold only dimension names and sizes but no data.
Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
Contains implementations of tensor classes, as well as transparent access to parts of these classes.
Contains utility functions that are employed internally, mainly optimized 1D and 2D iterators.
Implements neural network components that are combined to define GNNs or other types of machine learning models.
Implements activation functions to be used as model operations.
Contains various types of neural architecture inputs.
Contains classes for instantiating loss functions.
Contains losses that wrap other losses and augment their numeric computations with live reporting of the training status.
Contains popular neural network and GNN operations.
Contains optimizers that can be used to update training losses.
Contains pooling/reduction operations that reduce the dimensions of inputs.
                          @@ -103,8 +149,8 @@

                          Uses of Package
                          mkl
This class provides an interface with which to define data slices, for instance to sample labels.
                          @@ -131,6 +177,24 @@

                          Uses of Package
                          mkl

                      • +
                        + +
                        +
                        Class
                        +
                        Description
                        + +
                        +
                        This class provides an abstract implementation of Matrix functionalities.
                        +
                        + +
                        +
This class provides an interface with which to define data slices, for instance to sample labels.
                        +
                        +
                        +
                        +
                      • +
                      • Classes in mklab.JGNN.core used by mklab.JGNN.core
                        @@ -138,9 +202,8 @@

                        Uses of Package
                        mkl
                        Description
This interface abstracts a probability distribution that can be passed to Tensor.setToRandom(Distribution) for random tensor initialization.
                        @@ -150,8 +213,8 @@

                        Uses of Package
                        mkl
                         
This class provides an interface with which to define data slices, for instance to sample labels.
                        @@ -159,8 +222,9 @@

                        Uses of Package
                        mkl

This class provides thread execution pool utilities while keeping track of thread identifiers for use by thread-specific NNOperation.

                        @@ -173,9 +237,8 @@

                        Uses of Package
                        mkl
                        Description
This interface abstracts a probability distribution that can be passed to Tensor.setToRandom(Distribution) for random tensor initialization.

                      @@ -252,8 +315,8 @@

                      Uses of Package
                      mkl
This class provides an interface with which to define data slices, for instance to sample labels.
diff --git a/docs/javadoc/mklab/JGNN/core/tensor/AccessSubtensor.html b/docs/javadoc/mklab/JGNN/core/tensor/AccessSubtensor.html
index 7e64b2c4..e680abea 100644
@@ -149,8 +149,8 @@

                      Method Summary

If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                      put(long pos, @@ -161,8 +161,8 @@

                      Method Summary

                      void
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
                      @@ -172,7 +172,8 @@

                      Method Summary

                      zeroCopy(long size)
Creates a tensor of the same class with a given size and all elements set to zero.
                      @@ -239,8 +240,8 @@

                      put

                      public Tensor put(long pos, double value)
                      Description copied from class: Tensor
Assigns a value to a tensor element. All tensor operations use this function to wrap element assignments.
                      Specified by:
                      put in class Tensor
                      @@ -257,8 +258,8 @@

                      put

                      get

                      public double get(long pos)
                      Description copied from class: Tensor
Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
                      Specified by:
                      get in class Tensor
                      @@ -274,7 +275,8 @@

                      get

                      zeroCopy

                      public Tensor zeroCopy(long size)
                      Description copied from class: Tensor
Creates a tensor of the same class with a given size and all elements set to zero.
                      Specified by:
                      zeroCopy in class Tensor
                      @@ -296,12 +298,12 @@

                      zeroCopy

                      traverseNonZeroElements

                      public Iterator<Long> traverseNonZeroElements()
                      Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
                      Specified by:
                      traverseNonZeroElements in class Tensor
                      @@ -315,10 +317,9 @@

                      traverseNonZeroElements

                      release

                      public void release()
                      Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
                      Specified by:
                      release in class Tensor
                      @@ -337,8 +338,8 @@

                      persist

                      public void persist()
                      Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                      Specified by:
                      persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/tensor/DenseTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/DenseTensor.html
index af1c0b48..95f5f76a 100644
@@ -198,8 +198,8 @@

                      Method Summary

If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
                      final Tensor
                      put(int pos, @@ -218,8 +218,8 @@

                      Method Summary

                      void
                      -
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
                      +
                      If the subclassed tensor allows it, release all memory it takes up so that + the garbage collector will eventually clean it up.
                      @@ -249,17 +249,20 @@

                      Method Summary

                      -
                      Performs in-memory set of each element to the logarithm of its absolute value.
                      +
                      Performs in-memory set of each element to the logarithm of its absolute + value.
                      selfMultiply(double value)
                      -
                      Performs in-memory multiplication on the Tensor, storing the result to itself.
                      +
                      Performs in-memory multiplication on the Tensor, storing the result to + itself.
                      -
                      Performs in-memory multiplication on the Tensor, storing the result in itself .
                      +
                      Performs in-memory multiplication on the Tensor, storing the result in itself + .
                      @@ -269,7 +272,8 @@

                      Method Summary

                      -
                      Performs in-memory set of each element to the square root of its absolute value.
                      +
                      Performs in-memory set of each element to the square root of its absolute + value.
                      @@ -292,7 +296,8 @@

                      Method Summary

                      zeroCopy(long size)
                      -
                      Creates a tensor of the same class with a given size and all element set to zero.
                      +
Creates a tensor of the same class with a given size and all elements set to zero.
                      @@ -394,8 +399,8 @@

                      put

                      public final Tensor put(long pos, double value)
                      Description copied from class: Tensor
                      -
                      Assign a value to a tensor element. All tensor operations use this function to wrap - element assignments.
                      +
                      Assign a value to a tensor element. All tensor operations use this function + to wrap element assignments.
                      Specified by:
                      put in class Tensor
                      @@ -426,8 +431,8 @@

                      putAdd

                      get

                      public final double get(long pos)
                      Description copied from class: Tensor
                      -
                      Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap - element retrieval.
                      +
                      Retrieves the value of a tensor element at a given position. All tensor + operations use this function to wrap element retrieval.
                      Specified by:
                      get in class Tensor
                      @@ -449,7 +454,8 @@

                      get

                      zeroCopy

                      public Tensor zeroCopy(long size)
                      Description copied from class: Tensor
                      -
                      Creates a tensor of the same class with a given size and all element set to zero.
                      +
                      Creates a tensor of the same class with a given size and all element set to + zero.
                      Specified by:
                      zeroCopy in class Tensor
                      @@ -471,12 +477,12 @@

                      zeroCopy

                      traverseNonZeroElements

                      public Iterator<Long> traverseNonZeroElements()
                      Description copied from class: Tensor
                      -
                      Retrieves positions within the tensor that may hold non-zero elements. - This guarantees that all non-zero elements positions are traversed - but some of the returned positions could hold zero elements. - For example, DenseTensor traverses all - of its elements this way, whereas SparseTensor - indeed traverses only non-zero elements.
                      +
                      Retrieves positions within the tensor that may hold non-zero elements. This + guarantees that all non-zero elements positions are traversed but + some of the returned positions could hold zero elements. For example, + DenseTensor traverses all of its elements this + way, whereas SparseTensor indeed traverses + only non-zero elements.
                      Specified by:
                      traverseNonZeroElements in class Tensor
                      @@ -490,10 +496,9 @@

                      traverseNonZeroElements

                      release

                      public void release()
                      Description copied from class: Tensor
                      -
                      If the subclassed tensor allows it, release all memory it takes up - so that the garbage collector will eventually clean it up. This - memory will be released anyway by Java once there are no more - references to the object.
                      +
                      If the subclassed tensor allows it, release all memory it takes up so that + the garbage collector will eventually clean it up. This memory will be + released anyway by Java once there are no more references to the object.
                      Specified by:
                      release in class Tensor
                      @@ -512,8 +517,8 @@

                      persist

                      public void persist()
                      Description copied from class: Tensor
                      If supported by the subclassed tensor, invalidates calls to - Tensor.release() so that memory is a de-allocated only when - object references expire.
                      + Tensor.release() so that memory is a de-allocated only when object + references expire.
                      Specified by:
                      persist in class Tensor
                      @@ -649,12 +654,14 @@

                      multiply

                      selfMultiply

                      public Tensor selfMultiply(Tensor tensor)
                      Description copied from class: Tensor
                      -
                      Performs in-memory multiplication on the Tensor, storing the result in itself .
                      +
                      Performs in-memory multiplication on the Tensor, storing the result in itself + .
                      Overrides:
                      selfMultiply in class Tensor
                      Parameters:
                      -
                      tensor - The tensor to perform element-wise multiplication with (it's not affected).
                      +
                      tensor - The tensor to perform element-wise multiplication with (it's + not affected).
                      Returns:
                      this Tensor instance.
                      @@ -665,7 +672,8 @@

                      selfMultiply

                      selfMultiply

                      public Tensor selfMultiply(double value)
                      Description copied from class: Tensor
                      -
                      Performs in-memory multiplication on the Tensor, storing the result to itself.
                      +
                      Performs in-memory multiplication on the Tensor, storing the result to + itself.
                      Overrides:
                      selfMultiply in class Tensor
                      @@ -686,7 +694,8 @@

                      sqrt

                      Overrides:
                      sqrt in class Tensor
                      Returns:
                      -
                      A new Tensor that stores the outcome of finding the absolute square root of each element.
                      +
                      A new Tensor that stores the outcome of finding the absolute square + root of each element.

                    @@ -695,7 +704,8 @@

                    sqrt

                    selfSqrt

                    public Tensor selfSqrt()
                    Description copied from class: Tensor
                    -
                    Performs in-memory set of each element to the square root of its absolute value.
                    +
                    Performs in-memory set of each element to the square root of its absolute + value.
                    Overrides:
                    selfSqrt in class Tensor
                    @@ -714,7 +724,8 @@

                    expMinusOne

                    Overrides:
                    expMinusOne in class Tensor
                    Returns:
                    -
                    A new Tensor that stores the outcome of finding the operation on each element.
                    +
                    A new Tensor that stores the outcome of finding the operation on each + element.

                  @@ -742,7 +753,8 @@

                  log

                  Overrides:
                  log in class Tensor
                  Returns:
                  -
                  A new Tensor that stores the outcome of finding the logarithm of the absolute of each element.
                  +
                  A new Tensor that stores the outcome of finding the logarithm of the + absolute of each element.

              @@ -751,7 +763,8 @@

              log

              selfLog

              public Tensor selfLog()
              Description copied from class: Tensor
              -
              Performs in-memory set of each element to the logarithm of its absolute value.
              +
              Performs in-memory set of each element to the logarithm of its absolute + value.
              Overrides:
              selfLog in class Tensor
              @@ -770,7 +783,8 @@

              negative

              Overrides:
              negative in class Tensor
              Returns:
              -
              A new Tensor that stores the outcome of finding the negative of each element.
              +
              A new Tensor that stores the outcome of finding the negative of each + element.

            @@ -798,7 +812,8 @@

            abs

            Overrides:
            abs in class Tensor
            Returns:
            -
            A new Tensor that stores the outcome of finding the absolute value of each element.
            +
            A new Tensor that stores the outcome of finding the absolute value of + each element.

diff --git a/docs/javadoc/mklab/JGNN/core/tensor/RepeatTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/RepeatTensor.html
index 350303bd..f49ff874 100644
@@ -139,8 +139,8 @@

      Method Summary

If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
      final Tensor
      put(long pos, @@ -151,8 +151,8 @@

      Method Summary

      void
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
      @@ -162,7 +162,8 @@

      Method Summary

      zeroCopy(long size)
      -
      Creates a tensor of the same class with a given size and all element set to zero.
      +
      Creates a tensor of the same class with a given size and all element set to + zero.
      @@ -208,8 +209,8 @@

      put

      public final Tensor put(long pos, double value)
      Description copied from class: Tensor
      -
      Assign a value to a tensor element. All tensor operations use this function to wrap - element assignments.
      +
      Assign a value to a tensor element. All tensor operations use this function + to wrap element assignments.
      Specified by:
      put in class Tensor
      @@ -226,8 +227,8 @@

      put

      get

      public final double get(long pos)
      Description copied from class: Tensor
      -
      Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap - element retrieval.
      +
      Retrieves the value of a tensor element at a given position. All tensor + operations use this function to wrap element retrieval.
      Specified by:
      get in class Tensor
      @@ -243,7 +244,8 @@

      get

      zeroCopy

      public Tensor zeroCopy(long size)
      Description copied from class: Tensor
      -
      Creates a tensor of the same class with a given size and all element set to zero.
      +
      Creates a tensor of the same class with a given size and all element set to + zero.
      Specified by:
      zeroCopy in class Tensor
      @@ -265,12 +267,12 @@

      zeroCopy

      traverseNonZeroElements

      public Iterator<Long> traverseNonZeroElements()
      Description copied from class: Tensor
      -
      Retrieves positions within the tensor that may hold non-zero elements. - This guarantees that all non-zero elements positions are traversed - but some of the returned positions could hold zero elements. - For example, DenseTensor traverses all - of its elements this way, whereas SparseTensor - indeed traverses only non-zero elements.
      +
      Retrieves positions within the tensor that may hold non-zero elements. This + guarantees that all non-zero elements positions are traversed but + some of the returned positions could hold zero elements. For example, + DenseTensor traverses all of its elements this + way, whereas SparseTensor indeed traverses + only non-zero elements.
      Specified by:
      traverseNonZeroElements in class Tensor
      @@ -284,10 +286,9 @@

      traverseNonZeroElements

      release

      public void release()
      Description copied from class: Tensor
      -
      If the subclassed tensor allows it, release all memory it takes up - so that the garbage collector will eventually clean it up. This - memory will be released anyway by Java once there are no more - references to the object.
      +
      If the subclassed tensor allows it, release all memory it takes up so that + the garbage collector will eventually clean it up. This memory will be + released anyway by Java once there are no more references to the object.
      Specified by:
      release in class Tensor
      @@ -306,8 +307,8 @@

      persist

      public void persist()
      Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
      Specified by:
      persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/tensor/SparseTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/SparseTensor.html
index 1b97d43c..a2affb23 100644
@@ -138,9 +138,9 @@

      Method Summary

      long
Provides an estimate of the number of non-zero elements stored in the tensor; this number equals the size for dense tensors but the actual number of non-zero elements for sparse tensors.
      final double
      get(long pos)
      @@ -151,8 +151,8 @@

      Method Summary

If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
      final Tensor
      put(long pos, @@ -163,8 +163,8 @@

      Method Summary

      void
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
      @@ -174,7 +174,8 @@

      Method Summary

      zeroCopy(long size)
      -
      Creates a tensor of the same class with a given size and all element set to zero.
      +
      Creates a tensor of the same class with a given size and all element set to + zero.
      @@ -225,8 +226,8 @@

      put

      public final Tensor put(long pos, double value)
      Description copied from class: Tensor
      -
      Assign a value to a tensor element. All tensor operations use this function to wrap - element assignments.
      +
      Assign a value to a tensor element. All tensor operations use this function + to wrap element assignments.
      Specified by:
      put in class Tensor
      @@ -243,8 +244,8 @@

      put

      get

      public final double get(long pos)
      Description copied from class: Tensor
      -
      Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap - element retrieval.
      +
      Retrieves the value of a tensor element at a given position. All tensor + operations use this function to wrap element retrieval.
      Specified by:
      get in class Tensor
      @@ -260,7 +261,8 @@

      get

      zeroCopy

      public Tensor zeroCopy(long size)
      Description copied from class: Tensor
      -
      Creates a tensor of the same class with a given size and all element set to zero.
      +
      Creates a tensor of the same class with a given size and all element set to + zero.
      Specified by:
      zeroCopy in class Tensor
      @@ -282,12 +284,12 @@

      zeroCopy

      traverseNonZeroElements

      public Iterator<Long> traverseNonZeroElements()
      Description copied from class: Tensor
Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed, but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor traverses only non-zero elements.
      Specified by:
      traverseNonZeroElements in class Tensor
      @@ -301,10 +303,10 @@

      traverseNonZeroElements

      estimateNumNonZeroElements

      public long estimateNumNonZeroElements()
      Description copied from class: Tensor
Provides an estimate of the number of non-zero elements stored in the tensor; this number equals the size for dense tensors but the actual number of non-zero elements for sparse tensors. Basically, this quantity is proportional to the allocated memory.
      Overrides:
      estimateNumNonZeroElements in class Tensor
      @@ -324,10 +326,9 @@

      estimateNumNonZeroElements

      release

      public void release()
      Description copied from class: Tensor
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
      Specified by:
      release in class Tensor
      @@ -346,8 +347,8 @@

      persist

      public void persist()
      Description copied from class: Tensor
If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
      Specified by:
      persist in class Tensor
diff --git a/docs/javadoc/mklab/JGNN/core/tensor/VectorizedTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/VectorizedTensor.html
index a03b85a8..62f0567c 100644
@@ -174,8 +174,8 @@

      Method Summary

If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
      final Tensor
      put(int pos, @@ -194,8 +194,8 @@

      Method Summary

      void
If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up.
      selfAdd(Tensor tensor)
      @@ -205,12 +205,14 @@

      Method Summary

      selfMultiply(double value)
Performs in-memory multiplication on the Tensor, storing the result to itself.
Performs in-memory multiplication on the Tensor, storing the result in itself.
      @@ -228,7 +230,8 @@

      Method Summary

      zeroCopy(long size)
      -
      Creates a tensor of the same class with a given size and all element set to zero.
      +
      Creates a tensor of the same class with a given size and all element set to + zero.
      @@ -335,8 +338,8 @@

      put

      public final Tensor put(long pos, double value)
      Description copied from class: Tensor
      -
      Assign a value to a tensor element. All tensor operations use this function to wrap - element assignments.
      +
      Assign a value to a tensor element. All tensor operations use this function + to wrap element assignments.
      Specified by:
      put in class Tensor
      @@ -367,8 +370,8 @@

      putAdd

      get

      public final double get(long pos)
      Description copied from class: Tensor
- Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
+ Retrieves the value of a tensor element at a given position. All tensor operations use this function to wrap element retrieval.
      Specified by:
      get in class Tensor
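Since put returns the tensor itself, assignments chain; a minimal sketch (the size constructor is an assumption):

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.tensor.VectorizedTensor;

    public class ElementAccessSketch {
        public static void main(String[] args) {
            Tensor t = new VectorizedTensor(3); // assumed size constructor
            t.put(0, 1.5).put(1, -2.0);         // wrapped element assignments
            System.out.println(t.get(0) + " " + t.get(1) + " " + t.get(2)); // wrapped retrieval
        }
    }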
      @@ -390,7 +393,8 @@

      get

      zeroCopy

      public Tensor zeroCopy(long size)
      Description copied from class: Tensor
- Creates a tensor of the same class with a given size and all elements set to zero.
+ Creates a tensor of the same class with a given size and all elements set to zero.
      Specified by:
      zeroCopy in class Tensor
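A short sketch of the zeroCopy contract:

    import mklab.JGNN.core.Tensor;

    public class ZeroCopySketch {
        public static void main(String[] args) {
            Tensor original = Tensor.fromRange(4);              // [0, 1, 2, 3]
            Tensor blank = original.zeroCopy(original.size());  // same class, all zeros
            System.out.println(blank.get(2)); // 0.0
        }
    }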
      @@ -412,12 +416,12 @@

      zeroCopy

      traverseNonZeroElements

      public Iterator<Long> traverseNonZeroElements()
      Description copied from class: Tensor
- Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor indeed traverses only non-zero elements.
+ Retrieves positions within the tensor that may hold non-zero elements. This guarantees that all non-zero element positions are traversed but some of the returned positions could hold zero elements. For example, DenseTensor traverses all of its elements this way, whereas SparseTensor indeed traverses only non-zero elements.
      Specified by:
      traverseNonZeroElements in class Tensor
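A sketch of the traversal guarantee (the SparseTensor size constructor is an assumption):

    import java.util.Iterator;
    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.tensor.SparseTensor;

    public class TraversalSketch {
        public static void main(String[] args) {
            Tensor t = new SparseTensor(100); // assumed size constructor
            t.put(7, 3.0).put(42, -1.0);
            // A sparse tensor yields only positions 7 and 42 here; a dense tensor of
            // the same contents would yield all 100 candidate positions instead.
            Iterator<Long> positions = t.traverseNonZeroElements();
            while (positions.hasNext()) {
                long pos = positions.next();
                System.out.println(pos + " -> " + t.get(pos));
            }
        }
    }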
      @@ -431,10 +435,9 @@

      traverseNonZeroElements

      release

      public void release()
      Description copied from class: Tensor
- If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
+ If the subclassed tensor allows it, release all memory it takes up so that the garbage collector will eventually clean it up. This memory will be released anyway by Java once there are no more references to the object.
      Specified by:
      release in class Tensor
      @@ -453,8 +456,8 @@

      persist

      public void persist()
      Description copied from class: Tensor
- If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
+ If supported by the subclassed tensor, invalidates calls to Tensor.release() so that memory is de-allocated only when object references expire.
      Specified by:
      persist in class Tensor
      @@ -546,12 +549,14 @@

      multiply

      selfMultiply

      public Tensor selfMultiply(Tensor tensor)
      Description copied from class: Tensor
- Performs in-memory multiplication on the Tensor, storing the result in itself.
+ Performs in-memory multiplication on the Tensor, storing the result in itself.
      Overrides:
      selfMultiply in class Tensor
      Parameters:
- tensor - The tensor to perform element-wise multiplication with (it's not affected).
+ tensor - The tensor to perform element-wise multiplication with (it's not affected).
      Returns:
      this Tensor instance.
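A sketch of the two in-place overloads:

    import mklab.JGNN.core.Tensor;

    public class SelfMultiplySketch {
        public static void main(String[] args) {
            Tensor a = Tensor.fromRange(3); // [0, 1, 2]
            Tensor b = Tensor.fromRange(3); // [0, 1, 2]
            a.selfMultiply(2.0);            // scalar overload: a becomes [0, 2, 4]
            a.selfMultiply(b);              // element-wise overload: a becomes [0, 2, 8], b unchanged
            System.out.println(a.get(2));   // 8.0
        }
    }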
      @@ -576,7 +581,8 @@

      multiply

      selfMultiply

      public Tensor selfMultiply(double value)
      Description copied from class: Tensor
- Performs in-memory multiplication on the Tensor, storing the result to itself.
+ Performs in-memory multiplication on the Tensor, storing the result to itself.
      Overrides:
      selfMultiply in class Tensor
      diff --git a/docs/javadoc/mklab/JGNN/core/tensor/class-use/AccessSubtensor.html b/docs/javadoc/mklab/JGNN/core/tensor/class-use/AccessSubtensor.html index e86ba309..598ceacc 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/class-use/AccessSubtensor.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/class-use/AccessSubtensor.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.tensor.AccessSubtensor - + diff --git a/docs/javadoc/mklab/JGNN/core/tensor/class-use/DenseTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/class-use/DenseTensor.html index 727d47bb..f35dd414 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/class-use/DenseTensor.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/class-use/DenseTensor.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.tensor.DenseTensor - + diff --git a/docs/javadoc/mklab/JGNN/core/tensor/class-use/RepeatTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/class-use/RepeatTensor.html index 60bee9f2..185d1824 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/class-use/RepeatTensor.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/class-use/RepeatTensor.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.tensor.RepeatTensor - + diff --git a/docs/javadoc/mklab/JGNN/core/tensor/class-use/SparseTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/class-use/SparseTensor.html index 8b9aeda9..23fc13e6 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/class-use/SparseTensor.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/class-use/SparseTensor.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.tensor.SparseTensor - + diff --git a/docs/javadoc/mklab/JGNN/core/tensor/class-use/VectorizedTensor.html b/docs/javadoc/mklab/JGNN/core/tensor/class-use/VectorizedTensor.html index 3ddfca96..db530ee0 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/class-use/VectorizedTensor.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/class-use/VectorizedTensor.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.tensor.VectorizedTensor - + @@ -57,7 +57,11 @@

      Package
      Description
-  
+ Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
        diff --git a/docs/javadoc/mklab/JGNN/core/tensor/package-summary.html b/docs/javadoc/mklab/JGNN/core/tensor/package-summary.html index 5daa3c18..35f5a11e 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/package-summary.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.core.tensor - + @@ -42,7 +42,7 @@
        @@ -62,6 +62,17 @@

        Package mklab.JGNN.core


        package mklab.JGNN.core.tensor
+ Contains implementations of tensor classes, as well as transparent access to parts of these classes. Depending on the type of tensor, internal data can be sparse or dense, with dense tensors being further subdivided into traditional Java implementations and implementations leveraging SIMD (Single Instruction/Multiple Data) optimizations.
+ Author:
+ Emmanouil Krasanakis
        • @@ -71,15 +82,30 @@

          Package mklab.JGNN.core
          Package
          Description
-  
+ Contains base numerical data classes, as well as supporting abstract classes.
-  
+ Contains data distributions that produce one numerical value and can be used for tensor value initialization.
-  
+ Contains empty extensions of datatypes that hold only dimension names and sizes but no data.
-  
+ Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
-  
+ Contains utility functions that are employed internally, mainly optimized 1D and 2D iterators.

        • diff --git a/docs/javadoc/mklab/JGNN/core/tensor/package-tree.html b/docs/javadoc/mklab/JGNN/core/tensor/package-tree.html index 62bb13a9..1a52abd7 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/package-tree.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.core.tensor Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/core/tensor/package-use.html b/docs/javadoc/mklab/JGNN/core/tensor/package-use.html index 1eb3c150..226c8e0c 100644 --- a/docs/javadoc/mklab/JGNN/core/tensor/package-use.html +++ b/docs/javadoc/mklab/JGNN/core/tensor/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.core.tensor - + @@ -57,7 +57,11 @@

          Uses of Package
          Package
          Description
-  
+ Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
            diff --git a/docs/javadoc/mklab/JGNN/core/util/FastEntry.html b/docs/javadoc/mklab/JGNN/core/util/FastEntry.html index 11eca642..8c405bc8 100644 --- a/docs/javadoc/mklab/JGNN/core/util/FastEntry.html +++ b/docs/javadoc/mklab/JGNN/core/util/FastEntry.html @@ -1,11 +1,11 @@ - + FastEntry - + diff --git a/docs/javadoc/mklab/JGNN/core/util/Loss.html b/docs/javadoc/mklab/JGNN/core/util/Loss.html index e1686fe3..2b57325e 100644 --- a/docs/javadoc/mklab/JGNN/core/util/Loss.html +++ b/docs/javadoc/mklab/JGNN/core/util/Loss.html @@ -1,11 +1,11 @@ - + Loss - + diff --git a/docs/javadoc/mklab/JGNN/core/util/Range.html b/docs/javadoc/mklab/JGNN/core/util/Range.html index 738b5af8..b6b74bb0 100644 --- a/docs/javadoc/mklab/JGNN/core/util/Range.html +++ b/docs/javadoc/mklab/JGNN/core/util/Range.html @@ -1,11 +1,11 @@ - + Range - + diff --git a/docs/javadoc/mklab/JGNN/core/util/Range2D.html b/docs/javadoc/mklab/JGNN/core/util/Range2D.html index b815bcfc..299bb376 100644 --- a/docs/javadoc/mklab/JGNN/core/util/Range2D.html +++ b/docs/javadoc/mklab/JGNN/core/util/Range2D.html @@ -1,11 +1,11 @@ - + Range2D - + diff --git a/docs/javadoc/mklab/JGNN/core/util/Sort.html b/docs/javadoc/mklab/JGNN/core/util/Sort.html index 30a930d9..964139bd 100644 --- a/docs/javadoc/mklab/JGNN/core/util/Sort.html +++ b/docs/javadoc/mklab/JGNN/core/util/Sort.html @@ -1,11 +1,11 @@ - + Sort - + diff --git a/docs/javadoc/mklab/JGNN/core/util/class-use/FastEntry.html b/docs/javadoc/mklab/JGNN/core/util/class-use/FastEntry.html index 0133d091..72949e98 100644 --- a/docs/javadoc/mklab/JGNN/core/util/class-use/FastEntry.html +++ b/docs/javadoc/mklab/JGNN/core/util/class-use/FastEntry.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.util.FastEntry - + diff --git a/docs/javadoc/mklab/JGNN/core/util/class-use/Loss.html b/docs/javadoc/mklab/JGNN/core/util/class-use/Loss.html index 6ff08973..ebd8011f 100644 --- a/docs/javadoc/mklab/JGNN/core/util/class-use/Loss.html +++ b/docs/javadoc/mklab/JGNN/core/util/class-use/Loss.html @@ -1,11 +1,11 @@ - + Uses of Interface mklab.JGNN.core.util.Loss - + diff --git a/docs/javadoc/mklab/JGNN/core/util/class-use/Range.html b/docs/javadoc/mklab/JGNN/core/util/class-use/Range.html index 4c091f69..a6ff655f 100644 --- a/docs/javadoc/mklab/JGNN/core/util/class-use/Range.html +++ b/docs/javadoc/mklab/JGNN/core/util/class-use/Range.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.util.Range - + diff --git a/docs/javadoc/mklab/JGNN/core/util/class-use/Range2D.html b/docs/javadoc/mklab/JGNN/core/util/class-use/Range2D.html index 0ca30086..f30cb5d6 100644 --- a/docs/javadoc/mklab/JGNN/core/util/class-use/Range2D.html +++ b/docs/javadoc/mklab/JGNN/core/util/class-use/Range2D.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.util.Range2D - + diff --git a/docs/javadoc/mklab/JGNN/core/util/class-use/Sort.html b/docs/javadoc/mklab/JGNN/core/util/class-use/Sort.html index f9e3afc8..0899385c 100644 --- a/docs/javadoc/mklab/JGNN/core/util/class-use/Sort.html +++ b/docs/javadoc/mklab/JGNN/core/util/class-use/Sort.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.core.util.Sort - + diff --git a/docs/javadoc/mklab/JGNN/core/util/package-summary.html b/docs/javadoc/mklab/JGNN/core/util/package-summary.html index 37806559..911f4009 100644 --- a/docs/javadoc/mklab/JGNN/core/util/package-summary.html +++ b/docs/javadoc/mklab/JGNN/core/util/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.core.util - + @@ -46,7 +46,7 @@
            @@ -66,6 +66,14 @@

            Package mklab.JGNN.core.u


            package mklab.JGNN.core.util
+ Contains utility functions that are employed internally, mainly optimized 1D and 2D iterators.
+ Author:
+ Emmanouil Krasanakis
            • @@ -75,15 +83,30 @@

              Package mklab.JGNN.core.u
              Package
              Description
-  
+ Contains base numerical data classes, as well as supporting abstract classes.
-  
+ Contains data distributions that produce one numerical value and can be used for tensor value initialization.
-  
+ Contains empty extensions of datatypes that hold only dimension names and sizes but no data.
-  
+ Contains implementations of matrix classes, of transparent access to parts of these classes, and of column/row repetitions that broadcast vectors into matrices.
-  
+ Contains implementations of tensor classes, as well as transparent access to parts of these classes.

            • diff --git a/docs/javadoc/mklab/JGNN/core/util/package-tree.html b/docs/javadoc/mklab/JGNN/core/util/package-tree.html index 2b51fd76..1ff1c9dc 100644 --- a/docs/javadoc/mklab/JGNN/core/util/package-tree.html +++ b/docs/javadoc/mklab/JGNN/core/util/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.core.util Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/core/util/package-use.html b/docs/javadoc/mklab/JGNN/core/util/package-use.html index aa4c8b8b..11e9f593 100644 --- a/docs/javadoc/mklab/JGNN/core/util/package-use.html +++ b/docs/javadoc/mklab/JGNN/core/util/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.core.util - + diff --git a/docs/javadoc/mklab/JGNN/nn/Initializer.html b/docs/javadoc/mklab/JGNN/nn/Initializer.html index 5f9aeebb..162843d3 100644 --- a/docs/javadoc/mklab/JGNN/nn/Initializer.html +++ b/docs/javadoc/mklab/JGNN/nn/Initializer.html @@ -1,11 +1,11 @@ - + Initializer - + diff --git a/docs/javadoc/mklab/JGNN/nn/Loss.html b/docs/javadoc/mklab/JGNN/nn/Loss.html index 52b53ad3..3760cf99 100644 --- a/docs/javadoc/mklab/JGNN/nn/Loss.html +++ b/docs/javadoc/mklab/JGNN/nn/Loss.html @@ -1,11 +1,11 @@ - + Loss - + @@ -84,9 +84,9 @@

              Class Loss


              public abstract class Loss extends Object
- This class provides an abstract implementation of loss functions to be used during Model training. Preferred use is by passing loss instances to ModelTrainings.
+ This class provides an abstract implementation of loss functions to be used during Model training. Preferred use is by passing loss instances to ModelTrainings.
              Author:
              Emmanouil Krasanakis
              @@ -128,8 +128,8 @@

              Method Summary

              evaluate(Tensor output, Tensor desired)
- Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
+ Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
              @@ -167,15 +167,15 @@

              Method Details

              evaluate

              public abstract double evaluate(Tensor output, Tensor desired)
- Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
+ Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
              Parameters:
              output - A model's estimation of true outputs.
              desired - The expected outputs.
              Returns:
- A double value (negative if smaller values are better).
+ A double value (negative if smaller values are better).
              See Also:
          • -
            +
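A sketch of evaluating a concrete loss outside of training (the DenseTensor size constructor and the example values are assumptions):

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.tensor.DenseTensor;
    import mklab.JGNN.nn.Loss;
    import mklab.JGNN.nn.loss.BinaryCrossEntropy;

    public class LossSketch {
        public static void main(String[] args) {
            Tensor output = new DenseTensor(2).put(0, 0.9).put(1, 0.2);  // predicted probabilities
            Tensor desired = new DenseTensor(2).put(0, 1.0).put(1, 0.0); // expected outputs
            Loss loss = new BinaryCrossEntropy();
            System.out.println(loss.evaluate(output, desired)); // lower is better
        }
    }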

            train

- public Model train(ModelTraining trainer, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples)
+ public Model train(ModelTraining trainer, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples)
Trains the model by appropriately calling ModelTraining.train(Model, Matrix, Matrix, Slice, Slice) with the provided parameters.
            Parameters:
- trainer - The ModelTraining instance in charge of the training.
+ trainer - The ModelTraining instance in charge of the training.
            features - A training feature Matrix, where each sample resides in one row.
            labels - A training label Matrix corresponding to features.
            trainingSamples - A slice of samples to use for training.
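A sketch of wiring this overload; the model, matrices, and sample slice are assumed to be prepared elsewhere (e.g. through a ModelBuilder and a Dataset), and only the call shape is illustrated:

    import mklab.JGNN.adhoc.ModelTraining;
    import mklab.JGNN.core.Matrix;
    import mklab.JGNN.core.Slice;
    import mklab.JGNN.nn.Model;
    import mklab.JGNN.nn.optimizers.Adam;

    public class TrainSketch {
        // All arguments are placeholders built elsewhere; the trainer may be a
        // task-specific subclass such as the NodeClassification used in this changeset.
        static Model fit(Model model, ModelTraining trainer,
                         Matrix features, Matrix labels, Slice samples) {
            trainer.setOptimizer(new Adam(0.01)).setEpochs(300).setPatience(100);
            return model.train(trainer, features, labels,
                    samples.range(0, 0.8),   // training samples
                    samples.range(0.8, 1));  // validation samples
        }
    }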
            diff --git a/docs/javadoc/mklab/JGNN/nn/NNOperation.html b/docs/javadoc/mklab/JGNN/nn/NNOperation.html index a19926b8..a7886a11 100644 --- a/docs/javadoc/mklab/JGNN/nn/NNOperation.html +++ b/docs/javadoc/mklab/JGNN/nn/NNOperation.html @@ -1,11 +1,11 @@ - + NNOperation - + diff --git a/docs/javadoc/mklab/JGNN/nn/Optimizer.html b/docs/javadoc/mklab/JGNN/nn/Optimizer.html index c4b484da..70c65754 100644 --- a/docs/javadoc/mklab/JGNN/nn/Optimizer.html +++ b/docs/javadoc/mklab/JGNN/nn/Optimizer.html @@ -1,11 +1,11 @@ - + Optimizer - + @@ -80,9 +80,10 @@

            Interface Optimizer


            public interface Optimizer
- Provides an interface for training tensors. Has a reset() method that starts potential training memory from scratch. Has an update(Tensor, Tensor) method that, given a current Tensor and a gradient, operates on the former and adjusts its value.
+ Provides an interface for training tensors. Has a reset() method that starts potential training memory from scratch. Has an update(Tensor, Tensor) method that, given a current Tensor and a gradient, operates on the former and adjusts its value.
            Author:
            Emmanouil Krasanakis
            @@ -131,10 +132,10 @@

            Method Details

            update

            void update(Tensor value, Tensor gradient)
- In-place updates the value of a tensor given its gradient. Some optimizers (e.g. Adam) require the exact same tensor instance to be provided so as to keep track of its optimization progress. The library makes sure to keep this constraint.
+ In-place updates the value of a tensor given its gradient. Some optimizers (e.g. Adam) require the exact same tensor instance to be provided so as to keep track of its optimization progress. The library makes sure to keep this constraint.
            Parameters:
            value - The tensor to update.
            @@ -146,8 +147,8 @@

            update

            reset

            default void reset()
- Resets (and lets the garbage collector free) optimizer memory. Should be called at the beginning of training (not after each epoch).
+ Resets (and lets the garbage collector free) optimizer memory. Should be called at the beginning of training (not after each epoch).
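A sketch of the intended call pattern, reusing the same tensor instance across updates:

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.nn.Optimizer;
    import mklab.JGNN.nn.optimizers.Adam;

    public class OptimizerSketch {
        public static void main(String[] args) {
            Optimizer optimizer = new Adam(0.01);
            optimizer.reset();                      // once, before training starts
            Tensor value = Tensor.fromRange(3);     // the parameter being trained
            Tensor gradient = Tensor.fromRange(3);  // a stand-in gradient
            for (int epoch = 0; epoch < 10; epoch++)
                optimizer.update(value, gradient);  // always the same value instance
        }
    }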
          diff --git a/docs/javadoc/mklab/JGNN/nn/activations/Exp.html b/docs/javadoc/mklab/JGNN/nn/activations/Exp.html index 19068fe5..09667cbd 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/Exp.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/Exp.html @@ -1,11 +1,11 @@ - + Exp - + @@ -82,7 +82,8 @@

          Class Exp


          public class Exp extends NNOperation
- Implements a NNOperation that performs an exponential transformation of its single input.
+ Implements a NNOperation that performs an element-by-element exponential transformation of its one input tensor.
          Author:
          Emmanouil Krasanakis
          diff --git a/docs/javadoc/mklab/JGNN/nn/activations/L1.html b/docs/javadoc/mklab/JGNN/nn/activations/L1.html index 7b29a15d..6af0af4f 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/L1.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/L1.html @@ -1,11 +1,11 @@ - + L1 - + @@ -82,8 +82,9 @@

          Class L1


          public class L1 extends NNOperation
- Implements a NNOperation that performs an L1 transformation of its single input by row or column.
+ Implements a NNOperation that performs an L1 transformation of its one input tensor by row or by column. If the input tensor is not a matrix, it is just L1-normalized.
          Author:
          Emmanouil Krasanakis
          @@ -109,9 +110,14 @@

          Constructor Summary

          Constructor
          Description
          L1()
-  
+ Instantiates an L1 operation that transforms inputs by row.
L1(boolean colMode)
-  
+ Instantiates an L1 operation that transforms inputs along the dimension signified by its argument.
          @@ -153,12 +159,28 @@

          Constructor Details

          L1

          public L1()
          +
          Instantiates an L1 operation that transforms inputs by row.
          +
          +
          See Also:
          +
          + +
          +
        • L1

          public L1(boolean colMode)
          +
Instantiates an L1 operation that transforms inputs along the dimension signified by its argument.
          +
          +
          Parameters:
          +
colMode - True to perform the normalization on each column, otherwise it is performed on each row.
          +
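To illustrate what the operation computes, a hand-rolled sketch of L1-normalizing a plain (non-matrix) tensor, which the row/column modes apply per matrix row or column:

    import mklab.JGNN.core.Tensor;

    public class L1Sketch {
        public static void main(String[] args) {
            Tensor x = Tensor.fromRange(4).put(0, -2); // [-2, 1, 2, 3], L1 norm 8
            double norm = 0;
            for (long i = 0; i < x.size(); i++)
                norm += Math.abs(x.get(i));
            if (norm != 0)
                x.selfMultiply(1. / norm); // in-place normalization
            System.out.println(x.get(3));  // 0.375
        }
    }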
        diff --git a/docs/javadoc/mklab/JGNN/nn/activations/LRelu.html b/docs/javadoc/mklab/JGNN/nn/activations/LRelu.html index 5ebbbcdb..f2e3e3f9 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/LRelu.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/LRelu.html @@ -1,11 +1,11 @@ - + LRelu - + @@ -82,10 +82,13 @@

        Class LRelu


        public class LRelu extends NNOperation
- Implements a NNOperation that performs a leaky relu operation, where the first argument is a tensor on which it is applied and the second one should be a tensor wrapping a double value (consider initializing this as a Constant holding a tensor generated with Tensor.fromDouble(double)) where the wrapped value indicates the negative region's slope. If the negative slope is zero, leaky relu is reduced to Relu.
+ Implements a NNOperation that performs a leaky relu operation, where the first argument is a tensor on which it is applied and the second one should be a tensor wrapping a double value (consider initializing this as a Constant holding a tensor generated with Tensor.fromDouble(double)) where the wrapped value indicates the negative region's slope. If the negative slope is zero, leaky relu is reduced to Relu.
        Author:
        Emmanouil Krasanakis
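A hand-rolled sketch of the element-wise computation the operation performs; inside a model the slope would instead arrive as a Constant input created from Tensor.fromDouble:

    import mklab.JGNN.core.Tensor;

    public class LReluSketch {
        public static void main(String[] args) {
            double slope = 0.1; // what the Constant-wrapped second input would hold
            Tensor x = Tensor.fromRange(4).put(0, -2); // [-2, 1, 2, 3]
            Tensor out = x.zeroCopy(x.size());
            for (long i = 0; i < x.size(); i++) {
                double v = x.get(i);
                out.put(i, v > 0 ? v : slope * v);     // leaky relu element-wise
            }
            System.out.println(out.get(0)); // -0.2; with slope 0 this is plain relu
        }
    }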
        diff --git a/docs/javadoc/mklab/JGNN/nn/activations/NExp.html b/docs/javadoc/mklab/JGNN/nn/activations/NExp.html index 825a9236..54fce355 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/NExp.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/NExp.html @@ -1,11 +1,11 @@ - + NExp - + @@ -82,8 +82,8 @@

        Class NExp


        public class NExp extends NNOperation
- Implements a NNOperation that performs an exponential transformation of its single input, but only on the non-zero elements.
+ Implements a NNOperation that performs an exponential transformation of its single input, but only on the non-zero elements.
        Author:
        Emmanouil Krasanakis
        diff --git a/docs/javadoc/mklab/JGNN/nn/activations/PRelu.html b/docs/javadoc/mklab/JGNN/nn/activations/PRelu.html index fa50dbd5..4f7cd625 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/PRelu.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/PRelu.html @@ -1,11 +1,11 @@ - + PRelu - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/Relu.html b/docs/javadoc/mklab/JGNN/nn/activations/Relu.html index 95df27df..65aae674 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/Relu.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/Relu.html @@ -1,11 +1,11 @@ - + Relu - + @@ -82,11 +82,11 @@

        Class Relu


        public class Relu extends NNOperation
- Implements a NNOperation that performs a relu transformation of its single input first introduced by Hahnloser, Richard HR, Rahul Sarpeshkar, Misha A. Mahowald, Rodney J. Douglas, and H. Sebastian Seung. "Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit." Nature 405, no. 6789 (2000): 947-951.
+ Implements a NNOperation that performs a relu transformation of its one input tensor. This transformation was first introduced by Hahnloser, Richard HR, Rahul Sarpeshkar, Misha A. Mahowald, Rodney J. Douglas, and H. Sebastian Seung. "Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit." Nature 405, no. 6789 (2000): 947-951.
        Author:
        Emmanouil Krasanakis
        diff --git a/docs/javadoc/mklab/JGNN/nn/activations/Sigmoid.html b/docs/javadoc/mklab/JGNN/nn/activations/Sigmoid.html index de824361..23ab2c16 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/Sigmoid.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/Sigmoid.html @@ -1,11 +1,11 @@ - + Sigmoid - + @@ -82,7 +82,8 @@

        Class Sigmoid


        public class Sigmoid extends NNOperation
- Implements a NNOperation that performs a sigmoid transformation of its single input.
+ Implements a NNOperation that performs a sigmoid transformation of its single input.
        Author:
        Emmanouil Krasanakis
        diff --git a/docs/javadoc/mklab/JGNN/nn/activations/Tanh.html b/docs/javadoc/mklab/JGNN/nn/activations/Tanh.html index 67c24940..87997193 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/Tanh.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/Tanh.html @@ -1,11 +1,11 @@ - + Tanh - + @@ -82,7 +82,8 @@

        Class Tanh


        public class Tanh extends NNOperation
- Implements a NNOperation that performs a tanh transformation of its single input.
+ Implements a NNOperation that performs a tanh transformation of its single input.
        Author:
        Emmanouil Krasanakis
        diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Exp.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Exp.html index df3d5c8c..684494e9 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Exp.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Exp.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.Exp - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/L1.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/L1.html index 1ccc551b..96ca564e 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/L1.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/L1.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.L1 - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/LRelu.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/LRelu.html index 3ba3d5ed..046fa503 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/LRelu.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/LRelu.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.LRelu - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/NExp.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/NExp.html index d1e33bce..c84a0644 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/NExp.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/NExp.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.NExp - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/PRelu.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/PRelu.html index 865d2f29..3548aa03 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/PRelu.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/PRelu.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.PRelu - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Relu.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Relu.html index 5e4aa76c..e0953b8b 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Relu.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Relu.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.Relu - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Sigmoid.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Sigmoid.html index 9a5ef820..be4a0c8c 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Sigmoid.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Sigmoid.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.Sigmoid - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Tanh.html b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Tanh.html index 3d980cd0..c0cd2109 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/class-use/Tanh.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/class-use/Tanh.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.activations.Tanh - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/package-summary.html b/docs/javadoc/mklab/JGNN/nn/activations/package-summary.html index ff064d88..e6fabe79 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/package-summary.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.activations - + @@ -42,7 +42,7 @@
        @@ -62,6 +62,13 @@

        Package mklab.JGNN.n


        package mklab.JGNN.nn.activations
+ Implements activation functions to be used as model operations.
+ Author:
+ Emmanouil Krasanakis
        • @@ -71,7 +78,10 @@

          Package mklab.JGNN.n
          Package
          Description
-  
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.

        • @@ -83,39 +93,44 @@

          Package mklab.JGNN.n
          Description
- Implements a NNOperation that performs an exponential transformation of its single input.
+ Implements a NNOperation that performs an element-by-element exponential transformation of its one input tensor.
- Implements a NNOperation that performs an L1 transformation of its single input by row or column.
+ Implements a NNOperation that performs an L1 transformation of its one input tensor by row or by column.
- Implements a NNOperation that performs a leaky relu operation, where the first argument is a tensor on which it is applied and the second one should be a tensor wrapping a double value (consider initializing this as a Constant holding a tensor generated with Tensor.fromDouble(double)) where the wrapped value indicates the negative region's slope.
+ Implements a NNOperation that performs a leaky relu operation, where the first argument is a tensor on which it is applied and the second one should be a tensor wrapping a double value (consider initializing this as a Constant holding a tensor generated with Tensor.fromDouble(double)) where the wrapped value indicates the negative region's slope.
- Implements a NNOperation that performs an exponential transformation of its single input, but only on the non-zero elements.
+ Implements a NNOperation that performs an exponential transformation of its single input, but only on the non-zero elements.
 
- Implements a NNOperation that performs a relu transformation of its single input first introduced by Hahnloser, Richard HR, Rahul Sarpeshkar, Misha A.
+ Implements a NNOperation that performs a relu transformation of its one input tensor.
- Implements a NNOperation that performs a sigmoid transformation of its single input.
+ Implements a NNOperation that performs a sigmoid transformation of its single input.
- Implements a NNOperation that performs a tanh transformation of its single input.
+ Implements a NNOperation that performs a tanh transformation of its single input.
diff --git a/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html b/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html index 9e4927bf..6c8fe8be 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.activations Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/package-use.html b/docs/javadoc/mklab/JGNN/nn/activations/package-use.html index 0653d0f2..47d509ad 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.activations - + diff --git a/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html b/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html index 034868df..d10ffcf2 100644 --- a/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html +++ b/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.Initializer - + @@ -57,9 +57,16 @@
          diff --git a/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html b/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html index 9e4927bf..6c8fe8be 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.activations Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/activations/package-use.html b/docs/javadoc/mklab/JGNN/nn/activations/package-use.html index 0653d0f2..47d509ad 100644 --- a/docs/javadoc/mklab/JGNN/nn/activations/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/activations/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.activations - + diff --git a/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html b/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html index 034868df..d10ffcf2 100644 --- a/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html +++ b/docs/javadoc/mklab/JGNN/nn/class-use/Initializer.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.Initializer - + @@ -57,9 +57,16 @@

          Uses of Class<
          Package
          Description
-  
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.
-  
+ Implements initializers to be applied to Model parameters to stochastically induce some desired property at the first training epoch.
            diff --git a/docs/javadoc/mklab/JGNN/nn/class-use/Loss.html b/docs/javadoc/mklab/JGNN/nn/class-use/Loss.html index f87488f9..bb32f1a4 100644 --- a/docs/javadoc/mklab/JGNN/nn/class-use/Loss.html +++ b/docs/javadoc/mklab/JGNN/nn/class-use/Loss.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.Loss - + @@ -56,31 +56,54 @@

            Uses of Class
            mkla
            Package
            Description
-  
-  
-  
+ Contains classes that simplify data loading, model building, and training.
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.
+ Contains classes for instantiating loss functions.
+ Contains losses that wrap other losses and augment their numeric computations with live reporting of the training status.
            @@ -265,7 +283,8 @@

            Uses of NNOpera
            class 
- Implements a NNOperation that performs the operation 1-x for its simple input x.
+ Implements a NNOperation that performs the operation 1-x for its simple input x.
            class 
            @@ -275,18 +294,21 @@

            Uses of NNOpera
            class 
- Implements a NNOperation that converts its first argument to a ColumnRepetition matrix with a number of columns equal to the second argument.
+ Implements a NNOperation that converts its first argument to a ColumnRepetition matrix with a number of columns equal to the second argument.
            class 
- Implements a NNOperation that lists the first element of the 2D matrix element iterator.
+ Implements a NNOperation that lists the first element of the 2D matrix element iterator.
            class 
- Implements a NNOperation that performs the equivalent of TensorFlow's gather operation.
+ Implements a NNOperation that performs the equivalent of TensorFlow's gather operation.
            class 
            @@ -296,7 +318,8 @@

            Uses of NNOpera
            class 
- Implements a NNOperation that outputs the natural logarithm of its single input.
+ Implements a NNOperation that outputs the natural logarithm of its single input.
            class 
            @@ -306,7 +329,8 @@

            Uses of NNOpera
            class 
- Implements a NNOperation that multiplies its two inputs element-by-element.
+ Implements a NNOperation that multiplies its two inputs element-by-element.
            class 
            @@ -314,8 +338,9 @@

            Uses of NNOpera
            class 
- Implements a NNOperation that converts its first argument to a ColumnRepetition matrix with a number of columns equal to the second argument.
+ Implements a NNOperation that converts its first argument to a ColumnRepetition matrix with a number of columns equal to the second argument.
            class 
            @@ -325,7 +350,8 @@

            Uses of NNOpera
            class 
- Implements a NNOperation that lists the second element of the 2D matrix element iterator.
+ Implements a NNOperation that lists the second element of the 2D matrix element iterator.
            class 
            diff --git a/docs/javadoc/mklab/JGNN/nn/class-use/Optimizer.html b/docs/javadoc/mklab/JGNN/nn/class-use/Optimizer.html index 32f70c2b..b0826241 100644 --- a/docs/javadoc/mklab/JGNN/nn/class-use/Optimizer.html +++ b/docs/javadoc/mklab/JGNN/nn/class-use/Optimizer.html @@ -1,11 +1,11 @@ - + Uses of Interface mklab.JGNN.nn.Optimizer - + @@ -56,16 +56,44 @@

            Uses of Inte
            Package
            Description
-  
-  
-  
+ Contains classes that simplify data loading, model building, and training.
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.
+ Contains popular neural network and GNN operations.
+ Contains optimizers that can be used to update training losses.
            • +
              +

              Uses of Optimizer in mklab.JGNN.adhoc

              +
              Methods in mklab.JGNN.adhoc with parameters of type Optimizer
              +
              +
              Modifier and Type
              +
              Method
              +
              Description
              + +
              ModelTraining.setOptimizer(Optimizer optimizer)
              +
              +
Sets an Optimizer instance to control parameter updates during training.
              +
              +
              +
              +
            • +
            • Uses of Optimizer in mklab.JGNN.nn

              Methods in mklab.JGNN.nn with parameters of type Optimizer
              @@ -73,32 +101,27 @@

              Uses of Optim
              Modifier and Type
              Method
              Description
              -
              -
              ModelTraining.setOptimizer(Optimizer optimizer)
              -
              -
Sets an Optimizer instance to control parameter updates during training.
              -
- Model.train(Loss loss, Optimizer optimizer, List<Tensor> inputs, List<Tensor> desiredOutputs)
+ Model.train(Loss loss, Optimizer optimizer, List<Tensor> inputs, List<Tensor> desiredOutputs)
Performs the training of #train(Optimizer, List, List, List) for unit weights.
- Model.train(Loss loss, Optimizer optimizer, List<Tensor> inputs, List<Tensor> desiredOutputs, List<Tensor> weights)
+ Model.train(Loss loss, Optimizer optimizer, List<Tensor> inputs, List<Tensor> desiredOutputs, List<Tensor> weights)
Performs one parameter adjustment step (e.g.
double
- Model.trainTowardsZero(Optimizer optimizer, List<Tensor> inputs)
+ Model.trainTowardsZero(Optimizer optimizer, List<Tensor> inputs)
Is equivalent to calling Model.train(Loss, Optimizer, List, List) for new Zero() loss.
              @@ -159,8 +182,8 @@

              Uses of Optim
              class 
- Wraps an Optimizer by applying the derivative of L2 loss on every tensor during update(Tensor, Tensor).
+ Wraps an Optimizer by applying the derivative of L2 loss on every tensor during update(Tensor, Tensor).

              Constructors in mklab.JGNN.nn.optimizers with parameters of type Optimizer
              diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/KaimingNormal.html b/docs/javadoc/mklab/JGNN/nn/initializers/KaimingNormal.html index 5124dcf0..5019db01 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/KaimingNormal.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/KaimingNormal.html @@ -1,11 +1,11 @@ - + KaimingNormal - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/KaimingUniform.html b/docs/javadoc/mklab/JGNN/nn/initializers/KaimingUniform.html index 4df6accb..f86baca1 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/KaimingUniform.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/KaimingUniform.html @@ -1,11 +1,11 @@ - + KaimingUniform - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/VariancePreservingInitializer.html b/docs/javadoc/mklab/JGNN/nn/initializers/VariancePreservingInitializer.html index 66961891..21808f5a 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/VariancePreservingInitializer.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/VariancePreservingInitializer.html @@ -1,11 +1,11 @@ - + VariancePreservingInitializer - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/XavierNormal.html b/docs/javadoc/mklab/JGNN/nn/initializers/XavierNormal.html index f2ad54c4..187991db 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/XavierNormal.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/XavierNormal.html @@ -1,11 +1,11 @@ - + XavierNormal - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/XavierUniform.html b/docs/javadoc/mklab/JGNN/nn/initializers/XavierUniform.html index d397f393..0ee8fab3 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/XavierUniform.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/XavierUniform.html @@ -1,11 +1,11 @@ - + XavierUniform - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingNormal.html b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingNormal.html index 120292ac..b06ce9d0 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingNormal.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingNormal.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.initializers.KaimingNormal - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingUniform.html b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingUniform.html index d4e467ee..5a674dd3 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingUniform.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/KaimingUniform.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.initializers.KaimingUniform - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/VariancePreservingInitializer.html b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/VariancePreservingInitializer.html index 59caca3b..53d0ab72 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/VariancePreservingInitializer.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/VariancePreservingInitializer.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.initializers.VariancePreservingInitializer - + @@ -57,7 +57,11 @@

              Package

              Description
-  
+ Implements initializers to be applied to Model parameters to stochastically induce some desired property at the first training epoch.
                diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierNormal.html b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierNormal.html index 8875dfa6..9c7a980d 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierNormal.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierNormal.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.initializers.XavierNormal - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierUniform.html b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierUniform.html index 436c8f4a..4ce68f72 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierUniform.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/class-use/XavierUniform.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.initializers.XavierUniform - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/package-summary.html b/docs/javadoc/mklab/JGNN/nn/initializers/package-summary.html index c3e1c221..860ea196 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/package-summary.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.initializers - + @@ -42,7 +42,7 @@
                @@ -62,6 +62,15 @@

                Package mklab.JGNN.


                package mklab.JGNN.nn.initializers
+ Implements initializers to be applied to Model parameters to stochastically induce some desired property at the first training epoch.
+ Author:
+ Emmanouil Krasanakis
                • @@ -71,7 +80,10 @@

                  Package mklab.JGNN.
                  Package
                  Description
-  
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.

            • diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/package-tree.html b/docs/javadoc/mklab/JGNN/nn/initializers/package-tree.html index 67d952b2..97cfac7f 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.initializers Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/initializers/package-use.html b/docs/javadoc/mklab/JGNN/nn/initializers/package-use.html index c2e92358..a0912ba5 100644 --- a/docs/javadoc/mklab/JGNN/nn/initializers/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/initializers/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.initializers - + @@ -57,7 +57,11 @@

              Uses of Pac
              Package
              Description
-  
+ Implements initializers to be applied to Model parameters to stochastically induce some desired property at the first training epoch.
                diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/Constant.html b/docs/javadoc/mklab/JGNN/nn/inputs/Constant.html index f7455208..db4285ff 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/Constant.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/Constant.html @@ -1,11 +1,11 @@ - + Constant - + diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/Parameter.html b/docs/javadoc/mklab/JGNN/nn/inputs/Parameter.html index 096f5743..878b3ec2 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/Parameter.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/Parameter.html @@ -1,11 +1,11 @@ - + Parameter - + diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/Variable.html b/docs/javadoc/mklab/JGNN/nn/inputs/Variable.html index 3c82fab2..e890e73c 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/Variable.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/Variable.html @@ -1,11 +1,11 @@ - + Variable - + diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Constant.html b/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Constant.html index ec3ec3e0..6e63c013 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Constant.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Constant.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.inputs.Constant - + diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Parameter.html b/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Parameter.html index 2744349e..611bd36b 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Parameter.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Parameter.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.inputs.Parameter - + @@ -57,9 +57,14 @@

                Uses of C
                Package
                Description
-  
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.
-  
+ Contains various types of neural architecture inputs.
                  diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Variable.html b/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Variable.html index 65826024..aea5321f 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Variable.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/class-use/Variable.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.inputs.Variable - + @@ -57,7 +57,10 @@

                  Uses of Cl
                  Package
                  Description
-  
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.
                    diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/package-summary.html b/docs/javadoc/mklab/JGNN/nn/inputs/package-summary.html index e3b7a696..abc983b2 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/package-summary.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.inputs - + @@ -42,7 +42,7 @@
                    @@ -62,6 +62,15 @@

                    Package mklab.JGNN.nn.inp


                    package mklab.JGNN.nn.inputs
+ Contains various types of neural architecture inputs. These differ on whether they are constants, parameters to be trained, or variables manually set in each architecture run.
+ Author:
+ Emmanouil Krasanakis
                    • @@ -71,7 +80,10 @@

                      Package mklab.JGNN.nn.inp
                      Package
                      Description
-  
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.

                    • diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/package-tree.html b/docs/javadoc/mklab/JGNN/nn/inputs/package-tree.html index 45709e14..02216992 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.inputs Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/inputs/package-use.html b/docs/javadoc/mklab/JGNN/nn/inputs/package-use.html index f11e9120..02837adb 100644 --- a/docs/javadoc/mklab/JGNN/nn/inputs/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/inputs/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.inputs - + @@ -57,9 +57,14 @@

                      Uses of PackagePackage
                      Description
-  
+ Implements neural network components that are combined to define GNNs or other types of machine learning models.
-  
+ Contains various types of neural architecture inputs.
                        diff --git a/docs/javadoc/mklab/JGNN/nn/loss/Accuracy.html b/docs/javadoc/mklab/JGNN/nn/loss/Accuracy.html index 194ac8c3..a1bdf59c 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/Accuracy.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/Accuracy.html @@ -1,11 +1,11 @@ - + Accuracy - + @@ -129,8 +129,8 @@

                        Method Summary

                        evaluate(Tensor output, Tensor desired)
- Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
+ Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
                        @@ -173,8 +173,8 @@

                        evaluate

                        public double evaluate(Tensor output, Tensor desired)
                        Description copied from class: Loss
- Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
+ Provides a numerical evaluation of a loss function, so that lower values correspond to better predictions.
                        Specified by:
                        evaluate in class Loss
                        @@ -182,8 +182,8 @@

                        evaluate

                        output - A model's estimation of true outputs.
                        desired - The expected outputs.
                        Returns:
- A double value (negative if smaller values are better).
+ A double value (negative if smaller values are better).
                        See Also:
                          diff --git a/docs/javadoc/mklab/JGNN/nn/loss/BinaryCrossEntropy.html b/docs/javadoc/mklab/JGNN/nn/loss/BinaryCrossEntropy.html index dd9d4200..88a404a7 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/BinaryCrossEntropy.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/BinaryCrossEntropy.html @@ -1,11 +1,11 @@ - + BinaryCrossEntropy - + @@ -132,8 +132,8 @@

                          Method Summary

                          evaluate(Tensor output, Tensor desired)
                          -
                          Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
                          +
                          Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
                          @@ -201,8 +201,8 @@

                          evaluate

                          public double evaluate(Tensor output, Tensor desired)
                          Description copied from class: Loss
                          -
                          Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
                          +
                          Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
                          Specified by:
                          evaluate in class Loss
                          @@ -210,8 +210,8 @@

                          evaluate

                          output - A model's estimation of true outputs.
                          desired - The expected outputs.
                          Returns:
                          -
                          A double value (is negative if smaller - values are better).
                          +
A double value (negative if smaller values are + better).
                          See Also:
                            diff --git a/docs/javadoc/mklab/JGNN/nn/loss/CategoricalCrossEntropy.html b/docs/javadoc/mklab/JGNN/nn/loss/CategoricalCrossEntropy.html index 5328f355..28b61222 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/CategoricalCrossEntropy.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/CategoricalCrossEntropy.html @@ -1,11 +1,11 @@ - + CategoricalCrossEntropy - + @@ -132,8 +132,8 @@

                            Method Summary

                            evaluate(Tensor output, Tensor desired)
                            -
                            Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
                            +
                            Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
                            setMeanReduction(boolean meanReduction)
                            @@ -220,8 +220,8 @@

                            evaluate

                            public double evaluate(Tensor output, Tensor desired)
                            Description copied from class: Loss
                            -
                            Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
                            +
                            Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
                            Specified by:
                            evaluate in class Loss
                            @@ -229,8 +229,8 @@

                            evaluate

                            output - A model's estimation of true outputs.
                            desired - The expected outputs.
                            Returns:
                            -
                            A double value (is negative if smaller - values are better).
                            +
A double value (negative if smaller values are + better).
                            See Also:
                              diff --git a/docs/javadoc/mklab/JGNN/nn/loss/class-use/Accuracy.html b/docs/javadoc/mklab/JGNN/nn/loss/class-use/Accuracy.html index 73eb3acc..4ef96801 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/class-use/Accuracy.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/class-use/Accuracy.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.loss.Accuracy - + diff --git a/docs/javadoc/mklab/JGNN/nn/loss/class-use/BinaryCrossEntropy.html b/docs/javadoc/mklab/JGNN/nn/loss/class-use/BinaryCrossEntropy.html index daebaa81..e626fb75 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/class-use/BinaryCrossEntropy.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/class-use/BinaryCrossEntropy.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.loss.BinaryCrossEntropy - + diff --git a/docs/javadoc/mklab/JGNN/nn/loss/class-use/CategoricalCrossEntropy.html b/docs/javadoc/mklab/JGNN/nn/loss/class-use/CategoricalCrossEntropy.html index ce9a5d5d..a94cbe16 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/class-use/CategoricalCrossEntropy.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/class-use/CategoricalCrossEntropy.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.loss.CategoricalCrossEntropy - + @@ -57,7 +57,9 @@

                              Package
                              Description
                              -
                               
                              +
                              +
Contains classes for instantiating loss functions.
                              +
                                diff --git a/docs/javadoc/mklab/JGNN/nn/loss/package-summary.html b/docs/javadoc/mklab/JGNN/nn/loss/package-summary.html index 56d2b3ac..38e2191a 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/package-summary.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.loss - + @@ -42,7 +42,7 @@
                                @@ -62,6 +62,14 @@

Package mklab.JGNN.nn.loss


                                package mklab.JGNN.nn.loss
                                +
                                +
Contains classes for instantiating loss functions. These are then to be used + during model training.
                                +
                                +
                                Author:
                                +
                                Emmanouil Krasanakis
                                +
                                +
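As a hedged sketch of how these classes are meant to be used, a loss can be handed to a trainer such as the ModelTraining class used throughout the library's examples; setLoss and setValidationLoss are assumed chainable setters:

import mklab.JGNN.adhoc.ModelTraining;
import mklab.JGNN.nn.loss.Accuracy;
import mklab.JGNN.nn.loss.BinaryCrossEntropy;
import mklab.JGNN.nn.optimizers.Adam;

ModelTraining trainer = new ModelTraining()
        .setOptimizer(new Adam(0.01))
        .setLoss(new BinaryCrossEntropy())   // loss minimized during training
        .setValidationLoss(new Accuracy());  // metric monitored on the validation set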
                                • @@ -71,9 +79,15 @@

Package mklab.JGNN.nn.loss
                                  Package
                                  Description
                                  -
                                   
                                  +
                                  +
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
                                  +
                                  -
                                   
                                  +
                                  +
                                  Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status.
                                  +

                                • diff --git a/docs/javadoc/mklab/JGNN/nn/loss/package-tree.html b/docs/javadoc/mklab/JGNN/nn/loss/package-tree.html index aa8fc8c7..1d22eab9 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.loss Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/loss/package-use.html b/docs/javadoc/mklab/JGNN/nn/loss/package-use.html index 3e5da23f..c1315928 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.loss - + @@ -57,7 +57,9 @@

Uses of Package mklab.JGNN.nn.loss
                                  Package
                                  Description
                                  -
                                   
                                  +
                                  +
Contains classes for instantiating loss functions.
                                  +
                                    diff --git a/docs/javadoc/mklab/JGNN/nn/loss/report/VerboseLoss.html b/docs/javadoc/mklab/JGNN/nn/loss/report/VerboseLoss.html index d2273b71..d8088d77 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/report/VerboseLoss.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/report/VerboseLoss.html @@ -1,11 +1,11 @@ - + VerboseLoss - + @@ -82,8 +82,9 @@

                                    Class VerboseLoss


                                    public class VerboseLoss extends Loss
                                    -
                                    Implements a Loss that wraps other losses and outputs their value during training to an output stream - (to System.out by default). This is the simplest loss wrapper to keep track of training progress.
                                    +
                                    Implements a Loss that wraps other losses and outputs their value + during training to an output stream (to System.out by default). This + is the simplest loss wrapper to keep track of training progress.
                                    Author:
                                    Emmanouil Krasanakis
                                    @@ -133,8 +134,8 @@

                                    Method Summary

                                    evaluate(Tensor output, Tensor desired)
                                    -
                                    Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
                                    +
                                    Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
                                    void
                                    @@ -170,9 +171,9 @@

                                    Constructor Details

                                    VerboseLoss

                                    public VerboseLoss(Loss baseLoss)
                                    -
                                    Instantiates a VerboseLoss given a base loss to be wrapped. - Use a method chain to modify when losses should be reported, and which - output stream is used.
                                    +
                                    Instantiates a VerboseLoss given a base loss to be wrapped. Use a + method chain to modify when losses should be reported, and which output + stream is used.
                                    Parameters:
                                    baseLoss -
                                    @@ -207,7 +208,8 @@

                                    setInterval

                                    Changes on which epochs the loss should be reported.
                                    Parameters:
                                    -
                                    every - The loss is reported on epochs 0, every, 2every, ... Default is 1.
                                    +
every - The loss is reported on epochs 0, every, 2*every, ... Default is + 1.
                                    Returns:
                                    this verbose loss instance.
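Combining the constructor and setter documented above, a minimal sketch of a reporting wrapper (the interval of 10 is an arbitrary choice):

import mklab.JGNN.nn.Loss;
import mklab.JGNN.nn.loss.CategoricalCrossEntropy;
import mklab.JGNN.nn.loss.report.VerboseLoss;

Loss validationLoss = new VerboseLoss(new CategoricalCrossEntropy())
        .setInterval(10); // print the wrapped loss on epochs 0, 10, 20, ...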
                                    @@ -232,8 +234,8 @@

                                    evaluate

                                    public double evaluate(Tensor output, Tensor desired)
                                    Description copied from class: Loss
                                    -
                                    Provides a numerical evaluation of a loss function, so that - lower values correspond to better predictions.
                                    +
                                    Provides a numerical evaluation of a loss function, so that lower values + correspond to better predictions.
                                    Specified by:
                                    evaluate in class Loss
                                    @@ -241,8 +243,8 @@

                                    evaluate

                                    output - A model's estimation of true outputs.
                                    desired - The expected outputs.
                                    Returns:
                                    -
                                    A double value (is negative if smaller - values are better).
                                    +
A double value (negative if smaller values are + better).
                                    See Also:
                                      diff --git a/docs/javadoc/mklab/JGNN/nn/loss/report/class-use/VerboseLoss.html b/docs/javadoc/mklab/JGNN/nn/loss/report/class-use/VerboseLoss.html index 9cc2c0e3..aa43a9c0 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/report/class-use/VerboseLoss.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/report/class-use/VerboseLoss.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.loss.report.VerboseLoss - + @@ -57,7 +57,10 @@

Uses of Class mklab.JGNN.nn.loss.report.VerboseLoss
                                      Package
                                      Description
                                      -
                                       
                                      +
                                      +
                                      Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status.
                                      +
                                        diff --git a/docs/javadoc/mklab/JGNN/nn/loss/report/package-summary.html b/docs/javadoc/mklab/JGNN/nn/loss/report/package-summary.html index e321d266..8c0f6a66 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/report/package-summary.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/report/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.loss.report - + @@ -42,7 +42,7 @@
                                        @@ -62,6 +62,15 @@

Package mklab.JGNN.nn.loss.report


                                        package mklab.JGNN.nn.loss.report
                                        +
                                        +
                                        Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status. For example, these can be used + for printing or logging training progress.
                                        +
                                        +
                                        Author:
                                        +
                                        Emmanouil Krasanakis
                                        +
                                        +
                                        • @@ -71,7 +80,9 @@

Package mklab.JGNN.nn.loss.report
                                          Package
                                          Description
                                          -
                                           
                                          +
                                          +
Contains classes for instantiating loss functions.
                                          +

                                        • @@ -83,8 +94,8 @@

Package mklab.JGNN.nn.loss.report
                                          Description
                                          -
                                          Implements a Loss that wraps other losses and outputs their value during training to an output stream - (to System.out by default).
                                          +
                                          Implements a Loss that wraps other losses and outputs their value + during training to an output stream (to System.out by default).
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/loss/report/package-tree.html b/docs/javadoc/mklab/JGNN/nn/loss/report/package-tree.html index 7496394b..78b3f188 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/report/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/report/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.loss.report Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/loss/report/package-use.html b/docs/javadoc/mklab/JGNN/nn/loss/report/package-use.html index e98264e4..0db01d43 100644 --- a/docs/javadoc/mklab/JGNN/nn/loss/report/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/loss/report/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.loss.report - + @@ -57,7 +57,10 @@

Uses of Package mklab.JGNN.nn.loss.report
                                          Package
                                          Description
                                          -
                                           
                                          +
                                          +
                                          Contains losses that wrap other losses and augment their numeric computations + with live reporting of the training status.
                                          +
                                            @@ -69,8 +72,8 @@

Uses of Package mklab.JGNN.nn.loss.report
                                            Description
                                            -
                                            Implements a Loss that wraps other losses and outputs their value during training to an output stream - (to System.out by default).
                                            +
                                            Implements a Loss that wraps other losses and outputs their value + during training to an output stream (to System.out by default).

                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Add.html b/docs/javadoc/mklab/JGNN/nn/operations/Add.html index 14ba7bc5..9bd693f4 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Add.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Add.html @@ -1,11 +1,11 @@ - + Add - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Attention.html b/docs/javadoc/mklab/JGNN/nn/operations/Attention.html index 88c4db6e..8c4dd515 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Attention.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Attention.html @@ -1,11 +1,11 @@ - + Attention - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Complement.html b/docs/javadoc/mklab/JGNN/nn/operations/Complement.html index 23ad1fbb..f8746564 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Complement.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Complement.html @@ -1,11 +1,11 @@ - + Complement - + @@ -78,7 +78,8 @@

                                          Class Complement


                                          public class Complement extends NNOperation
                                          -
                                          Implements a NNOperation that performs the operation 1-x for its simple input x.
                                          +
                                          Implements a NNOperation that performs the operation 1-x for its + simple input x.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Concat.html b/docs/javadoc/mklab/JGNN/nn/operations/Concat.html index 06ac6759..0ad4d20e 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Concat.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Concat.html @@ -1,11 +1,11 @@ - + Concat - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Dropout.html b/docs/javadoc/mklab/JGNN/nn/operations/Dropout.html index 81bd14c1..e546b73c 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Dropout.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Dropout.html @@ -1,11 +1,11 @@ - + Dropout - + @@ -82,8 +82,9 @@

                                          Class Dropout


                                          public class Dropout extends NNOperation
                                          -
                                          Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
                                          +
Implements a NNOperation that performs dropout, randomly omitting + elements of its input during training.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/From.html b/docs/javadoc/mklab/JGNN/nn/operations/From.html index b1d334a3..d3a46abd 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/From.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/From.html @@ -1,11 +1,11 @@ - + From - + @@ -82,7 +82,8 @@

                                          Class From


                                          public class From extends NNOperation
                                          -
                                          Implements a NNOperation that lists the first element of the 2D matrix element iterator.
                                          +
                                          Implements a NNOperation that lists the first element of the 2D + matrix element iterator.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Gather.html b/docs/javadoc/mklab/JGNN/nn/operations/Gather.html index 327d593e..2ab5d813 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Gather.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Gather.html @@ -1,11 +1,11 @@ - + Gather - + @@ -78,7 +78,8 @@

                                          Class Gather


                                          public class Gather extends NNOperation
                                          -
                                          Implements a NNOperation that performs the equivalent of TensorFlow's gather operation.
                                          +
                                          Implements a NNOperation that performs the equivalent of TensorFlow's + gather operation.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Identity.html b/docs/javadoc/mklab/JGNN/nn/operations/Identity.html index eaf12686..049aaf20 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Identity.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Identity.html @@ -1,11 +1,11 @@ - + Identity - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/LSTM.LSTMState.html b/docs/javadoc/mklab/JGNN/nn/operations/LSTM.LSTMState.html index 5dae2d10..e03a4011 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/LSTM.LSTMState.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/LSTM.LSTMState.html @@ -1,11 +1,11 @@ - + LSTM.LSTMState - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/LSTM.html b/docs/javadoc/mklab/JGNN/nn/operations/LSTM.html index 884f995b..8c7ce8a2 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/LSTM.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/LSTM.html @@ -1,11 +1,11 @@ - + LSTM - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Log.html b/docs/javadoc/mklab/JGNN/nn/operations/Log.html index 51def8e4..287da0a1 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Log.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Log.html @@ -1,11 +1,11 @@ - + Log - + @@ -78,7 +78,8 @@

                                          Class Log


                                          public class Log extends NNOperation
                                          -
                                          Implements a NNOperation that outputs the natural logarithm of its single input.
                                          +
                                          Implements a NNOperation that outputs the natural logarithm of its + single input.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/MatMul.html b/docs/javadoc/mklab/JGNN/nn/operations/MatMul.html index d17ad4d1..34d9532a 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/MatMul.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/MatMul.html @@ -1,11 +1,11 @@ - + MatMul - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Multiply.html b/docs/javadoc/mklab/JGNN/nn/operations/Multiply.html index a139837c..73e3609f 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Multiply.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Multiply.html @@ -1,11 +1,11 @@ - + Multiply - + @@ -82,7 +82,8 @@

                                          Class Multiply


                                          public class Multiply extends NNOperation
                                          -
                                          Implements a NNOperation that multiplies its two inputs element-by-element.
                                          +
                                          Implements a NNOperation that multiplies its two inputs + element-by-element.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Reduce.html b/docs/javadoc/mklab/JGNN/nn/operations/Reduce.html index b47b38ab..e6f151d0 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Reduce.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Reduce.html @@ -1,11 +1,11 @@ - + Reduce - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Repeat.html b/docs/javadoc/mklab/JGNN/nn/operations/Repeat.html index b594ba37..d4ba6239 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Repeat.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Repeat.html @@ -1,11 +1,11 @@ - + Repeat - + @@ -78,8 +78,9 @@

                                          Class Repeat


                                          public class Repeat extends NNOperation
                                          -
                                          Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
                                          +
                                          Implements a NNOperation that converts its first argument to a + ColumnRepetition matrix with a number of columns equal to the second + argument.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Reshape.html b/docs/javadoc/mklab/JGNN/nn/operations/Reshape.html index a763f458..7eeaefe9 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Reshape.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Reshape.html @@ -1,11 +1,11 @@ - + Reshape - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/To.html b/docs/javadoc/mklab/JGNN/nn/operations/To.html index 36eeab15..bead1bd0 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/To.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/To.html @@ -1,11 +1,11 @@ - + To - + @@ -82,7 +82,8 @@

                                          Class To


                                          public class To extends NNOperation
                                          -
                                          Implements a NNOperation that lists the second element of the 2D matrix element iterator.
                                          +
                                          Implements a NNOperation that lists the second element of the 2D + matrix element iterator.
                                          Author:
                                          Emmanouil Krasanakis
                                          diff --git a/docs/javadoc/mklab/JGNN/nn/operations/Transpose.html b/docs/javadoc/mklab/JGNN/nn/operations/Transpose.html index d5796ea7..3fc839a1 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/Transpose.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/Transpose.html @@ -1,11 +1,11 @@ - + Transpose - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Add.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Add.html index 91070433..b25ee5a1 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Add.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Add.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Add - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Attention.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Attention.html index 67ac7c92..477d7fe2 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Attention.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Attention.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Attention - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Complement.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Complement.html index 253655b9..d4f8593d 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Complement.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Complement.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Complement - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Concat.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Concat.html index e9c64d90..bb50966f 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Concat.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Concat.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Concat - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Dropout.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Dropout.html index 494b6f62..18c0a1f0 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Dropout.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Dropout.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Dropout - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/From.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/From.html index ee55a6a7..fdc495e0 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/From.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/From.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.From - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Gather.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Gather.html index 93198976..192429e9 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Gather.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Gather.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Gather - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Identity.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Identity.html index d6ad398c..d6cdafe8 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Identity.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Identity.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Identity - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.LSTMState.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.LSTMState.html index c1dbb4f2..26c8c266 100644 --- 
a/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.LSTMState.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.LSTMState.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.LSTM.LSTMState - + @@ -57,7 +57,9 @@

                                          Package
                                          Description
                                          -
                                           
                                          +
                                          +
                                          Contains popular neural network and GNN operations.
                                          +
                                            diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.html index 38e8a716..df752014 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/LSTM.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.LSTM - + @@ -57,7 +57,9 @@

Uses of Class mklab.JGNN.nn.operations.LSTM
                                            Package
                                            Description
                                            -
                                             
                                            +
                                            +
                                            Contains popular neural network and GNN operations.
                                            +
                                              diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Log.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Log.html index 74737b8f..2d55eb81 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Log.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Log.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Log - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/MatMul.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/MatMul.html index 7b72ff18..af843947 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/MatMul.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/MatMul.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.MatMul - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Multiply.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Multiply.html index 6d38a6e1..93040207 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Multiply.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Multiply.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Multiply - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reduce.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reduce.html index edef4105..5061174b 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reduce.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reduce.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Reduce - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Repeat.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Repeat.html index ce60a34e..14e2a2c3 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Repeat.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Repeat.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Repeat - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reshape.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reshape.html index e23b354b..fc26bdf3 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reshape.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Reshape.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Reshape - + @@ -57,7 +57,9 @@

Uses of Class mklab.JGNN.nn.operations.Reshape
                                              Package
                                              Description
                                              -
                                               
                                              +
                                              +
                                              Contains popular neural network and GNN operations.
                                              +
                                                diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/To.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/To.html index f6a05956..094c9742 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/To.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/To.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.To - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Transpose.html b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Transpose.html index b3023ef6..d9c692eb 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/class-use/Transpose.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/class-use/Transpose.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.operations.Transpose - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/package-summary.html b/docs/javadoc/mklab/JGNN/nn/operations/package-summary.html index d7734705..65c28266 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/package-summary.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.operations - + @@ -42,7 +42,7 @@
                                                @@ -62,6 +62,15 @@

Package mklab.JGNN.nn.operations


                                                package mklab.JGNN.nn.operations
                                                +
                                                +
                                                Contains popular neural network and GNN operations. These are intermediate + representations of parsed ModelBuilder expressions + that call operations of the mklab.JGNN.core package.
                                                +
                                                +
                                                Author:
                                                +
                                                Emmanouil Krasanakis
                                                +
                                                +
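As an illustrative sketch (variable names and dimensions are hypothetical), the operations of this package are typically created by parsing ModelBuilder expressions rather than instantiated by hand; here @ and log are assumed to be parsed into the MatMul and Log operations:

import mklab.JGNN.adhoc.ModelBuilder;
import mklab.JGNN.core.matrix.DenseMatrix;

ModelBuilder builder = new ModelBuilder()
        .var("x")                          // runtime input
        .param("w", new DenseMatrix(4, 2)) // learnable 4x2 parameter matrix
        .operation("h = log(x @ w)")       // parsed into MatMul and Log operations
        .out("h");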
                                                • @@ -71,7 +80,10 @@

Package mklab.JGNN.nn.operations
                                                  Package
                                                  Description
                                                  -
                                                   
                                                  +
                                                  +
Implements neural network components that are combined to define GNNs or + other types of machine learning models.
                                                  +

                                                • @@ -92,7 +104,8 @@

Package mklab.JGNN.nn.operations
                                                  -
                                                  Implements a NNOperation that performs the operation 1-x for its simple input x.
                                                  +
                                                  Implements a NNOperation that performs the operation 1-x for its + simple input x.
                                                  @@ -100,16 +113,19 @@

Package mklab.JGNN.nn.operations

                                                  -
                                                  Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
                                                  +
Implements a NNOperation that performs dropout, randomly omitting + elements of its input during training.
                                                  -
                                                  Implements a NNOperation that lists the first element of the 2D matrix element iterator.
                                                  +
                                                  Implements a NNOperation that lists the first element of the 2D + matrix element iterator.
                                                  -
                                                  Implements a NNOperation that performs the equivalent of TensorFlow's gather operation.
                                                  +
                                                  Implements a NNOperation that performs the equivalent of TensorFlow's + gather operation.
                                                  @@ -117,7 +133,8 @@

Package mklab.JGNN.nn.operations

                                                  -
                                                  Implements a NNOperation that outputs the natural logarithm of its single input.
                                                  +
                                                  Implements a NNOperation that outputs the natural logarithm of its + single input.
                                                   
                                                  @@ -129,14 +146,16 @@

Package mklab.JGNN.nn.operations
                                                  -
                                                  Implements a NNOperation that multiplies its two inputs element-by-element.
                                                  +
                                                  Implements a NNOperation that multiplies its two inputs + element-by-element.
                                                   
                                                  -
                                                  Implements a NNOperation that converts its first argument to a ColumnRepetition matrix - with a number of columns equal to the second argument.
                                                  +
                                                  Implements a NNOperation that converts its first argument to a + ColumnRepetition matrix with a number of columns equal to the second + argument.
                                                  @@ -144,7 +163,8 @@

Package mklab.JGNN.nn.operations

                                                  -
                                                  Implements a NNOperation that lists the second element of the 2D matrix element iterator.
                                                  +
                                                  Implements a NNOperation that lists the second element of the 2D + matrix element iterator.
                                                  diff --git a/docs/javadoc/mklab/JGNN/nn/operations/package-tree.html b/docs/javadoc/mklab/JGNN/nn/operations/package-tree.html index 6137e38a..81961127 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.operations Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/operations/package-use.html b/docs/javadoc/mklab/JGNN/nn/operations/package-use.html index 5f3e7acf..8f16de3e 100644 --- a/docs/javadoc/mklab/JGNN/nn/operations/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/operations/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.operations - + @@ -57,7 +57,9 @@

Uses of Package mklab.JGNN.nn.operations
                                                  Package
                                                  Description
                                                  -
                                                   
                                                  +
                                                  +
                                                  Contains popular neural network and GNN operations.
                                                  +

                                                    diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/Adam.html b/docs/javadoc/mklab/JGNN/nn/optimizers/Adam.html index 85f5cb99..965501c1 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/Adam.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/Adam.html @@ -1,11 +1,11 @@ - + Adam - + @@ -250,10 +250,10 @@

                                                    update

                                                    public void update(Tensor value, Tensor gradient)
                                                    Description copied from interface: Optimizer
                                                    -
                                                    In-place updates the value of a tensor given its gradient. - Some optimizers (e.g. Adama) require the exact same tensor instance to be provided - so as to keep track of its optimization progress. The library makes sure to keep - this constraint.
                                                    +
In-place updates the value of a tensor given its gradient. Some optimizers + (e.g. Adam) require the exact same tensor instance to be provided so as to + keep track of its optimization progress. The library makes sure to keep this + constraint.
                                                    Specified by:
                                                    update in interface Optimizer
                                                    @@ -268,8 +268,8 @@

                                                    update

                                                    reset

                                                    public void reset()
                                                    Description copied from interface: Optimizer
                                                    -
                                                    Resets (and lets the garbage collector free) optimizer memory. - Should be called at the beginning of training (not after each epoch).
                                                    +
                                                    Resets (and lets the garbage collector free) optimizer memory. Should be + called at the beginning of training (not after each epoch).
                                                    Specified by:
                                                    reset in interface Optimizer
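A minimal sketch of this contract with hypothetical values; note that the same parameter tensor instance is kept across updates so that Adam can track its optimization progress:

import mklab.JGNN.core.Tensor;
import mklab.JGNN.core.tensor.DenseTensor;
import mklab.JGNN.nn.Optimizer;
import mklab.JGNN.nn.optimizers.Adam;

Optimizer adam = new Adam(0.01);
adam.reset();                                            // once, before training starts
Tensor param = new DenseTensor(2).put(0, 1).put(1, -1);  // updated in place by the optimizer
Tensor gradient = new DenseTensor(2).put(0, 0.5).put(1, 0.5);
adam.update(param, gradient);                            // one training step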
                                                    diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/BatchOptimizer.html b/docs/javadoc/mklab/JGNN/nn/optimizers/BatchOptimizer.html index 955636af..d392e804 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/BatchOptimizer.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/BatchOptimizer.html @@ -1,11 +1,11 @@ - + BatchOptimizer - + @@ -213,10 +213,10 @@

                                                    update

                                                    public void update(Tensor value, Tensor gradient)
                                                    Description copied from interface: Optimizer
                                                    -
                                                    In-place updates the value of a tensor given its gradient. - Some optimizers (e.g. Adama) require the exact same tensor instance to be provided - so as to keep track of its optimization progress. The library makes sure to keep - this constraint.
                                                    +
In-place updates the value of a tensor given its gradient. Some optimizers + (e.g. Adam) require the exact same tensor instance to be provided so as to + keep track of its optimization progress. The library makes sure to keep this + constraint.
                                                    Specified by:
                                                    update in interface Optimizer
                                                    @@ -231,8 +231,8 @@

                                                    update

                                                    reset

                                                    public void reset()
                                                    Description copied from interface: Optimizer
                                                    -
                                                    Resets (and lets the garbage collector free) optimizer memory. - Should be called at the beginning of training (not after each epoch).
                                                    +
                                                    Resets (and lets the garbage collector free) optimizer memory. Should be + called at the beginning of training (not after each epoch).
                                                    Specified by:
                                                    reset in interface Optimizer
                                                    diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/GradientDescent.html b/docs/javadoc/mklab/JGNN/nn/optimizers/GradientDescent.html index a43a575a..d3fda0ef 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/GradientDescent.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/GradientDescent.html @@ -1,11 +1,11 @@ - + GradientDescent - + @@ -190,10 +190,10 @@

                                                    update

                                                    public void update(Tensor value, Tensor gradient)
                                                    Description copied from interface: Optimizer
                                                    -
                                                    In-place updates the value of a tensor given its gradient. - Some optimizers (e.g. Adama) require the exact same tensor instance to be provided - so as to keep track of its optimization progress. The library makes sure to keep - this constraint.
                                                    +
In-place updates the value of a tensor given its gradient. Some optimizers + (e.g. Adam) require the exact same tensor instance to be provided so as to + keep track of its optimization progress. The library makes sure to keep this + constraint.
                                                    Specified by:
                                                    update in interface Optimizer
                                                    @@ -208,8 +208,8 @@

                                                    update

                                                    reset

                                                    public void reset()
                                                    Description copied from interface: Optimizer
                                                    -
                                                    Resets (and lets the garbage collector free) optimizer memory. - Should be called at the beginning of training (not after each epoch).
                                                    +
                                                    Resets (and lets the garbage collector free) optimizer memory. Should be + called at the beginning of training (not after each epoch).
                                                    Specified by:
                                                    reset in interface Optimizer
                                                    diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/Regularization.html b/docs/javadoc/mklab/JGNN/nn/optimizers/Regularization.html index 6d640ba4..6e6bc27d 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/Regularization.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/Regularization.html @@ -1,11 +1,11 @@ - + Regularization - + @@ -85,8 +85,8 @@

                                                    Class Regularization

                                                    public class Regularization extends Object implements Optimizer
                                                    -
                                                    Wraps an Optimizer by applying the derivative of L2 loss - on every tensor during Optimizer.update(Tensor, Tensor).
                                                    +
                                                    Wraps an Optimizer by applying the derivative of L2 loss on every + tensor during Optimizer.update(Tensor, Tensor).
                                                    Author:
                                                    Emmanouil Krasanakis
                                                    @@ -176,10 +176,10 @@

                                                    update

                                                    public void update(Tensor value, Tensor gradient)
                                                    Description copied from interface: Optimizer
                                                    -
                                                    In-place updates the value of a tensor given its gradient. - Some optimizers (e.g. Adama) require the exact same tensor instance to be provided - so as to keep track of its optimization progress. The library makes sure to keep - this constraint.
                                                    +
In-place updates the value of a tensor given its gradient. Some optimizers + (e.g. Adam) require the exact same tensor instance to be provided so as to + keep track of its optimization progress. The library makes sure to keep this + constraint.
                                                    Specified by:
                                                    update in interface Optimizer
                                                    @@ -194,8 +194,8 @@

                                                    update

                                                    reset

                                                    public void reset()
                                                    Description copied from interface: Optimizer
                                                    -
                                                    Resets (and lets the garbage collector free) optimizer memory. - Should be called at the beginning of training (not after each epoch).
                                                    +
                                                    Resets (and lets the garbage collector free) optimizer memory. Should be + called at the beginning of training (not after each epoch).
                                                    Specified by:
                                                    reset in interface Optimizer
                                                    diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Adam.html b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Adam.html index 77998dc6..b5fe3793 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Adam.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Adam.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.optimizers.Adam - + diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/BatchOptimizer.html b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/BatchOptimizer.html index 35dffe3d..8403a972 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/BatchOptimizer.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/BatchOptimizer.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.optimizers.BatchOptimizer - + diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/GradientDescent.html b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/GradientDescent.html index b3cdc354..7f5f961f 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/GradientDescent.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/GradientDescent.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.optimizers.GradientDescent - + diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Regularization.html b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Regularization.html index 5bd000c1..1a1d7b0e 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Regularization.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/class-use/Regularization.html @@ -1,11 +1,11 @@ - + Uses of Class mklab.JGNN.nn.optimizers.Regularization - + diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/package-summary.html b/docs/javadoc/mklab/JGNN/nn/optimizers/package-summary.html index 31f73393..04a7c538 100644 --- a/docs/javadoc/mklab/JGNN/nn/optimizers/package-summary.html +++ b/docs/javadoc/mklab/JGNN/nn/optimizers/package-summary.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.optimizers - + @@ -42,7 +42,7 @@
                                                    @@ -62,6 +62,21 @@

                                                    Package mklab.JGNN.nn


                                                    package mklab.JGNN.nn.optimizers
                                                    +
                                                    +
Contains optimizers that can be used to update training losses. Instantiate
optimizers, and let methods like
Model.train(mklab.JGNN.nn.Loss, mklab.JGNN.nn.Optimizer, java.util.List, java.util.List)
request parameter update rules given the internally computed outcome of
backpropagation. When writing a training procedure of your own, use the
BatchOptimizer to wrap some base optimizer and accumulate gradient updates
until calling BatchOptimizer.updateAll() at the end of each batch or epoch;
a short sketch follows the author note below.
                                                    +
                                                    +
                                                    Author:
                                                    +
                                                    Emmanouil Krasanakis
                                                    +
                                                    +
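A minimal sketch of the accumulate-then-apply pattern described above; the toy parameter and its gradient are illustrative assumptions, and only the wrap/update/updateAll() flow is the point.

    import mklab.JGNN.core.Tensor;
    import mklab.JGNN.core.tensor.DenseTensor;
    import mklab.JGNN.nn.optimizers.Adam;
    import mklab.JGNN.nn.optimizers.BatchOptimizer;

    public class BatchingSketch {
        public static void main(String[] args) {
            // Wrapped base optimizer: update(...) now only accumulates.
            BatchOptimizer optimizer = new BatchOptimizer(new Adam(0.01));
            Tensor parameter = new DenseTensor(4);
            parameter.put(0, 1.0);
            for (int epoch = 0; epoch < 10; epoch++) {
                for (int sample = 0; sample < 32; sample++)
                    optimizer.update(parameter, parameter.multiply(2)); // accumulate
                optimizer.updateAll(); // apply everything once per epoch
            }
        }
    }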
                                                    • @@ -71,7 +86,10 @@

                                                      Package mklab.JGNN.nn
                                                      Package
                                                      Description
                                                      -
                                                       
                                                      +
                                                      +
Implements neural network components that are combined to define GNNs or
other types of machine learning models.
                                                      +

                                                    • @@ -98,8 +116,8 @@

                                                      Package mklab.JGNN.nn
                                                      -
Wraps an Optimizer by applying the derivative of L2 loss
on every tensor during Optimizer.update(Tensor, Tensor).
+
Wraps an Optimizer by applying the derivative of L2 loss on every
tensor during Optimizer.update(Tensor, Tensor).
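For illustration, the composition could look as follows; the two-argument constructor and the 5.E-4 regularization weight are assumptions of this sketch.

    import mklab.JGNN.nn.Optimizer;
    import mklab.JGNN.nn.optimizers.Adam;
    import mklab.JGNN.nn.optimizers.Regularization;

    public class RegularizationSketch {
        public static void main(String[] args) {
            // Updates routed through this optimizer behave as if the
            // derivative of an L2 penalty were added to each gradient
            // before Adam applies it.
            Optimizer optimizer = new Regularization(new Adam(0.01), 5.E-4);
        }
    }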
diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/package-tree.html b/docs/javadoc/mklab/JGNN/nn/optimizers/package-tree.html
index b990cdca..162f08b0 100644
    (regenerated head metadata; title "mklab.JGNN.nn.optimizers Class Hierarchy" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/optimizers/package-use.html b/docs/javadoc/mklab/JGNN/nn/optimizers/package-use.html
index 75c5545e..0ae7c992 100644
    (regenerated head metadata; title "Uses of Package mklab.JGNN.nn.optimizers" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/package-summary.html b/docs/javadoc/mklab/JGNN/nn/package-summary.html
index 6359547c..af1968f2 100644
    (regenerated head metadata; title "mklab.JGNN.nn" unchanged)
@@ -46,7 +46,7 @@
                                                      @@ -66,6 +66,20 @@

                                                      Package mklab.JGNN.nn


                                                      package mklab.JGNN.nn
                                                      +
                                                      +
Implements neural network components that are combined to define GNNs or
other types of machine learning models. Hand-wiring everything may be
cumbersome, so prefer using ModelBuilder and its
extensions to construct Model instances. Components
matching common neural operations are provided in sub-packages, where they
are separated by their functional role as activations, inputs, operations, or
pooling functions. Additionally, Java code components are provided for losses
and model parameter initialization. A short construction sketch follows the
author note below.
                                                      +
                                                      +
                                                      Author:
                                                      +
                                                      Emmanouil Krasanakis
                                                      +
                                                      +
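For instance, a single-layer classifier could be declared as below. The builder calls follow the conventions of the library's examples, but the exact symbolic operation string should be treated as an illustrative assumption.

    import mklab.JGNN.adhoc.ModelBuilder;
    import mklab.JGNN.nn.Model;

    public class BuilderSketch {
        public static void main(String[] args) {
            Model model = new ModelBuilder()
                    .config("features", 7) // symbolic dimension sizes
                    .config("classes", 3)
                    .var("x")              // model input
                    .operation("yhat = softmax(x@matrix(features, classes)"
                            + " + vector(classes), row)")
                    .out("yhat")           // model output
                    .getModel();
        }
    }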
                                                      • @@ -75,19 +89,35 @@

                                                        Package mklab.JGNN.nn

                                                        Package
                                                        Description
                                                        -
                                                         
                                                        +
                                                        +
Implements activation functions to be used as model operations.
                                                        +
                                                        -
                                                         
                                                        +
                                                        +
Implements initializers to be applied on Model
parameters to stochastically induce some desired property at the first
training epoch.
                                                        +
                                                        -
                                                         
                                                        +
                                                        +
                                                        Contains various types of neural architecture inputs.
                                                        +
                                                        -
                                                         
                                                        +
                                                        +
Contains classes for instantiating loss functions.
                                                        +
                                                        -
                                                         
                                                        +
                                                        +
                                                        Contains popular neural network and GNN operations.
                                                        +
                                                        -
                                                         
                                                        +
                                                        +
                                                        Contains optimizers that can be used to update training losses.
                                                        +
                                                        -
                                                         
                                                        +
                                                        +
                                                        Contains pooling/reduction operations that reduce the dimensions of inputs.
                                                        +
                                                      • @@ -104,27 +134,21 @@

                                                        Package mklab.JGNN.nn

                                                        -
This class provides an abstract implementation of loss functions
to be used during Model training.
+
This class provides an abstract implementation of loss functions to be used
during Model training.
                                                        This class is a way to organize NNOperation trees into trainable machine learning models.
                                                        - +
                                                        -
This is a helper class that automates the definition of training processes of
Model instances by defining the number of epochs, loss functions, number of
batches and the ability to use ThreadPool for parallelized batch computations.
                                                        -
                                                        - -
                                                        This class defines an abstract neural network operation with forward and backpropagation capabilities.
                                                        - -
                                                        + +
                                                        Provides an interface for training tensors.
diff --git a/docs/javadoc/mklab/JGNN/nn/package-tree.html b/docs/javadoc/mklab/JGNN/nn/package-tree.html
index fe905be1..942eec31 100644
    (regenerated head metadata; title "mklab.JGNN.nn Class Hierarchy" unchanged)
@@ -64,7 +64,6 @@

                                                        Class Hierarchy

                                                      • mklab.JGNN.nn.Initializer
                                                      • mklab.JGNN.nn.Loss
                                                      • mklab.JGNN.nn.Model
                                                      • -
                                                      • mklab.JGNN.nn.ModelTraining
                                                      • mklab.JGNN.nn.NNOperation
diff --git a/docs/javadoc/mklab/JGNN/nn/package-use.html b/docs/javadoc/mklab/JGNN/nn/package-use.html
index 24a598c0..fca3c44d 100644
    (regenerated head metadata; title "Uses of Package mklab.JGNN.nn" unchanged)
@@ -57,25 +57,54 @@

                                                      Uses of Package
                                                      mklab
                                                      Package
                                                      Description
                                                      -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      - -
                                                       
                                                      +
                                                      +
                                                      Contains classes that simplify data loading, model building, and training.
                                                      +
                                                      + +
                                                      +
Contains model training strategies that correspond to different predictive
tasks.
                                                      +
                                                      + +
                                                      +
Implements neural network components that are combined to define GNNs or
other types of machine learning models.
                                                      +
                                                      + +
                                                      +
Implements activation functions to be used as model operations.
                                                      +
                                                      + +
                                                      +
Implements initializers to be applied on Model
parameters to stochastically induce some desired property at the first
training epoch.
                                                      +
                                                      + +
                                                      +
                                                      Contains various types of neural architecture inputs.
                                                      +
                                                      + +
                                                      +
Contains classes for instantiating loss functions.
                                                      +
                                                      + +
                                                      +
Contains losses that wrap other losses and augment their numeric computations
with live reporting of the training status; a short sketch follows this table.
                                                      +
                                                      + +
                                                      +
                                                      Contains popular neural network and GNN operations.
                                                      +
                                                      + +
                                                      +
                                                      Contains optimizers that can be used to update training losses.
                                                      +
                                                      + +
                                                      +
                                                      Contains pooling/reduction operations that reduce the dimensions of inputs.
                                                      +
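The sketch promised in the table above; it assumes that VerboseLoss decorates a base Loss through its constructor, as the package description implies.

    import mklab.JGNN.nn.Loss;
    import mklab.JGNN.nn.loss.CategoricalCrossEntropy;
    import mklab.JGNN.nn.loss.report.VerboseLoss;

    public class ReportingSketch {
        public static void main(String[] args) {
            // Evaluates exactly like the wrapped cross-entropy, but also
            // reports the running training status while doing so.
            Loss loss = new VerboseLoss(new CategoricalCrossEntropy());
        }
    }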
                                                        @@ -85,16 +114,39 @@

                                                        Uses of Package
                                                        mklab
                                                        Class
                                                        Description
                                                        - +
                                                        +
This class provides an abstract implementation of loss functions to be used
during Model training.
                                                        +
                                                        + +
                                                        This class is a way to organize NNOperation trees into trainable machine learning models.
                                                        - -
                                                        + +
                                                        This class defines an abstract neural network operation with forward and backpropagation capabilities.
                                                        + +
                                                        +
                                                        Provides an interface for training tensors.
                                                        +
                                                        +
                                                        +

                                                      + +
                                                    • +
                                                      + +
                                                      +
                                                      Class
                                                      +
                                                      Description
                                                      + +
                                                      +
This class is a way to organize NNOperation trees into trainable machine
learning models.
                                                      +
                                                    • @@ -110,27 +162,21 @@

                                                      Uses of Package
                                                      mklab
                                                      -
This class provides an abstract implementation of loss functions
to be used during Model training.
+
This class provides an abstract implementation of loss functions to be used
during Model training.
                                                      This class is a way to organize NNOperation trees into trainable machine learning models.
                                                      - +
                                                      -
This is a helper class that automates the definition of training processes of
Model instances by defining the number of epochs, loss functions, number of
batches and the ability to use ThreadPool for parallelized batch computations.
                                                      -
                                                      - -
                                                      This class defines an abstract neural network operation with forward and backpropagation capabilities.
                                                      - -
                                                      + +
                                                      Provides an interface for training tensors.
                                                      @@ -190,8 +236,8 @@

                                                      Uses of Package
                                                      mklab
                                                      Description
                                                      -
This class provides an abstract implementation of loss functions
to be used during Model training.
+
This class provides an abstract implementation of loss functions to be used
during Model training.

                                                      @@ -204,8 +250,8 @@

                                                      Uses of Package
                                                      mklab
                                                      Description
                                                      -
This class provides an abstract implementation of loss functions
to be used during Model training.
+
This class provides an abstract implementation of loss functions to be used
during Model training.

diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/Max.html b/docs/javadoc/mklab/JGNN/nn/pooling/Max.html
index ac5c48d5..43c0d5ab 100644
    (regenerated head metadata; title "Max" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/Mean.html b/docs/javadoc/mklab/JGNN/nn/pooling/Mean.html
index 5545f930..32b37693 100644
    (regenerated head metadata; title "Mean" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/SoftMax.html b/docs/javadoc/mklab/JGNN/nn/pooling/SoftMax.html
index f30ffd43..54e7b69d 100644
    (regenerated head metadata; title "SoftMax" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/Sort.html b/docs/javadoc/mklab/JGNN/nn/pooling/Sort.html
index 5d7c9330..96d50ecf 100644
    (regenerated head metadata; title "Sort" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/Sum.html b/docs/javadoc/mklab/JGNN/nn/pooling/Sum.html
index 3958cd72..fe2f5975 100644
    (regenerated head metadata; title "Sum" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Max.html b/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Max.html
index 4d1b214b..bac6e04c 100644
    (regenerated head metadata; title "Uses of Class mklab.JGNN.nn.pooling.Max" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Mean.html b/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Mean.html
index f790f080..fc3b4915 100644
    (regenerated head metadata; title "Uses of Class mklab.JGNN.nn.pooling.Mean" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/class-use/SoftMax.html b/docs/javadoc/mklab/JGNN/nn/pooling/class-use/SoftMax.html
index bb10e6b5..a78954ba 100644
    (regenerated head metadata; title "Uses of Class mklab.JGNN.nn.pooling.SoftMax" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Sort.html b/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Sort.html
index 31b3507c..2ff7b7ce 100644
    (regenerated head metadata; title "Uses of Class mklab.JGNN.nn.pooling.Sort" unchanged)
@@ -57,7 +57,9 @@

                                                    Uses of Class
                                                    Package
                                                    Description
                                                    -
                                                     
                                                    +
                                                    +
                                                    Contains pooling/reduction operations that reduce the dimensions of inputs.
                                                    +
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Sum.html b/docs/javadoc/mklab/JGNN/nn/pooling/class-use/Sum.html
index aa56129e..ecbb6c93 100644
    (regenerated head metadata; title "Uses of Class mklab.JGNN.nn.pooling.Sum" unchanged)
diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/package-summary.html b/docs/javadoc/mklab/JGNN/nn/pooling/package-summary.html
index f2d7237e..d9de9c0e 100644
    (regenerated head metadata; title "mklab.JGNN.nn.pooling" unchanged)
@@ -42,7 +42,7 @@
                                                      @@ -62,6 +62,14 @@

Package mklab.JGNN.nn.pooling


                                                      package mklab.JGNN.nn.pooling
                                                      +
                                                      +
Contains pooling/reduction operations that reduce the dimensions of inputs.
In JGNN, this mainly means reducing matrices to vectors or smaller matrices.
A short usage sketch follows the author note below.
                                                      +
                                                      +
                                                      Author:
                                                      +
                                                      Emmanouil Krasanakis
                                                      +
                                                      +
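As a concrete case of such a reduction, the package's row-wise softmax turns a node-feature matrix into per-row class distributions. The builder-based sketch below assumes the symbolic "softmax(h, row)" syntax seen in the library's examples.

    import mklab.JGNN.adhoc.ModelBuilder;

    public class PoolingSketch {
        public static void main(String[] args) {
            ModelBuilder builder = new ModelBuilder()
                    .var("h")                            // a nodes x classes matrix
                    .operation("yhat = softmax(h, row)") // reduce per matrix row
                    .out("yhat");
        }
    }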
                                                      • @@ -71,7 +79,10 @@

Package mklab.JGNN.nn.pooling
                                                        Package
                                                        Description
                                                        -
                                                         
                                                        +
                                                        +
Implements neural network components that are combined to define GNNs or
other types of machine learning models.
                                                        +

                                                      • diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/package-tree.html b/docs/javadoc/mklab/JGNN/nn/pooling/package-tree.html index 36670585..af152088 100644 --- a/docs/javadoc/mklab/JGNN/nn/pooling/package-tree.html +++ b/docs/javadoc/mklab/JGNN/nn/pooling/package-tree.html @@ -1,11 +1,11 @@ - + mklab.JGNN.nn.pooling Class Hierarchy - + diff --git a/docs/javadoc/mklab/JGNN/nn/pooling/package-use.html b/docs/javadoc/mklab/JGNN/nn/pooling/package-use.html index 04d2ba2d..48110057 100644 --- a/docs/javadoc/mklab/JGNN/nn/pooling/package-use.html +++ b/docs/javadoc/mklab/JGNN/nn/pooling/package-use.html @@ -1,11 +1,11 @@ - + Uses of Package mklab.JGNN.nn.pooling - + @@ -57,7 +57,9 @@

Uses of Package
                                                        Package
                                                        Description
                                                        -
                                                         
                                                        +
                                                        +
                                                        Contains pooling/reduction operations that reduce the dimensions of inputs.
                                                        +
                                                        -
• mklab.JGNN.nn.ModelTraining
+
• mklab.JGNN.adhoc.ModelTraining
                                                      • mklab.JGNN.nn.NNOperation
                                                        • mklab.JGNN.nn.operations.Add
diff --git a/docs/javadoc/package-search-index.js b/docs/javadoc/package-search-index.js
index 49d10b8e..93af43c5 100644
    (single-line minified index; the updated packageSearchIndex additionally lists the new package "mklab.JGNN.adhoc.train")
diff --git a/docs/javadoc/type-search-index.js b/docs/javadoc/type-search-index.js
index 85734293..8d5dbe2a 100644
    (single-line minified index; the updated typeSearchIndex re-indexes "ModelTraining" under "mklab.JGNN.adhoc" instead of "mklab.JGNN.nn" and adds "NodeClassification" in "mklab.JGNN.adhoc.train")