From 0404df02cbb9e39631b8ed12853737b244234a27 Mon Sep 17 00:00:00 2001
From: maniospas

Training epochs for the created model can be implemented
manually, by passing inputs, obtaining outputs, computing losses, and triggering backpropagation
on an optimizer. As these steps may be complicated, JGNN automates common
- training patterns with a ModelTraining class. In the example, a parameter initializer is applied on the model before training is conducted.
- This is a cold start scenario, as opposed to a warm start that continues training already
- trained parameters.
- Selecting an initializer is not part of training strategies
+ training patterns by extending a base ModelTraining class.
+ Trained models and their generating builders can be saved and loaded. The next snippet demonstrates
how raw predictions can be made too. During this process,
@@ -358,7 +366,7 @@ model
(the same instance as the first
- * argument).
- */
- public Model train(Model model, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples) {
- // ACTUAL TRAINING
- double minLoss = Double.POSITIVE_INFINITY;
- HashMap
+ * @return this classification training instance.
+ */
+ public SampleClassification setOutputs(Matrix labels) {
+ if (this.labels != null)
+ throw new RuntimeException("Can only set labels once in a SampleClassification instance.");
+ this.labels = labels;
+ return this;
+ }
+
+ /**
+ * Sets a slice of training samples. These should be identifiers of
+ * feature/label rows; basically, they reflect which rows of these matrices
+ * should be retrieved during training. If multiple batches are set, for example
+ * with {@link #setNumBatches(int)}, then these samples are further split for
+ * each batch.
+ *
+ * @param trainingSamples The slice of training samples.
+ * @return this classification training instance.
+ */
+ public SampleClassification setTrainingSamples(Slice trainingSamples) {
+ if (this.trainingSamples != null)
+ throw new RuntimeException("Can only set a training sample slice once in a SampleClassification instance.");
+ this.trainingSamples = trainingSamples;
+ return this;
+ }
+
+ /**
+ * Sets a slice of validation samples. These should be identifiers of
+ * feature/label rows; basically, they reflect which rows of these matrices
+ * should be retrieved during validation.
+ *
+ * @param validationSamples The slice of validation samples.
+ * @return this classification training instance.
+ */
+ public SampleClassification setValidationSamples(Slice validationSamples) {
+ if (this.validationSamples != null)
+ throw new RuntimeException(
+ "Can only set a validation sample slice once in a SampleClassification instance.");
+ this.validationSamples = validationSamples;
+ return this;
+ }
+
+ @Override
+ protected void onStartEpoch(int epoch) {
+ if (stochasticGradientDescent)
+ trainingSamples.shuffle(epoch);
+ }
+
+ @Override
+ protected List
 * @return this Tensor instance.
*/
public Tensor setDimensionName(Tensor other) {
- assertMatching(other);
- if (dimensionName == null)
+ //assertMatching(other);
+ if (other.getDimensionName() != null)
dimensionName = other.getDimensionName();
return this;
}
diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java
index 7c7b9c47..9f9e5f79 100644
--- a/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java
+++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/DenseMatrix.java
@@ -105,7 +105,7 @@ public Matrix matmul(Matrix with) {
@Override
public Matrix matmul(Matrix with, boolean transposeThis, boolean transposeWith) {
- if (with instanceof SparseMatrix)
+ if (!(with instanceof DenseMatrix) && !(with instanceof VectorizedMatrix))
return super.matmul(with, transposeThis, transposeWith);
// Determine the dimensions based on whether we transpose or not
@@ -124,8 +124,7 @@ public Matrix matmul(Matrix with, boolean transposeThis, boolean transposeWith)
// Create the resulting matrix
DenseMatrix ret = new DenseMatrix(rowsThis, colsWith);
- double[] with_tensor_values = (with instanceof VectorizedMatrix) ? ((VectorizedMatrix) with).tensor.values
- : ((DenseMatrix) with).tensor.values;
+ double[] with_tensor_values = (with instanceof VectorizedMatrix) ? ((VectorizedMatrix) with).tensor.values: ((DenseMatrix) with).tensor.values;
for (int col2 = 0; col2 < colsWith; ++col2) {
for (int row = 0; row < rowsThis; ++row) {
diff --git a/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java b/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java
index 8d6edf55..5c1884d8 100644
--- a/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java
+++ b/JGNN/src/main/java/mklab/JGNN/core/matrix/VectorizedMatrix.java
@@ -105,7 +105,7 @@ public Matrix matmul(Matrix with) {
@Override
public Matrix matmul(Matrix with, boolean transposeThis, boolean transposeWith) {
- if (with instanceof SparseMatrix)
+ if (!(with instanceof DenseMatrix) && !(with instanceof VectorizedMatrix))
return super.matmul(with, transposeThis, transposeWith);
// Determine the dimensions based on whether we transpose or not
diff --git a/JGNN/src/main/java/mklab/JGNN/core/util/Range.java b/JGNN/src/main/java/mklab/JGNN/core/util/Range.java
index ee25c7f2..10d6f68e 100644
--- a/JGNN/src/main/java/mklab/JGNN/core/util/Range.java
+++ b/JGNN/src/main/java/mklab/JGNN/core/util/Range.java
@@ -4,33 +4,45 @@
import java.util.NoSuchElementException;
/**
- * Implements an iterator that traverses a range (similar to Python's range(min, max) method).
- * It is often used by {@link mklab.JGNN.core.Tensor} derived classes to traverse through all
- * element positions in sequential order.
+ * Implements an iterator that traverses a range [min, max) where the right side
+ * is non-inclusive. That is, this method behaves similarly to Python's
+ * range(min, max). It is often used by {@link mklab.JGNN.core.Tensor} derived
+ * classes to traverse through all element positions in sequential order.
*
* @author Emmanouil Krasanakis
*/
public class Range implements Iterator
 * @return this Model instance.
* @see #addOutput(NNOperation)
@@ -155,9 +177,10 @@ public Model addInput(Variable input) {
inputs.add(input);
return this;
}
-
+
/**
* Adds to the model's output the output of the provided operation.
+ *
* @param output An operation to set as an output.
+ * @return this Model instance.
* @see #addInput(Variable)
@@ -169,22 +192,24 @@ public Model addOutput(NNOperation output) {
outputs.add(output);
return this;
}
-
+
/**
- * Retrieves a list of model inputs. Editing this list affects
- * the model and is not recommended. Input order is based on
- * the chronological addition of inputs through {@link #addInput(Variable)}.
+ * Retrieves a list of model inputs. Editing this list affects the model and is
+ * not recommended. Input order is based on the chronological addition of inputs
+ * through {@link #addInput(Variable)}.
+ *
* @return A list of {@link Variable} instances.
* @see #getOutputs()
*/
public ArrayList
2. Quickstart
- ModelTraining class. Instances of this class
- accept a method chain notation to set their parameters, like the number of epochs, patience
- for early stopping, the employed optimizer, and loss functions. An example is presented below,
- where Adam optimization with learning rate 0.01 is performed, and a verbose
- variation of a validation loss prints the progress. To run a full training process,
- pass the defined strategy to a model alongside input data, corresponding output data, as well
- as training and validation slices.
+ ModelTraining class with training strategies
+ tailored to different data formats and predictive tasks. Find these subclasses in the
+ adhoc.train Javadoc. Instances of model trainers
+ accept a method chain notation to set their parameters. Parameters usually include training and validation data
+ (these should be made first and depend on the model training class) and aspects of the training strategy like the number of epochs, patience
+ for early stopping, the employed optimizer, and loss functions. An example is presented below.
+ Of data needed for training, the graph adjacency matrix and node features are already declared as constants by the
+ FastBuilder constructor, as node classification takes place on the same graph
+ with fully known node features. Thus, input features are a column of node identifiers, which
+ the classify method above uses to gather
+ the predictions on respective nodes. Architecture outputs are softmax approximations of the one-hot
+ encodings of respective node labels. The simplest way to handle missing labels for test data without modifying
+ the example is to leave their one-hot encodings as zeroes only.
+ Additionally, this particular training strategy accepts training and validation data slices, where slices are lists
+ of integer entries pointing to rows of inputs and outputs - find more later.
+ To finish describing the training strategy, the example selects Adam optimization with learning rate 0.01, and training
+ over many epochs with early stopping. A verbose
+ loss prints every 10 epochs the progress of cross entropy and accuracy on validation data, where the
+ first of these two is used for the early stopping criterion.
+ To run a full training process, pass a strategy to a model.
+ In a cold start scenario, apply a parameter initializer first before training is conducted.
+ A warm start that resumes training from some previously trained outcomes would skip this step.
+ Selecting an initializer is not part of the training strategy
to signify its model-dependent nature; dense layers should maintain the expected
input variances in the output before the first epoch, and therefore the initializer depends
- on the type of activation functions. Moreover,
- the graph's adjacency matrix and node features are already declared as constants by the
- FastBuilder constructor, as node classification takes place on the same graph
- with fully known node features. Architecture inputs are the node identifiers, which in the
- classify method above are used to gather
- the predictions on respective nodes, and desired outputs are the corresponding labels from
- the dataset. Labels that are not known still need to have some value; as a convention when working
- with your own data, leave the one-hot label encoding of test nodes as zeroes. Doing so in our
- present example would not affect the outcome either.
- The last two training arguments of the train method
- then accept training and validation data slices. Slices are effectively lists of integer entries
- pointing to rows of inputs and outputs - find more later.
+ on the type of activation functions.
-ModelTraining trainer = new ModelTraining()
+Slice nodes = dataset.samples().getSlice().shuffle(); // a permutation of node identifiers
+Matrix inputFeatures = Tensor.fromRange(nodes.size()).asColumn(); // each node has its identifier as an input (equivalent to: nodes.samplesAsFeatures())
+ModelTraining trainer = new SampleClassification()
+	// training data
+	.setFeatures(inputFeatures)
+	.setLabels(dataset.labels())
+	.setTrainingSamples(nodes.range(0, 0.6))
+	.setValidationSamples(nodes.range(0.6, 0.8))
+	// training strategy
 	.setOptimizer(new Adam(0.01))
 	.setEpochs(3000)
 	.setPatience(100)
 	.setLoss(new CategoricalCrossEntropy())
-	.setValidationLoss(new VerboseLoss(new Accuracy()).setInterval(10)); // print validation every 10 epochs
-
-Slice nodes = dataset.samples().getSlice().shuffle(); // a permutation of node identifiers
-Matrix inputData = Tensor.fromRange(nodes.size()).asColumn(); // each node has its identifier as an input
+	.setValidationLoss(new VerboseLoss(new CategoricalCrossEntropy(), new Accuracy()).setInterval(10)); // print every 10 epochs
+
 Model model = modelBuilder.getModel()
-	.init(new XavierNormal())
-	.train(trainer,
-		inputData,
-		dataset.labels(),
-		nodes.range(0, 0.6), // training slice
-		nodes.range(0.6, 0.8) // validation slice
-	);
+	.init(new XavierNormal())
+	.train(trainer);
modelBuilder.save(Paths.get("gcn_cora.jgnn")); // needs a Path as an input
-Model loadedModel = ModelBuilder.load(Paths.get("gcn_cora.jgnn")).getModel(); // loading creates an intermediate modelbuilder
+Model loadedModel = ModelBuilder.load(Paths.get("gcn_cora.jgnn")).getModel(); // loading creates a new modelbuilder from which to get the model
Matrix output = loadedModel.predict(Tensor.fromRange(0, nodes.size()).asColumn()).get(0).cast(Matrix.class);
double acc = 0;
diff --git a/tutorials/Data.md b/tutorials/Data.md
deleted file mode 100644
index f3d9fa00..00000000
--- a/tutorials/Data.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# :zap: Data creation
-
-If you have been following the tutorial, we have only used automatically downloaded datasets till now.
-In practice, you will want to use your own data. This tutorial covers typical code patterns for doing so.
-
-1. [Creating preallocated feature matrices](#creating-preallocated-feature-matrices)
-2. [Converting lists of tensors to matrices](#converting-lists-of-tensors-to-matrices)
-3. [Constructing graph adjacency matrices](#constructing-graph-adjacency-matrices)
-4. [Managing identifiers](#managing-identifiers)
-
-## Creating preallocated feature matrices
-If you know the number of nodes or data samples and features a-priori, you can create
-dense feature matrices with the following code. This uses the bare minimum memory necessary
-to construct the feature matrix. If features are dense (do not have a lot of zeroes),
-you could also consider using the `DenseMatrix` class instead of initializing a sparse
-matrix - the two classes are interoperable and have the same constructor arguments
- so that the rest of the code in the tutorials remains the same.
-
-```java
-Matrix features = new SparseMatrix(numNodes, numFeatures);
-for(long nodeId=0; nodeId<numNodes; nodeId++)
-	features.put(nodeId, featureId, value); // fill each known (nodeId, featureId) value of your data
-```
-
-## Converting lists of tensors to matrices
-If features are read row-by-row, for example from a file, collect them into a list of tensors first:
-
-```java
-ArrayList<Tensor> rows = new ArrayList<Tensor>();
-try(BufferedReader reader = new BufferedReader(new FileReader(file))){
-	String line = reader.readLine();
-	while (line != null) {
-		String[] cols = line.split(",");
-		Tensor features = new SparseTensor(cols.length); // or a dense tensor
-		for(int col=0;col<cols.length;col++)
-			features.put(col, Double.parseDouble(cols[col]));
-		rows.add(features);
-		line = reader.readLine();
-	}
-}
-```
-
-## Constructing graph adjacency matrices
-Adjacency matrices are square sparse matrices whose non-zero entries correspond to edges:
-
-```java
-Matrix matrix = new SparseMatrix(numNodes, numNodes);
-for(Entry<Long, Long> edge : edges)
-	matrix.put(edge.getKey(), edge.getValue(), 1).put(edge.getValue(), edge.getKey(), 1);
-```
-
-:bulb: Don't forget to normalize or apply the renormalization trick (self-edges) on matrices
-if these are needed by your algorithm, for instance by calling `adjacency.setMainDiagonal(1).setToSymmetricNormalization();` after matrix construction.
-
-## Managing identifiers
-The above snippets all reference node identifiers. To help you with managing these, JGNN
-provides an `IdConverter` class. You can convert hashable objects (e.g., Strings) to identifiers
-by calling `IdConverter.getOrCreateId(object)`. The same functionality is also helpful
-for one-hot encoding of class labels. If you want to search only for previously registered identifiers,
-for example to catch logical errors, you can use `IdConverter.get(object)`.
-
-For example, you can construct a label matrix of one-hot encodings for your training data per:
-
-```java
-// register the ids in data
-IdConverter nodeIds = new IdConverter();
-IdConverter classIds = new IdConverter();
-for(Entry<String, String> entry : nodeLabels) {
-	nodeIds.getOrCreateId(entry.getKey()); // or .get(entry.getKey()) if reusing nodeIds of feature loading
-	classIds.getOrCreateId(entry.getValue());
-}
-// create the matrix
-Matrix labels = new SparseMatrix(nodeIds.size(), classIds.size());
-for(Entry<String, String> entry : nodeLabels)
-	labels.put(nodeIds.get(entry.getKey()), classIds.get(entry.getValue()), 1);
-```
-
-As a final remark, you can reverse-search the `IdConverter` to obtain the original object of your
-predictions by calling `IdConverter.get(long identifier)` with the predicted identifier. For example:
-
-```java
-long nodeId = nodeIds.get("nodeName");
-Tensor prediction = labels.accessRow(nodeId);
-long predictedClassId = prediction.argmax();
-System.out.println(classIds.get(predictedClassId));
-```
-
-
-
-[NEXT: Primitives](Primitives.md)
\ No newline at end of file
diff --git a/tutorials/Debugging.md b/tutorials/Debugging.md
deleted file mode 100644
index 9d108807..00000000
--- a/tutorials/Debugging.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# :zap: Debugging
-JGNN offers high-level tools for debugging base architectures.
-This tutorial covers what errors to expect, what diagnostics to run,
-and how to make sense of error messages to fix erroneous architectures.
-
-1. [Name checking](#name-checking)
-2. [Debugging execution DAGs](#debugging-execution-dags)
-3. [Debugging logical errors](#debugging-logical-errors)
-4. [Monitoring operations](#monitoring-operations)
-
-## Name checking
-When parsing operations, values should be assigned to variables before
-subsequent use. Model builders check for the use of undeclared variables and raise
-respective runtime exceptions.
-
-For example, for a `FastBuilder` that tries to parse the expression
-`.layer("h{l+1}=relu(hl@matrix(features, 32, reg)+vector(32))")`,
-where we remind that the layer definition is an enhanced version of
-operation declaration, and `hl` is a typographical error of `h{l}`,
-the following exception is thrown:
-
-```java
-Exception in thread "main" java.lang.RuntimeException: Symbol hl not defined.
-```
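
A minimal sketch that reproduces this check is shown below; the `adjacency` and `features` matrices are assumed to be already loaded, and `hl` is the typo in question:

```java
FastBuilder modelBuilder = new FastBuilder(adjacency, features)
	.config("reg", 0.005)
	.layer("h{l+1}=relu(hl@matrix(features, 32, reg)+vector(32))"); // throws: Symbol hl not defined
```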
-
-## Debugging execution DAGs
-Model builders are responsible for creating directed acyclic graphs (DAGs)
-in models they are managing (these are not to be confused with graph inputs GNNs
-are managing). During parsing, builders may create temporary variables, which
-start with the `_tmp` prefix and are followed by a number, and link components
-to others that use them.
-
-The easiest way to understand execution DAGs is to actually look
-at them. The library provides two tools to that end: a) a `.print()`
-method for model build functional flows that prints all the parsed
-expressions and intermediate expression in the system console, and b)
-a. `.getExecutionGraphDot()` that returns a String holding the execution
-graph in *.dot* format for visualization with external tools, such
-as [GraphViz](https://dreampuf.github.io/GraphvizOnline).
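
In practice, both inspection points are one-liners; the sketch below assumes a `modelBuilder` has already been constructed:

```java
modelBuilder.print(); // print parsed and intermediate expressions to the console
System.out.println(modelBuilder.getExecutionGraphDot()); // .dot description for external visualization
```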
-
-A second error-checking procedure consists of checking
-for model operations that do not
-eventually reach any outputs, for example one of the output operation
-outcomes defined by `.out(String)`. Avoiding this behavior is particularly
-important, as it messes with graph traversal counting during backpropagation.
-However, to accommodate complex use cases, these checks can only be manually performed
-at the very end of model building with the builder method `.assertBackwardValidity()`.
-Calling these checks early on in functional model building
-will likely throw exceptions that are not truly logical errors - the
-outputs may be declared at later functional steps. Thrown errors would look like this:
-```java
-Exception in thread "main" java.lang.RuntimeException: The component class mklab.JGNN.nn.operations.Multiply: _tmp102 = null does not lead to an output
- at mklab.JGNN.nn.ModelBuilder.assertBackwardValidity(ModelBuilder.java:504)
- at nodeClassification.APPNP.main(APPNP.java:45)
-```
-For example, this indicates that the component *_tmp102* does not lead to an output, and we should look
-at the execution tree to understand its role.
-
-
-## Debugging logical errors
-There are two main mechanisms for the identification of logically erroneous
-architectures: a) mismatched dimension size, and b) mismatched dimension names.
-Of the two, dimension sizes are easy to comprehend, since they just mean that
-operations are mathematically invalid.
-
-On the other hand, dimension names need to be determined for
-starting data, such as model inputs and parameters, and are automatically
-inferred from operations on such primitives. For in-line declaration of
-parameters in operations or layers, dimension names are copied from any hyperparameters.
-Therefore, for easier debugging,
-prefer using functional expressions that declare hyperparameters:
-
-```java
-new ModelBuilder()
- .config("features", 7)
- .config("hidden", 64)
- .var("x")
- .operation("h = x@matrix(features, hidden)");
-```
-instead of the simpler `new ModelBuilder().var(x).operation('h = x@matrix(features, hidden)')`
-
-
-Both mismatched dimensions and mismatched dimension names
-throw runtime exceptions. The beginning of their
-error console traces should start with something like this:
-```java
-java.lang.IllegalArgumentException: Mismatched matrix sizes between SparseMatrix (3327,32) 52523/106464 entries and DenseMatrix (64, classes 6)
-During the forward pass of class mklab.JGNN.nn.operations.MatMul: _tmp4 = null with the following inputs:
- class mklab.JGNN.nn.activations.Relu: h1 = SparseMatrix (3327,32) 52523/106464 entries
- class mklab.JGNN.nn.inputs.Parameter: _tmp5 = DenseMatrix (64, classes 6)
-java.lang.IllegalArgumentException: Mismatched matrix sizes between SparseMatrix (3327,32) 52523/106464 entries and DenseMatrix (64, classes 6)
- at mklab.JGNN.core.Matrix.matmul(Matrix.java:258)
- at mklab.JGNN.nn.operations.MatMul.forward(MatMul.java:21)
- at mklab.JGNN.nn.NNOperation.runPrediction(NNOperation.java:180)
- at mklab.JGNN.nn.NNOperation.runPrediction(NNOperation.java:170)
- at mklab.JGNN.nn.NNOperation.runPrediction(NNOperation.java:170)
- at mklab.JGNN.nn.NNOperation.runPrediction(NNOperation.java:170)
- at mklab.JGNN.nn.NNOperation.runPrediction(NNOperation.java:170)
- at mklab.JGNN.nn.NNOperation.runPrediction(NNOperation.java:170)
- ...
-```
-
-As an example, let us try to understand what this error tells us. First,
-it notifies us of the actual problem: that the architecture encounters mismatched matrix
-sizes when trying to multiply a 3327x32 SparseMatrix with a 64x6 dense matrix.
-This is easy to understand and there are also dimension names in there;
-for this example, only *classes* is a named dimension, but if models
-and input data are well-designed more names will be in there and some
-errors will also arise from different dimension names.
-
-At any rate, understanding the exact error is easy - the inner matrix dimensions
-of matrix multiplication
-do not agree. However, we need to find the error within our architecture to
-be able to fix whatever is causing this.
-
-To do this, we continue reading and see the message
-`During the forward pass of class mklab.JGNN.nn.operations.MatMul: _tmp4 = null`.
-This tells us that the problem occurs when trying to calculate *_tmp4*
-which is currently assigned a *null* tensor as value (this is pretty normal,
-as the forward pass has not yet concluded for that variable to assume a value).
-Some more information is there to see what the operation's inputs are like - in this case
-they coincide with the multiplication's inputs, but this will not always be the case.
-
-The important point is to go back to the execution tree and see during which exact operation
-this variable is defined. There, we will undoubtedly find that some dimension had 64 instead
-of 32 elements or vice versa.
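
For instance, a sketch of an architecture that would produce an error like the one above is the following, where the first layer outputs 32 columns but the second expects 64 (the `classes` value is only illustrative):

```java
FastBuilder modelBuilder = new FastBuilder(adjacency, features)
	.config("reg", 0.005)
	.config("classes", 6)
	.layer("h{l+1}=relu(A@(h{l}@matrix(features, 32, reg))+vector(32))")
	.layer("h{l+1}=A@(h{l}@matrix(64, classes))+vector(classes)"); // mismatched 32 vs 64 at runtime
```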
-
-## Monitoring operations
-In addition to all other debugging mechanisms, JGNN presents a way to show when
-forward and backward operations of specific code components are executed and with what kinds
-of arguments.
-This can be particularly useful when testing new components in real (complex) architectures.
-
-The practice consists of calling a *monitor(...)* function within operations.
-This does not affect what expressions compute; it only prints when the monitored operation components
-run their forward and backward passes and with what arguments. For example, to monitor the outcome of matrix multiplication within
-the following operation:
-
-```java
-builder.operation("h = relu(x@matrix(features, 64) + vector(64))")
-```
-
-it should be converted to:
-
-```java
-builder.operation("h = relu(monitor(x@matrix(features, 64)) + vector(64))")
-```
-
-[NEXT: Message passing GNNs](Message.md)
\ No newline at end of file
diff --git a/tutorials/GNN.md b/tutorials/GNN.md
deleted file mode 100644
index 1e9ee8f1..00000000
--- a/tutorials/GNN.md
+++ /dev/null
@@ -1,185 +0,0 @@
-# :zap: Graph neural networks for node classification
-
-Graph neural networks (GNNs) extend the concept of base [neural networks](tutorials/NN.md).
-You can already write any GNN with the base the `LayerBuilder` class for designing neural models,
-but JGNN provides some common design choices that simplify the process for node classification.
-
-1. [Initializing a GNN builder](#initializing-a-gnn-builder)
-2. [GNN concepts](#gnn-concepts)
-3. [Adding a classification layer](#adding-a-classification-layer)
-4. [Example architecture](#example-architecture)
-5. [GNN training](#gnn-training)
-
-*Full implementations can be found in the [examples](../JGNN/src/examples/nodeClassification/APPNP.java).*
-
-## Initializing a GNN builder
-The `FastBuilder` class for building GNN architectures extends the generic
-`LayerBuilder` with common graph neural network operations.
-The only difference is that now we initialize it with a
-square matrix A, which is typically a normalization of the adjacency matrix, and a feature matrix h0
-(this is different than the symbol h{0}).
-Given that you will most likely use normal neural layers, you only need
-to remember that in symbolic parsing A will correspond to the adjacency matrix
-and that layer representations should be annotated with h{l}. We may make a more
-customizable version of the builder in the future, but these symbols will always remain
-the default. Preferably, each row of the feature matrix should correspond to the features
-of one node/sample. The normalized adjacency matrix can -and usually should-
-be sparse to save on memory.
-
-Most GNNs perform the renormalization trick by adding a self-loop
-before applying symmetric normalization on the adjacency matrix.
-Assuming no existing self-loops, the following snippet shows how to apply those
-transformations on adjacency matrices, such as ones obtained from `Dataset.graph()`.
-The snippet uses in-place arithmetic to directly alter raw matrix data:
-
-```java
-adjacency.setMainDiagonal(1).setToSymmetricNormalization();
-```
-
-Finally, you can instantiate the builder by providing the adjacency and feature
-matrices per:
-
-```java
-FastBuilder modelBuilder = new FastBuilder(adjacency, features);
-```
-
-Sending specific tensors to the builder's constructor
-does not restrict you from editing or replacing them later,
-even after architectures have been trained.
-For example, you can add node edges later by editing an element of the
-adjacency matrix per:
-
-```java
-Matrix adjacency = ((Constant)modelBuilder.get("A")).get(); // retrieves constant's value from the architecture
-adjacency.put(from, to, value);
-```
-
-
-## GNN concepts
-
-The base operation of GNNs is to propagate node representations to neighbors via graph edges,
-where they are aggregated. Aggregation typically consists of a weighted average
-per the normalized adjacency matrix edge weights, which propagates information
-while respecting spectral graph characteristics. Other types of aggregation
-can be found in the more advanced tutorial for [Message passing GNNs](Message.md).
-Spectral aggregation can be achieved with a simple matrix multiplication on the previous layer's
-node features per `.layer("h{l+1}=A @ h{l}")`. In practice, you will often want to
-add more operations on the propagation, such as passing it through a dense layer.
-For example, the original GCN architecture defines layers of the form:
-
-```java
-.layer("h{l+1}=relu(A@(h{l}@matrix(features, hidden, reg))+vector(hidden))")
-.layer("h{l+1}=A@(h{l}@matrix(hidden, classes, reg))+vector(classes)")
-```
-
-Most architectures nowadays also perform edge dropout, which is as simple as applying dropout
-on the adjacency matrix values on each layer per:
-
-```java
-.layer("h{l+1}=dropout(A,0.5) @ h{l}")
-```
-
-Recent areas of heterogeneous graph research also explicitly use the graph Laplacian,
-which you can insert into the architecture as a normal constant per `.constant("L", adjacency.negative().cast(Matrix.class).setMainDiagonal(1))`. Even more complex concepts
-can be modelled with edge attention that gathers the representations of edge nodes and
-performs their dot product to provide new edge weights, exponentiating
-non-zero weights with *nexp* and applying row-wise L1 transformation. This yields
-an adjacency matrix weighting unique to the layer per `.operation("A{l} = L1(nexp(att(A, h{l})))")`.
-Nonetheless, it is recommended that you stay away from these kinds of complex architectures
-when learning from large graphs, as JGNN is designed to be lightweight rather than fast.
-Consider using GPU GNNs if 1-2% accuracy gains matter enough to make your application
-several folds slower.
-
-
-## Adding a classification layer
-This far, we touched on propagation mechanisms of GNNs, which consider the features of all nodes.
-However, when moving to a node classification setting,
-training data labels are typically available only for certain nodes.
-We thus need a mechanism that can retrieve the predictions of the top neural layer for certain nodes
-and pass them through a softmax activation.
-This can already be achieved with normal neural model definitions using the gather bracket operation
-after declaring a variable of which nodes to retrieve:
-
-```java
-.var("nodes")
-.layer("h{l} = softmax(h{l})")
-.operation("output = h{l}[nodes]")
-```
-
-Recall that h{l} always points to the top layer when writing a new layer.
-
-
-This way, the built model takes as inputs a set of nodes, performs the forward pass of the
-architecture, and then selects the provided nodes to use as outputs (and backpropagate from).
-**All** nodes are needed for training because they are made aware of each other via the
-graph's structure.
-
-To simplify how node classification architectures are defined,
-the above symbolic snippet is automatically generated and applied by calling the
-`.classify()` method of the `FastBuilder` instead.
-
-## Example architecture
-
-As an example of how to define a full GNN with symbolic parsing, let us define
-the well-known APPNP architecture. This comprises two normal dense layers and then
-propagates their predictions through the graph structure with a fixed-depth approximation
-of the personalized PageRank algorithm. To define the architecture,
-let us consider a `Dataset dataset` loaded by the library, for which we normalize the
-adjacency matrix and send everything to the GNN builder class. We let the outcome of
-the first two dense layers be remembered as `h{0}` (this is *not* `h0`), define
-a diffusion rate constant `a` and then perform 10 times the
-personalized PageRank diffusion scheme on a graph with edge dropout 0.5. This is all achieved
-with the same `layer` and `layerRepeat` methods as neural builders.
-
-```java
-dataset.graph().setMainDiagonal(1).setToSymmetricNormalization();
-long numClasses = dataset.labels().getCols();
-
-ModelBuilder modelBuilder = new FastBuilder(dataset.graph(), dataset.features())
- .config("reg", 0.005)
- .config("hidden", 16)
- .config("classes", numClasses)
- .layer("h{l+1}=relu(h{l}@matrix(features, hidden, reg)+vector(hidden))")
- .layer("h{l+1}=h{l}@matrix(hidden, classes)+vector(classes)")
- .rememberAs("0")
- .constant("a", 0.9)
- .layerRepeat("h{l+1} = a*(dropout(A, 0.5)@h{l})+(1-a)*h{0}", 10)
- .classify();
-```
-
-
-## GNN training
-
-GNN classification models can be backpropagated by considering a list of node indices and desired
-predictions for those nodes. However, you can also use the interfaces discussed in the
-[learning](tutorials/Learning.md) tutorial to automate the training process and control it
-in a fixed manner.
-
-Recall that training needs to call the model's method
-`.train(trainer, features, labels, train, valid)`.
-The important question is what to consider as training inputs and outputs, given that node features
-and the graph are passed to the `FastBuilder` constructor.
-
-The answer is that the (ordered) list of all node identifiers *0,1,2,...* constitutes the training inputs
-and the corresponding labels constitute the outputs. You can create a slice of identifiers
-and use JGNN to design the training process per:
-
-```java
-Slice nodes = dataset.samples().getSlice().shuffle(100); // or nodes = new Slice(0, numNodes).shuffle(100);
-Model model = modelBuilder
- .getModel()
- .init(...)
- .train(trainer,
- nodes.samplesAsFeatures(),
- dataset.labels(),
- nodes.range(0, trainSplit),
- nodes.range(trainSplit, validationSplit));
-
-```
-
-In the above snippet, the label matrix can have zeroes for the nodes not used for training.
-If only the first nodes have known labels, the label matrix may also have fewer rows.
-
-
-
-[NEXT: Graph neural networks for graph classification](GraphClassification.md)
\ No newline at end of file
diff --git a/tutorials/GraphClassification.md b/tutorials/GraphClassification.md
deleted file mode 100644
index 9c033aec..00000000
--- a/tutorials/GraphClassification.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# :zap: Graph neural networks for graph classification
-Most neural network architectures are designed with the idea of learning to classify
-nodes or samples. However, GNNs also provide the prospect of classifying graphs
-based on their structure.
-
-1. [Organizing data](#organizing-data)
-2. [Defining the architecture](#defining-the-architecture)
-3. [Training the architecture](#training-the-architecture)
-4. [Sort pooling](#sort-pooling)
-5. [Parallelized training](#parallelized-training)
-
-*Full implementations can be found in the [examples](../JGNN/src/examples/graphClassification/SortPooling.java).*
-
-## Organizing data
-
-To define architectures for graph classification,
-we can make use of the generic `LayeredBuilder` class. The main difference compared
-to traditional neural networks is that architecture inputs do not all exhibit the
-same size (e.g. some graphs may have more nodes than others) and therefore they
-can not be organized into tensors of common dimensions.
-
-Instead, let us presume that training data are stored in the following lists:
-
-```java
-ArrayList<Matrix> adjacencyMatrices = new ArrayList<Matrix>();
-ArrayList<Matrix> nodeFeatures = new ArrayList<Matrix>();
-ArrayList<Tensor> graphLabels = new ArrayList<Tensor>();
-```
-
-## Defining the architecture
-
-The `LayeredBuilder` already introduces the input variable *h0* for sample features.
-We can use it to pass node features to the architectures, so we only need to add
-a second input storing the (sparse) adjacency matrix per `.var("A")`. We can proceed
-to define a GNN architecture, for instance as explained in previous tutorials.
-
-This time, though, we do not aim to classify nodes but the whole graph. For this reason,
-we need to pool top layer node representations, for instance by averaging them
-across all nodes per `.layer("h{l+1}=softmax(mean(h{l}, row))")`. Remember to apply
-a softmax activation for classification tasks.
-Finally, we need to set up the top layer as the built model's
-output per `.out("h{l}")`.
-
-An example architecture following these principles is the following:
-
-```java
-ModelBuilder builder = new LayeredBuilder()
- .var("A")
- .config("features", nodeLabelIds.size())
- .config("classes", graphLabelIds.size())
- .config("hidden", 16)
- .layer("h{l+1}=relu(A@(h{l}@matrix(features, hidden)))")
- .layer("h{l+1}=relu(A@(h{l}@matrix(hidden, classes)))")
- .layer("h{l+1}=softmax(mean(h{l}, row))")
- .out("h{l}");
-```
-
-## Training the architecture
-
-For the time being, training architectures like the above on prepared data should
-manually call the backpropagation for each epoch and each graph in the training
-batch. To do this, first retrieve the model and initialize its parameters:
-
-```java
-Model model = builder.getModel().init(new XavierNormal());
-```
-
-Next, define a loss function and set up a batch optimization
-strategy wrapping any base optimizer and accumulating parameter updates until
-`BatchOptimizer.updateAll()` is called later on:
-
-```java
-Loss loss = new CategoricalCrossEntropy();
-BatchOptimizer optimizer = new BatchOptimizer(new Adam(0.01));
-```
-
-Finally, training can be conducted by iterating through epochs and training samples
-and appropriately calling the `Model.train` for combinations of node features and graph
-adjacency matrix inputs, and graph label outputs.
-At the end of each batch (e.g. each epoch), do not forget
-to call the `optimizer.updateAll()` method to apply the accumulated gradients. This
-process can be realized with the following code:
-
-```java
-for(int epoch=0; epoch<300; epoch++) {
-	for(int graphId=0; graphId<graphLabels.size(); graphId++) {
-		Tensor features = nodeFeatures.get(graphId);
-		Tensor adjacency = adjacencyMatrices.get(graphId);
-		Tensor label = graphLabels.get(graphId);
-		model.train(loss, optimizer, Arrays.asList(features, adjacency), Arrays.asList(label));
-	}
-	optimizer.updateAll();
-}
-```

-a) `var` to define inputs
-
-b) `config` to define hyperparameters
-
-c) `operation` to parse symbolic operations
-
-d) `out` to define output variables
-
-We can retrieve the defined model at anytime with the builder's `.getModel()` method.
-Until that point, models are incrementally constructed with functional programming.
-For this example, we define a two-layer perceptron, with a relu hidden layer and
-a row-wise softmax activation. Learnable matrices and vectors could be defined manually,
-but we automatically generate them in operation definitions. The number of
-hidden dimensions (64 right now) could also have been set as a hyperparameter.
-`@` corresponds to matrix multiplication. Details on how to write operations
-are presented in the [next tutorial](NN.md).
-
-```java
-ModelBuilder modelBuilder = new ModelBuilder()
- .config("feats", numFeatures)
- .config("labels", numClasses)
- .config("reg", 1.E-5)
- .var("x")
- .operation("h = relu(x@matrix(feats, 64, reg)+vector(64))")
- .operation("yhat = softmax(h@matrix(64, labels)+vector(labels), row)")
- .out("yhat")
- .assertBackwardValidity();
-```
-
-In addition to normal syntax checks, the last method call asserts that all operations
-are eventually used by outputs, creating an error message otherwise.
-To further check up on the architecture, let's extract its execution graph in *.dot* format
-by writing:
-
-```java
-System.out.println(modelBuilder.getExecutionGraphDot());
-```
-
-Copying-and-pasting the outputted description to [GraphvizOnline](https://dreampuf.github.io/GraphvizOnline/) creates the following visualization
-of the execution graph. A more detailed overview of potential debugging actions
-is presented later in the [debugging](Debugging.md) tutorial.
-
-![Example execution graph](graphviz.png)
-
-## Training
-To train the model, we set up 50-25-25 training-validation-test data slices.
-These basically handle shuffled sample identifiers. You can use integers instead of
-doubles in the `range` method to reference a fixed number of samples instead of fractional slice sizes.
-
-```java
-Slice samples = dataset.samples().getSlice().shuffle(); // or samples = new Slice(0, labels.getRows()).shuffle();
-Slice train = samples.range(0, 0.5);
-Slice valid = samples.range(0.5, 0.75);
-Slice test = samples.range(0.75, 1);
-```
-
-Next, we create a learning strategy given an Adam optimizer,
-a binary cross-entropy loss, and validation loss patience of 100 epochs:
-
-
-```java
-Optimizer optimizer = new Adam(0.1);
-
-ModelTraining trainer = new ModelTraining()
- .setOptimizer(optimizer)
- .setLoss(new BinaryCrossEntropy())
- .setEpochs(3000)
- .setPatience(100);
-```
-
-Finally, we train the model under this strategy by initializing its parameters
-and calling the training strategy:
-
-```java
-model
- .init(new XavierNormal())
- .train(trainer, features, labels, train, valid);
-```
-
-:bulb: Real-world settings can further separate rows of the test set first without using
-more memory, but we keep things simple here.
-
-
-## Testing
-We finally report the trained model's accuracy on the test set. We demonstrate how single-sample predictions can be
-made and measure the accuracy of those. To do this, we use `Matrix.accessRow` to obtain specific matrix rows from node features as tensors and `Tensor.asRow` to convert the obtained tensors into a row representation. Row representations are matrices and can pass through the model just fine.
-We use `argmax` to convert one-hot predictions to label ids.
-
-```java
-double acc = 0;
-for(Long node : test) {
- Matrix nodeFeatures = features.accessRow(node).asRow();
- Matrix nodeLabels = labels.accessRow(node).asRow();
- Tensor output = model.predict(nodeFeatures).get(0);
- acc += (output.argmax()==nodeLabels.argmax()?1:0);
-}
-System.out.println("Acc\t "+acc/test.size());
-```
-
-[NEXT: Neural networks](NN.md)
\ No newline at end of file
diff --git a/tutorials/Message.md b/tutorials/Message.md
deleted file mode 100644
index c4f148c2..00000000
--- a/tutorials/Message.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# :zap: Message passing GNNs
-JGNN also supports the more generic view of GNNs as message passing mechanisms
-between node neighbors. This supports complex types of relational analysis,
-such as edge attributes, but may be computationally intense. Thus, prefer
-using [simpler GNNs](GNN.md) when possible.
-
-1. [Message passing principles](#message-passing-principles)
-2. [Creating messages](#creating-messages)
-3. [Receiving messages](#receiving-messages)
-4. [Neighbor attention](#neighbor-attention)
-
-## Message passing principles
-Message passing considers a setting where each edge is responsible for appropriately
-transforming and propagating representations to node neighbors. In this tutorial
-we show how to use JGNN to implement a generic realization in which the transformation
-can depend on the specific edge. The implementation is compatible with the formulation that
-[Velickovic (2022)](https://arxiv.org/pdf/2202.11097.pdf) shows to be able to capture
-several settings of interest.
-
-## Creating messages
-In the more general sense, messages can be matrices whose rows correspond to edges
-and columns to edge features. In the simplest scenario, you can create such matrices
-by gathering the features of edge source and destination nodes by accessing
-the respective elements of a feature matrix *h{l}*. Doing so requires that you
-first obtain edge source indexes *src=from(A)* and destination indexes *dst=to(A)*
-where *A* is an adjacency matrix. Thus, you can construct edge features per:
-
-```java
-modelBuilder
- .operation("src=from(A)")
- .operation("dst=to(A)")
- .operation("message{l}=h{l}[src] | h{l}[dst]");
-```
-
-The model builder parses *|* as the horizontal concatenation expression. You can
-also construct a global independent edge feature matrix and concatenate that
-too.
-
-Given that you have constructed a message, you can continue by defining any kind
-of ad-hoc mechanism or neural processing of messages with traditional matrix
-operations (take care to define correct matrix sizes for dense transformations, e.g.,
-twice the number of columns as *h{l}* in the previous snippet).
-For any kind of *LayeredBuilder*, don't forget the *{l}* notation in *message{l}*;
-it ensures that the message obtained from the representations *h{l}*
-is not accidentally shared with future layers.
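
For instance, a sketch of such a processing step is shown below; `2feats` and `messagedims` are placeholder configurations that should match the concatenated message width and the desired output width respectively, and the resulting `transformed_message{l}` is what the reduction snippet of the next section expects:

```java
modelBuilder
	.config("messagedims", 16)
	.operation("transformed_message{l} = relu(message{l}@matrix(2feats, messagedims)+vector(messagedims))");
```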
-
-## Receiving messages
-Receiver mechanisms need to perform some kind of reduction on messages.
-For the time being, JGNN implements only summation reduction,
-given that this has the same theoretical expressive power as the alternative
-of maximum-based reduction but is easier to backpropagate through.
-
-Reduction can be performed with the following code snippet. Note that
-the sum is weighted per the values of the adjacency matrix *A*. Thus,
-perform adjacency matrix normalization only if you want such weighting
-to take place.
-
-```java
-modelBuilder
- .operation("received{l}=reduce(transformed_message{l}, A)")
-```
-
-You can finally define layers that can transform node representations
-while accounting for received messages, for example per:
-```java
-modelBuilder
- .config("2feats{l}", ...)
- .layer("h{l+1}=(h{l} | received{l})@matrix(2feats,dims) + vector(dims)")
-```
-
-where *2feats{l}* is configured to a matching number of dimensions as the sum
-of the number of columns of *h{l}* and *transformed_message{l}*.
-
-## Neighbor attention
-A common realization of message passing GNNs is via sparse-dense matrix multiplication
-to emulate neighbor attention per: *A.(hh^T)* where *A* is a sparse
-adjacency matrix, *.* is the Hadamard product (element-by-element multiplication)
-and *h* a dense matrix whose rows hold respective node representations.
-JGNN implements this operation and you can include it in symbolic definitions with the
-expression `att(A, h)`. Its implementation is considerably more lightweight
-than the equivalent message passing mechanism.
-
-True neighbor attention in the style of gated attention networks can be implemented
-by exponentiating all non-zero elements of the adjacency matrix and performing row-wise
-normalization per `L1(nexp(att(A, h)))`.
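
As a sketch, a layer that uses this weighting could look as follows, where `hidden` is an assumed configuration for the layer width:

```java
modelBuilder
	.operation("A{l} = L1(nexp(att(A, h{l})))")
	.layer("h{l+1} = relu(A{l}@(h{l}@matrix(hidden, hidden))+vector(hidden))");
```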
diff --git a/tutorials/Models.md b/tutorials/Models.md
deleted file mode 100644
index 5f1d0a58..00000000
--- a/tutorials/Models.md
+++ /dev/null
@@ -1,142 +0,0 @@
-# Models and builders
-
-## Table of Contents
-
-1. [JGNN Expressions](#jgnn-expressions)
-2. [JGNN Models](#jgnn-models)
-3. [Symbolic Model Definition](#symbolic-model-definition)
-4. [Learning Parameters](#learning-parameters)
-5. [Neural Network Examples](#neural-network-examples)
-6. [Multithread Batch Learning](#multithread-batch-learning)
-
-## JGNN Expressions
-
-The base structure used to define machine learning operations is the `mklab.JGNN.core.NNOperation` abstract class.
-This is implemented by common mathematical operations, which are presented in the following table. Operation instances
-can be added to inputs of other operations through the `addInput(NNOperation)` method of the latter. Starting points
-of operations are variables, constants and parameters, whose differences will be discussed later.
-
-:bulb: The hustle of learning to write expressions is removed with [symbolic model definition](#symbolic-model-definition).
-You can safely skip to that segment to learn how to write machine learning models without the tedious definitions of intermediate steps explained here.
-
-|Operator| Constructor | Number of inputs |
-| --- | --- | --- |
-| + | mklab.JGNN.nn.operations.Add() | 2 |
-| * | mklab.JGNN.nn.operations.Multiply() | 2 |
-| @ | mklab.JGNN.nn.operations.MatMul() | 2 |
-| 1-x | mklab.JGNN.nn.operations.Complement() | 1 |
-| log | mklab.JGNN.nn.operations.Log() | 1 |
-| variable | mklab.JGNN.nn.operations.Variable() | 0 |
-| constant | mklab.JGNN.nn.operations.Constant(tensor) | 0 |
-| parameter | mklab.JGNN.nn.operations.Parameter(tensor) | 0 |
-| relu | mklab.JGNN.nn.activations.Relu() | 1 |
-| tanh | mklab.JGNN.nn.activations.Tanh() | 1 |
-| sigmoid | mklab.JGNN.nn.activations.Sigmoid() | 1 |
-| lrelu | mklab.JGNN.nn.activations.LRelu() | 2 |
-| prelu | mklab.JGNN.nn.activations.PRelu() | 2 |
-| dropout | mklab.JGNN.nn.activations.Dropout() | 2 |
-
-:warning: In principle, the `addInput` should be called a number of times equal to the number of operator arguments for each operator.
-It is defined for the sake of convenience, for example to initialize operators at different parts of the code than the one linking them.
-
-:warning: Detailed error checking of JGNN operations is under development.
-
-For example, the expression *y=log(2x+1)* can be constructed with the following code - a more concise way to do this is presented in
-[symbolic model definition](#symbolic-model-definition):
-
-```java
- Variable x = new Variable();
- Constant c1 = new Constant(Tensor.fromDouble(1)); // holds the constant "1"
- Constant c2 = new Constant(Tensor.fromDouble(2)); // holds the constant "2"
- NNOperation mult = new Multiply().addInput(x).addInput(c2);
- NNOperation add = new Add().addInput(mult).addInput(c1);
- NNOperation y = new Log().addInput(add);
-```
-
-## JGNN Models
-
-Constructed expressions can be organized into machine learning models. Models are implemented by the class `mklab.JGNN.core.Model`
-and defining them is as simple as marking the input variables with the method `Model addInput(Variable)` and output operations
-with the method `Model addOutput(NNOperation)`. For example, constructing a model holding the previous expression is as simple as writing
-`Model model = new Model().addInput(x).addOutput(y)`. Potential backpropagation machine learning operations are automatically handled
-by models.
-
-Running the model once to create outputs can be achieved with `Tensor Model.predict(Tensor...)` method. This takes as input one or more
-comma-separated tensors to pass into the model.
-If the number of inputs is dynamically created, an overloaded version of the same method supports an array list of input tensors
-`Tensor Model.predict(ArrayList)`.
-
-:warning: Input tensor order should be the same as the order in which variables were added to the model.
-
-Obtaining the last value of intermediate (i.e. non-output) operations *after* the run can be achieved with the `Tensor NNOperation.getPrediction()` method. To sum up with an example, running a model of the previously defined *y=log(2x+1)* for *x=2* and printing both the value of *y* (approx. 1.61) and the value inside the logarithm (5) can be achieved with the following code:
-
-```java
- Model model = new Model().addInput(x).addOutput(y);
- System.out.println(model.predict(Tensor.fromDouble(2)));
- System.out.println(add.getPrediction());
-```
-
-
-## Symbolic model definition
-JGNN supports the definition of models from high-level expressions.
-This involves using a builder pattern to
-declare input variables, constants, parameters, output
-variables, and forward assignment operations. For example, the following
-code declares a linear model.
-
-```java
-ModelBuilder modelBuilder = new ModelBuilder()
- .var("x") // first argument
- .constant("a", Tensor.fromDouble(2))
- .constant("b", Tensor.fromDouble(1))
- .operation("yhat = a*x+b")
- .out("yhat")
- .print() // comment out this line to not print the model
- ;
-System.out.println(modelBuilder.getModel().predict(Tensor.fromDouble(2)));
-```
-
-## Learning parameters
-
-Examples up to this point were limited to using constant and variable data. However, machine learning
-tasks typically introduce the notion of *parameters* as constants whose values can be learned to optimize
-certain objectives, such as making model output values as close as possible to desired ones.
-
-Parameter operations can be instantiated with the constructor `new mklab.JGNN.nn.Parameter(Tensor initialValue)`,
-where their initial values are provided. Model builder parameters need to be defined before they are used
-by operations and can be symbolically defined with the method
-`ModelBuilder ModelBuilder.param(String name, Tensor initialValue)`.
-
-Approximating ideal parameter values for a model requires three steps: a) selecting an optimization scheme responsible for
-updating parameters based on backpropagated errors, b) selecting a loss function quantifying how much model outputs deviate
-from optimal ones and c) repeatedly calling one of the model's overloaded `Model.trainSample` methods for a number of epochs.
-For the sake of simplicity, in the following example we consider a single sample before we discuss how to handle multiple ones:
-
-
-```java
-ModelBuilder modelBuilder = new ModelBuilder()
- .var("x") // first argument
- .var("y") // second argument
- .param("a", Tensor.fromDouble(1))
- .param("b", Tensor.fromDouble(0))
- .operation("yhat = a*x+b")
- .operation("error = (y-yhat)*(y-yhat)")
- .out("error")
- .print();
-Optimizer optimizer = new Adam(0.1);
-// when no output is passed to training, the output is considered to be an error
-for(int i=0;i<200;i++)
- modelBuilder.getModel().trainSample(optimizer, Arrays.asList(new DenseTensor(1,2,3,4,5), new DenseTensor(3,5,7,9,11)));
-//run the wrapped model and obtain an internal variable prediction
-System.out.println(modelBuilder.runModel(Tensor.fromDouble(2), Tensor.fromDouble(0)).get("yhat").getPrediction());
-```
-
-:bulb: Examples with multiple features should either be organized into sparse matrices or be fed one-by-one to learners
-through a [batch-learning](#multithread-batch-learning) scheme. Specifically for graph neural networks, computation
-speed benefits tremendously from organizing all node features into one matrix and simultaneously passing this through
-graph convolutional layers.
-
-## Neural Network Examples
-
-
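For example, a minimal multilayer perceptron in the symbolic notation above (with `numFeatures` and `numClasses` assumed to be known) can be sketched as:

```java
ModelBuilder modelBuilder = new ModelBuilder()
	.config("feats", numFeatures)
	.config("classes", numClasses)
	.config("hidden", 64)
	.var("x")
	.operation("h = relu(x@matrix(feats, hidden)+vector(hidden))")
	.operation("yhat = softmax(h@matrix(hidden, classes)+vector(classes), row)")
	.out("yhat");
```
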
-## Multithread Batch Learning
\ No newline at end of file
diff --git a/tutorials/NN.md b/tutorials/NN.md
deleted file mode 100644
index 465effac..00000000
--- a/tutorials/NN.md
+++ /dev/null
@@ -1,110 +0,0 @@
-# :zap: Neural networks
-For this example, we refer to the same dataset and experimentation
-methodology as in the [learning](tutorials/Learning.md) tutorial.
-But we will see how to easily create a multilayer perceptron.
-We cover the following topics:
-
-1. [Building layers](#building-layers)
-2. [Deep architectures](#deep-architectures)
-3. [Writing operations](#writing-operations)
-4. [Save and load architectures](#save-and-load-architectures)
-
-*Full implementations can be found in the [examples](../JGNN/src/examples/tutorial/NN.java).*
-
-## Building layers
-The class for building layered architectures (`LayeredBuilder`) improves base builder
-functionalities by introducing methods like `.layer(String)`. This
-is an extension of normal `.operation(String)` definitions,
-with the addition that specifically the expressions `{l}` and `{l+1}` are replaced
-by the previous and current layer identifiers respectively.
-Setting the input layer to `"h0"` lets it get parsed by subsequent calls.
-
-```java
-ModelBuilder modelBuilder = new LayeredBuilder("h0")
- .config("features", numFeatures)
- .config("classes", numClasses)
- .config("hidden", 64)
- .layer("h{l+1} = relu(h{l}@matrix(features, hidden)+vector(hidden))")
- .layer("yhat = softmax(h{l}@matrix(hidden, classes)+vector(classes), row)")
- .out("yhat");
-```
-
-## Deep architectures
-Now that we have explained how simple layers work, let's look at two more advanced
-`LayeredBuilder` methods pivotal to many deep neural networks.
-The first is `.layerRepeat(String, int)`, which just repeats
-the layer expression a set number of times without breaking the
-functional model definition pipeline. The second is `.concat(int)`,
-which horizontally concatenates a number of top layers. Concatenation
-is also possible in symbolic parsing through the `|` operation,
-but calling the method easily scales it over a large number of layers
-(e.g., across several graph convolutional layers).
-
-We now make a more advanced model using these methods:
-
-```java
-ModelBuilder modelBuilder = new LayeredBuilder()
- .config("features", numFeatures)
- .config("classes", numClasses)
- .config("hidden", 64)
- .config("2hidden", 2*64)
- .layer("h{l+1} = relu(h{l}@matrix(features, hidden)+vector(hidden))")
- .layerRepeat("h{l+1} = relu(h{l}@matrix(hidden, hidden)+vector(hidden))", 2)
- .concat(2)
- .layer("yhat = softmax(h{l}@matrix(2hidden, classes)+vector(classes), row)")
- .out("yhat");
-```
-
-## Writing operations
-This is a good point to present symbols you can use within expressions.
-Unless otherwise specified, you can replace x and y with any expression. Sometimes,
-y needs to be a constant defined either by presenting a number, calling
-`ModelBuilder.config(y, double)`, or calling `ModelBuilder.constant(y, double)` to
-set the numbers as hyperparameters.
-
-|Symbol| Type | Description |
-| --- | --- | --- |
-| x = y | Operator | Assign to variable x the outcome of executing y.
-| x + y | Operator | Element-by-element addition. |
-| x * y | Operator | Element-by-element multiplication. |
-| x - y | Operator | Element-by-element subtraction. |
-| x @ y | Operator | Matrix multiplication. |
-| x \| y | Operator | Row-wise concatenation of x and y. |
-| x [y] | Operator | Gathers the rows with indexes y of x.|
-| transpose(x) | Function | Transposes matrix x. |
-| log(x) | Function | Apply logarithm on each tensor element. |
-| relu(x) | Function | Apply relu on each tensor element. |
-| tanh(x) | Function | Apply a tanh activation on each tensor element. |
-| sigmoid(x) | Function | Apply a sigmoid activation on each tensor element. |
-| dropout(x, y) | Function | Apply training dropout on tensor x with constant dropout rate y. |
-| lrelu(x, y) | Function | Leaky relu on tensor x with constant negative slope y. |
-| prelu(x) | Function | Leaky relu on tensor x with learnable negative slope. |
-| softmax(x, y) | Function | Apply y-wide softmax on x, where y is either row or col.|
-| sum(x, y) | Function | Apply y-wide sum reduction on x, where y is either row or col.|
-| max(x, y) | Function | Apply y-wide max reduction on x, where y is either row or col.|
-| matrix(x, y) | Function | Generate a matrix parameter with respective hyperparameter dimensions. |
-| vector(x) | Function | Generate a vector with respective hyperparameter size.|
-
-Prefer using hyperparameters (set via `.config`) for matrix and vector creation, as these transfer their names to the respective
-dimensions for error checking. For `dropout`, `matrix`, and `vector` you can also use the short names `drop`, `mat`, and `vec`, as demonstrated below.
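-
-The following sketch combines several of the above symbols in one place. It is only illustrative: `numFeatures` and `numClasses` are assumed to be known in advance, and the 0.1 and 0.5 constants are arbitrary.
-
-```java
-ModelBuilder exampleBuilder = new LayeredBuilder()
-    .config("features", numFeatures)
-    .config("classes", numClasses)
-    .config("hidden", 64)
-    // leaky relu with slope 0.1, then dropout with rate 0.5, using the short names
-    .layer("h{l+1} = drop(lrelu(h{l}@mat(features, hidden) + vec(hidden), 0.1), 0.5)")
-    .layer("yhat = softmax(h{l}@mat(hidden, classes) + vec(classes), row)")
-    .out("yhat");
-```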
-
-## Save and load architectures
-Saving a model needs to be done via its builder. Saving stores the whole parameter
-state at a specified Java Path as follows:
-
-```java
-modelBuilder.save(Paths.get("file.jgnn"));
-```
-
-A new builder (of the same type as the one that saved the model)
-can be constructed from the saved Path as follows:
-
-```java
-modelBuilder = (LayeredBuilder)ModelBuilder.load(Paths.get("file.jgnn"));
-```
-
-You can continue working with the loaded builder, for example by adding more
-layers if needed, and you can call its `.getModel()` as usual, as in the sketch below.
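-
-A hedged sketch continuing from the loaded builder above; the extra layer is hypothetical and assumes the `hidden` configuration was present in the saved builder.
-
-```java
-// the loaded builder is still a LayeredBuilder, so cast it to keep adding layers
-((LayeredBuilder) modelBuilder)
-    .layer("h{l+1} = relu(h{l}@matrix(hidden, hidden)+vector(hidden))"); // hypothetical extra layer
-Model model = modelBuilder.getModel();
-```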
-
-
-[NEXT: Graph neural networks for node classification](GNN.md)
\ No newline at end of file
diff --git a/tutorials/Neuralang.md b/tutorials/Neuralang.md
deleted file mode 100644
index 0eebac22..00000000
--- a/tutorials/Neuralang.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# Neuralang
-
-This is a scripting language for defining graph and traditional
-neural network architectures. It extends JGNN's symbolic definition
-with function declarations.
-
-
-
-## Script
-
-Neuralang scripts consist of functions like the ones below.
-These define neural network components and their interactions
-using a syntax inspired by Mojo. Use a Rust highlighter to cover
-all keywords.
-
-```rust
-fn classify(nodes, h, epochs: !3000, patience: !100, lr: !0.01) {
- return softmax(h[nodes], dim: "row");
-}
-```
-
-The classify function takes two inputs: nodes, which holds the identifiers of the nodes to classify, and h, which is the feature matrix. A row-wise softmax over the rows selected by nodes is returned. The function's signature also declares several configuration values, whose defaults are indicated by a colon (:). The same notation is used to set or overwrite configurations when calling functions, as done for softmax to apply it row-wise. Think of configurations as keyword arguments.
-
-Exclamation marks (!) before numbers broadcast them to all subsequent function calls as new defaults for the same configurations. Broadcasted configurations are also retrievable from JGNN's Neuralang model builder, which is useful for Java integration later. Configuration values are resolved with the following priority:
-
-1. function call arguments
-2. broadcasted configurations (last value, includes configurations set by Java)
-3. function signature defaults
-
-```rust
-fn gcnlayer(A, h, hidden: 64, reg: 0.005) {
- return relu(A@h@matrix(?, hidden, reg) + vector(hidden));
-}
-```
-
-The gcnlayer function accepts the following parameters: A is the adjacency matrix; h is the input feature matrix; hidden is a configuration that defaults to 64 and specifies the number of hidden units; and reg is the regularization term that defaults to 0.005. The ? in matrix definitions lets the autosize feature (used during Java integration, shown later) determine the dimension size from a test run. The function returns the activated output of the GCN layer using ReLU.
-
-```rust
-fn gcn(A, h, classes: extern) {
- h = gcnlayer(A, h);
- h = gcnlayer(A, h, hidden: classes);
- return h;
-}
-```
-
-The gcn function declares the popular Graph Convolutional Network (GCN) architecture and has the number of output classes as a configuration. The function first applies a gcnlayer, and then applies another layer of the same type with the hidden units configuration set to the value of classes. Thus the output matches the number of classes. This configuration is declared as extern (it has no default), so its value must be provided externally, for example by broadcasted defaults or by Java.
-
-
-## Java integration
-
-Neuralang scripts can be integrated into Java code for building and training models. Below is an example of how to do so:
-
-
-```java
-Dataset dataset = new Cora();
-dataset.graph().setMainDiagonal(1).setToSymmetricNormalization();
-
-ModelBuilder modelBuilder = new Neuralang()
- .parse(Paths.get("../architectures.nn"))
- .constant("A", dataset.graph())
- .constant("h", dataset.features())
- .var("nodes")
- .config("classes", dataset.labels().getCols())
- .config("hidden", 16)
- .out("classify(nodes, gcn(A,h))")
- .autosize(new EmptyTensor(dataset.samples().getSlice().size()));
-
-ModelTraining trainer = new ModelTraining()
- .configFrom(modelBuilder)
- .setVerbose(true)
- .setLoss(new CategoricalCrossEntropy())
- .setValidationLoss(new CategoricalCrossEntropy());
-```
-
-In the above example, a dataset (Cora) is loaded, and its graph is prepared by adding self-loops (the renormalization trick) and performing symmetric normalization. A Neuralang instance is then created; this is a ModelBuilder that can parse scripts given as either file Paths or plain text. Constants like the adjacency matrix A and feature matrix h are set, along with variables (nodes) and configurations (classes, hidden). The model and its output are defined with a Neuralang statement. Finally, dimension names and sizes for any ? found in the model declaration are filled by calling autosize, which performs a test run. In the example we use empty tensors to avoid unnecessary computations while determining the dimensions.
-
-A ModelTraining instance is finally configured using parameters obtained from the ModelBuilder, namely the configurations broadcast by the classify function. Don't forget to broadcast configuration values that you need to access from Java code later.
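-
-As a hedged sketch (the exact training entry point may differ between JGNN versions), the configured trainer could then be used as follows, where training and validation slices are drawn from the dataset's samples and XavierNormal initializes the parameters:
-
-```java
-Slice nodes = dataset.samples().getSlice().shuffle(100);  // seeded shuffle of sample identifiers
-Model model = modelBuilder.getModel()
-    .init(new XavierNormal())
-    .train(trainer,
-           dataset.features(),
-           dataset.labels(),
-           nodes.range(0, 0.6),     // training samples
-           nodes.range(0.6, 0.8));  // validation samples
-```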
diff --git a/tutorials/Primitives.md b/tutorials/Primitives.md
deleted file mode 100644
index 020c07b3..00000000
--- a/tutorials/Primitives.md
+++ /dev/null
@@ -1,145 +0,0 @@
-# Primitives
-Primitive operations found in other tutorials suffice for most
-use cases. However, you may need to process
-neural inputs, postprocess learning outcomes, create custom
-parameters, contribute to the library with more components,
-or make derivative works based on native Java vector and matrix
-arithmetic.
-
-This tutorial lists JGNN primitives and explains how to work with them.
-
-In general, JGNN provides the `mklab.JGNN.core.Tensor` abstract class
-for storing data. Vector and matrix operations use primitives
-of this or derived classes. To reduce the number of code predicates
-and improve comprehensibility, operations between two tensors are implemented
-by calling respective methods of the first one.
-
-## Table of contents
-
-1. [Tensor operations](#tensor-operations)
-2. [Vector initialization](#vector-initialization)
-3. [Matrix initialization and operations](#matrix-initialization-and-operations)
-4. [Named dimensions](#named-dimensions)
-
-## Tensor operations
-Tensor operations are performed element-by-element and can be split into
-the following categories:
-
-a) *arithmetic* - combine the values of two tensors to create a new one
-
-b) *in-place arithmetic* - combine the values of two tensors to alter the first one
-
-c) *summary statistics* - output simple numeric values
-
-d) *element access* - manipulation of specific values
-
-
-:bulb: In-place arithmetics follow the same naming conventions as base arithmetics and
-begin with a "self" prefix for pairwise operations or a "setTo" prefix for operations that overwrite all element values.
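-
-For example, the following sketch contrasts base and in-place arithmetic:
-
-```java
-Tensor a = new DenseTensor(5).setToOnes();
-Tensor b = new DenseTensor(5).setToRandom();
-Tensor sum = a.add(b); // allocates a new tensor
-a.selfAdd(b);          // alters a in place without allocating new memory
-```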
-
-Here we present some commonly used operations applicable to all tensors, whose functionality is inferable
-from their name and argument types. For more operations or details, please refer to the project's
-[Javadoc](https://mklab-iti.github.io/JGNN/).
-
-Operation | Type | Comments
---- | --- | ---
-`Tensor copy()` | arithmetic
-`Tensor zeroCopy()` | arithmetic | Zero copies share the same type with the tensor and comprise only zeros.
-`Tensor add(Tensor)` | arithmetic
-`Tensor subtract(Tensor)` | arithmetic
-`Tensor multiply(Tensor)` | arithmetic | Multiplication is performed element-by-element.
-`Tensor multiply(double)` | arithmetic
-`Tensor normalized()` | arithmetic | Division with L2 norm (if non-zero).
-`Tensor toProbability()` | arithmetic | Division with the sum (if non-zero).
-`Tensor setToZero()` | in-place arithmetic
-`Tensor selfAdd(Tensor)` | in-place arithmetic
-`Tensor selfSubtract(Tensor)` | in-place arithmetic
-`Tensor selfMultiply(Tensor)` | in-place arithmetic
-`Tensor selfMultiply(double)` | in-place arithmetic
-`Tensor setToRandom()` | in-place arithmetic | Elements are sampled from the uniform distribution in the range [0,1].
-`Tensor setToOnes()` | in-place arithmetic
-`Tensor setToNormalized()` | in-place arithmetic | Division with L2 norm (if non-zero).
-`Tensor setToProbability()` | in-place arithmetic | Division with the sum (if non-zero).
-`double dot(Tensor)` | summary statistics
-`double norm()` | summary statistics | The L2 norm.
-`double sum()` | summary statistics
-`double max()` | summary statistics
-`double min()` | summary statistics
-`long argmax()` | summary statistics
-`long argmin()` | summary statistics
-`double toDouble()` | summary statistics | Converts tensor with exactly one element to a double (throws exception if more elements).
-`Tensor set(long position, double value)` | element access | NaN values throw exceptions. Is in-place.
-`double get(long position)` | element access
-`Iterator<Long> getNonZeroElements()` | element access | Traverses all elements for dense tensors, but skips zero elements for sparse tensors. (Guarantee: no non-zero element is skipped.) Returns element **positions**.
-`String describe()` | summary statistics | Description of type and dimensions.
-
-
-:bulb: To write code that accommodates both dense and sparse tensors, make sure that iteration over element positions is performed with the iterator returned by `Iterator<Long> getNonZeroElements()`, as in the sketch below.
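-
-A minimal sketch, assuming `tensor` is any previously created Tensor:
-
-```java
-Iterator<Long> positions = tensor.getNonZeroElements();
-while (positions.hasNext()) {
-    long pos = positions.next();
-    double value = tensor.get(pos);
-    // process the (pos, value) pair; zero elements of sparse tensors are skipped
-}
-```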
-
-Prefer in-place arithmetic operations when transforming tensor values or for intermediate calculation steps, as these do not allocate new memory. For example, the following code can be used for creating and normalizing a tensor of ones without using additional memory:
-
-```Java
-Tensor normalized = new DenseTensor(10).setToOnes().setToNormalized();
-```
-
-
-## Vector initialization
-
-You can initialize a dense tensor with the expression `Tensor denseTensor = new mklab.JGNN.tensor.DenseTensor(long size)`.
-If many zero elements are expected, or if sizes go beyond the maximum integer limit Java imposes on array sizes (so that a dense representation cannot be stored as an array), a sparse tensor can be used per `Tensor sparseTensor = new mklab.JGNN.tensor.SparseTensor(long size)`. For example, one-hot encodings for classification problems can be generated with the following code. This creates a dense tensor with *numClasses* elements and puts at element *classId* the value 1:
-
-```java
-int classId = ...;
-int numClasses = ...;
-Tensor oneHotEncoding = new mklab.JGNN.tensor.DenseTensor(numClasses).set(classId, 1);
-```
-
-Dense tensors are serialized with their `String toString()` method and can be deserialized into new tensors with the constructor `mklab.JGNN.tensor.DenseTensor(String)`, as in the following sketch.
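-
-A round trip using the methods named above:
-
-```java
-Tensor original = new mklab.JGNN.tensor.DenseTensor(3).setToRandom();
-String serialized = original.toString();                          // serialize
-Tensor restored = new mklab.JGNN.tensor.DenseTensor(serialized);  // deserialize
-```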
-
-
-## Matrix initialization and operations
-The `Matrix` class extends the concept of tensors with additional operations. Under the hood,
-matrices store their elements linearly and convert (row, col) positions into the respective
-linear positions. The outcome of some methods inherited from tensors may
-need to be typecast back into a matrix (e.g. for all in-place operations), as shown below.
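-
-A small sketch of such a typecast; the method names are those listed in the table that follows:
-
-```java
-Matrix adjacency = new DenseMatrix(3, 3);
-adjacency.put(0, 1, 1).put(1, 2, 1);
-// setToNormalized is inherited from Tensor and returns a Tensor,
-// so its in-place result is typecast back into a Matrix
-Matrix normalized = (Matrix) adjacency.onesMask().setToNormalized();
-```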
-
-Operation | Type | Comments
---- | --- | ---
-`Matrix onesMask()` | arithmetic | Copy of a matrix with elements set to one.
-`Matrix transposed()` | arithmetic | There is no method for in-place transposition.
-`Matrix asTransposed()` | arithmetic | Shares data with the original.
-`Tensor getRow(long)` | arithmetic | Shares data with the original.
-`Tensor getCol(long)` | arithmetic | Shares data with the original.
-`Tensor transform(Tensor x)` | arithmetic | Outputs a dense tensor that holds the linear transformation of the given tensor (using it as a column vector) by multiplying it with the matrix.
-`Matrix matmul(Matrix with)` | arithmetic | Outputs the matrix multiplication **this \* with**. There is no in-place matrix multiplication.
-`Matrix matmul(Matrix with, boolean transposeSelf, boolean transposeWith)` | arithmetic | Does not perform memory allocation to compute transpositions.
-`Matrix external(Tensor horizontal, Tensor vertical)` | static method | External product of two tensors. Is a dense matrix.
-`Matrix symmetricNormalization()` | in-place arithmetic | The symmetrically normalized matrix.
-`Matrix setToSymmetricNormalization()` | in-place arithmetic | The symmetrically normalized matrix.
-`Matrix setMainDiagonal(double value)` | in-place arithmetic | Sets diagonal elements.
-`Matrix setDiagonal(long diagonal, double value)` | in-place arithmetic | Sets diagonal elements.
-`Matrix put(long row, long col, double value)` | element access | NaN values throw exceptions. Is in-place.
-`Iterable<Entry<Long, Long>> getNonZeroEntries()` | element access | Similar to getNonZeroElements() but iterates through (row, col) pairs.
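-
-A sketch of iterating through the non-zero entries of a matrix, assuming `matrix` is any previously created Matrix and using `java.util.Map.Entry`:
-
-```java
-for (Entry<Long, Long> entry : matrix.getNonZeroEntries()) {
-    long row = entry.getKey();
-    long col = entry.getValue();
-    double value = matrix.get(row, col);
-    // process the (row, col, value) triplet
-}
-```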
-
-
-## Named dimensions
-In addition to the operations above, there exist methods that do not affect
-tensor or matrix values but are only responsible for naming dimensions. Functionally, these
-are decorative and aim to improve debugging by throwing errors for incompatible non-null names.
-For example, adding two matrices with different dimension names will result in an error.
-Likewise, the inner dimension names during matrix multiplication should agree.
-
-Operation | Type | Comments
---- | --- | ---
-`Tensor setDimensionName(String name)` | arithmetic | For naming tensor dimensions (of the 1D space tensors lie in).
-`Tensor setRowName(String rowName)` | arithmetic | For naming what kind of information matrix rows hold (e.g. `"samples"`).
-`Tensor setColName(String colName)` | arithmetic | For naming what kind of information matrix columns hold (e.g. `"features"`).
-`Tensor setDimensionName(String rowName, String colName)` | arithmetic | A shorthand of calling `setRowName(rowName).setColName(colName)`.
-
-
-Arithmetic operations, *including* matrix multiplication and copying,
-automatically infer dimension names in the result to make sure that only compatible data
-are combined. Dimension names can be freely changed for any Tensor *without*
-backtracking changes (even for see-through data types, such as the outcome of asTransposed()), as in the sketch below.
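-
-For example (a sketch, with `numSamples` and `numFeatures` standing in for actual sizes):
-
-```java
-Matrix features = new DenseMatrix(numSamples, numFeatures);
-features.setRowName("samples").setColName("features");
-Matrix weights = new DenseMatrix(numFeatures, 64);
-weights.setRowName("features").setColName("hidden");
-Matrix h = features.matmul(weights); // result rows are named "samples", columns "hidden"
-// a mismatch between the inner dimension names would have thrown an error
-```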
-
-:bulb: Matrices effectively have three dimension names: one for their rows, one for their columns, and one for their inner
-data when they are treated as tensors.
-
-
-[NEXT: Debugging](Debugging.md)
diff --git a/tutorials/README.md b/tutorials/README.md
deleted file mode 100644
index 0fd88712..00000000
--- a/tutorials/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# :dart: Tutorials
-Overall, JGNN provides machine learning primitives (e.g. tensors, matrices),
-neural components, model builders that parse expressions to create components,
-model initializers, and training strategies.
-Tutorials cover the following subjects:
-
-## Introduction
-1. [Learning](Learning.md)
-2. [Neural networks](NN.md)
-3. [GNNs for node classification](GNN.md)
-4. [GNNs for graph classification](GraphClassification.md)
-5. [Data creation](Data.md)
-
-## Advanced topics
-6. [Primitives](Primitives.md)
-7. [Debugging](Debugging.md)
-8. [Message passing GNNs](Message.md)
diff --git a/tutorials/graphviz.png b/tutorials/graphviz.png
deleted file mode 100644
index 80f264629a871d81facc1aa55d5bb31fe9bcf94d..0000000000000000000000000000000000000000