Add LoihiRectifiedLinear neuron type #322

Draft
wants to merge 11 commits into base: main
17 changes: 17 additions & 0 deletions CHANGES.rst
@@ -32,25 +32,42 @@ Release history
- Added ``Simulator.clear_probes`` to clear probe histories. This can help reduce memory
usage during long runs: run for a segment of the full run time, record the
relevant outputs, call ``clear_probes``, and resume the run (see the first sketch
after this list). (`#303`_)
- ``Block`` now has a ``.discretize_info`` attribute that stores parameters used
for discretizing that block. (`#309`_)
- ``Model`` now has a ``connection_decode_neurons`` attribute that maps ``Connection``
objects that require decode neurons to the corresponding ``Ensemble`` objects
implementing them. (`#309`_)
- Added the ``GreedyComms`` and ``PartitionComms`` allocators, which reduce inter-chip
communication, speeding up networks with high traffic between chips.
``PartitionComms`` typically finds a better partitioning than ``GreedyComms``,
but requires the ``nxmetis`` package. (`#309`_)
- Added the ``LoihiRectifiedLinear`` neuron type to train deep networks for Loihi using
Nengo or NengoDL. It is a rate neuron type, and thus must ultimately be swapped for
``LoihiSpikingRectifiedLinear`` to run on Loihi (see the second sketch after this
list). (`#309`_)
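
A minimal sketch of the segmented-run pattern for ``Simulator.clear_probes``
(assuming an existing network ``net`` with a probe ``probe``; the names and
segment length are illustrative)::

    import numpy as np
    import nengo_loihi

    with nengo_loihi.Simulator(net) as sim:
        segments = []
        for _ in range(10):
            sim.run(1.0)                      # run one segment of the full run time
            segments.append(sim.data[probe])  # record the relevant outputs
            sim.clear_probes()                # free probe history before resuming
    data = np.concatenate(segments)

And a minimal sketch of training with the new rate neuron type and then swapping
in its spiking counterpart (assuming ``LoihiRectifiedLinear`` is exposed in
``nengo_loihi.neurons`` alongside the existing neuron types)::

    import nengo
    from nengo_loihi.neurons import LoihiRectifiedLinear, LoihiSpikingRectifiedLinear

    with nengo.Network() as net:
        # build and train/tune with the rate approximation
        ens = nengo.Ensemble(100, 1, neuron_type=LoihiRectifiedLinear())

    # swap in the spiking equivalent before running on Loihi
    ens.neuron_type = LoihiSpikingRectifiedLinear()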

**Changed**

- Build errors specify the associated objects, making them easier to debug. (`#289`_)
- Deobfuscated NxSDK API calls. (`#320`_)
- The builder now respects the `precision.bits`_ attribute in ``nengorc`` files,
allowing for reduced-precision builds to save memory (see the sketch below). (`#309`_)
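
A reduced-precision build is requested through the standard ``nengorc``
mechanism; a minimal sketch of such a file (section and key names as documented
at the `precision.bits`_ link)::

    [precision]
    bits: 32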

**Fixed**

- Fixed several issues to ensure that memory is freed when a ``Simulator``
is deleted. (`#312`_)
- Fixed probe filters such that multiple ``Simulator.run`` calls now result in
the same probe data as a single call of equivalent length. (`#271`_, `#303`_)
- Fixed handling of ``dt`` within ``DecodeNeurons`` for ``dt != 0.001``. (`#309`_)

.. _#271: https://github.com/nengo/nengo-loihi/issues/271
.. _#289: https://github.com/nengo/nengo-loihi/pull/289
.. _#303: https://github.com/nengo/nengo-loihi/pull/303
.. _#309: https://github.com/nengo/nengo-loihi/pull/309
.. _#312: https://github.com/nengo/nengo-loihi/pull/312
.. _#317: https://github.com/nengo/nengo-loihi/pull/317
.. _#320: https://github.com/nengo/nengo-loihi/pull/320
.. _precision.bits: https://www.nengo.ai/nengo/nengorc.html#configuration-options

1.0.0 (January 20, 2021)
========================
19 changes: 13 additions & 6 deletions nengo_loihi/block.py
@@ -154,19 +154,21 @@ class Compartment:
def __init__(self, n_compartments, label=None):
self.n_compartments = n_compartments
self.label = label
# dtype must be float32, because of how we discretize in place to int32
self.dtype = np.float32

# parameters specific to compartments/block
self.decay_u = np.ones(n_compartments, dtype=np.float32)
self.decay_u = np.ones(n_compartments, dtype=self.dtype)
# ^ default to no filter
self.decay_v = np.zeros(n_compartments, dtype=np.float32)
self.decay_v = np.zeros(n_compartments, dtype=self.dtype)
# ^ default to integration
self.tau_s = None
self.scale_u = True
self.scale_v = False

self.refract_delay = np.zeros(n_compartments, dtype=np.int32)
self.vth = np.zeros(n_compartments, dtype=np.float32)
self.bias = np.zeros(n_compartments, dtype=np.float32)
self.vth = np.zeros(n_compartments, dtype=self.dtype)
self.bias = np.zeros(n_compartments, dtype=self.dtype)
self.enable_noise = np.zeros(n_compartments, dtype=bool)

# parameters common to core
@@ -176,6 +178,8 @@ def __init__(self, n_compartments, label=None):
self.noise_exp = 0
self.noise_at_membrane = 0

self.discretize_info = None

def __str__(self):
return "%s(%s)" % (type(self).__name__, self.label if self.label else "")

@@ -517,13 +521,14 @@ def bits_per_axon(self, n_weights):

synapse_idx_bits = 4
n_synapses_bits = 6
bits_per_memunit = 64
bits = 0
synapses_per_block = self.n_synapses + 1
for i in range(0, n_weights, synapses_per_block):
n = min(n_weights - i, synapses_per_block)
bits_i = n * bits_per_weight + synapse_idx_bits + n_synapses_bits
# round up to nearest memory unit
bits_i = -64 * (-bits_i // 64)
bits_i = -bits_per_memunit * (-bits_i // bits_per_memunit)
bits += bits_i

return bits
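
The rounding line above relies on the negative floor-division idiom: for
positive integers, ``-(-a // b) == ceil(a / b)``. A standalone sketch of the
memory-unit rounding (an illustrative helper, with assumed names):

    def round_up_bits(bits_i, bits_per_memunit=64):
        # ceil(bits_i / bits_per_memunit) * bits_per_memunit, computed via
        # floor division of the negated numerator
        return -bits_per_memunit * (-bits_i // bits_per_memunit)

    assert round_up_bits(1) == 64
    assert round_up_bits(64) == 64
    assert round_up_bits(65) == 128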
@@ -682,9 +687,11 @@ def _set_weights_indices(
self,
weights,
indices=None,
weight_dtype=np.float32,
weight_dtype=None,
compression=0,
):
# must be float32, because of how we discretize in place to int32
weight_dtype = np.float32 if weight_dtype is None else weight_dtype
weights = [
np.array(w, copy=False, dtype=weight_dtype, ndmin=2) for w in weights
]
8 changes: 6 additions & 2 deletions nengo_loihi/builder/builder.py
@@ -44,6 +44,9 @@ class Model:

Attributes
----------
connection_decode_neurons : dict of {Connection: Ensemble}
Maps each `nengo.Connection` that requires decode neurons to the
`nengo.Ensemble` that implements them.

Build parameters

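A minimal sketch of inspecting this attribute after a build (assuming an
existing network ``net`` containing a decoded connection ``conn``; names are
illustrative):

    import nengo_loihi

    with nengo_loihi.Simulator(net) as sim:
        # the object implementing the decode neurons for `conn`, if any
        decode_ens = sim.model.connection_decode_neurons.get(conn)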
@@ -126,13 +129,14 @@ def __init__(self, dt=0.001, label=None, builder=None):
self.blocks = OrderedDict()
self.block_shapes = {}
self.probes = []
self.block_comp_map = {}

# Will be filled in by the simulator __init__
self.split = None
self.connection_decode_neurons = {}

# Will be filled in by the network builder
self.toplevel = None
self.config = None
self.split = None

# Resources used by the build process
self.objs = defaultdict(dict) # maps Nengo objects to Loihi objects
33 changes: 20 additions & 13 deletions nengo_loihi/builder/connection.py
@@ -13,6 +13,7 @@
from nengo.connection import LearningRule
from nengo.ensemble import Neurons
from nengo.exceptions import BuildError, ValidationError
from nengo.rc import rc
from nengo.solvers import Solver

from nengo_loihi.block import Axon, LoihiBlock, Synapse
@@ -207,6 +208,7 @@ def build_host_to_chip(model, conn):
ens.label = None if conn.label is None else "%s_ens" % conn.label
_inherit_seed(host, ens, model, conn)
host.build(ens)
model.connection_decode_neurons[conn] = ens

pre2ens = Connection(
conn.pre,
@@ -521,6 +523,10 @@ def build_full_chip_connection(model, conn): # noqa: C901
if neuron_type is not None and hasattr(neuron_type, "amplitude"):
weights = scale_matrix(weights, neuron_type.amplitude)

# to proper dtype
transform = transform.astype(rc.float_dtype)
weights = weights.astype(rc.float_dtype)

# loihi_weights has shape (in, out), to match the shape used by block.Synapse
loihi_weights = weights.T

@@ -540,15 +546,13 @@ def build_full_chip_connection(model, conn): # noqa: C901
# use the same scaling as the ensemble does, to get good
# decodes. Note that this assumes that the decoded value
# is in the range -radius to radius, which is usually true.
gain = 1.0 / conn.pre_obj.radius
gain = np.array(1.0 / conn.pre_obj.radius, dtype=rc.float_dtype)

decoder_block = LoihiBlock(2 * d, label="%s" % conn)
decoder_block.compartment.configure_nonspiking(
dt=model.dt, vth=model.vth_nonspiking
)
decoder_block.compartment.bias[:] = 0
model.add_block(decoder_block)
model.objs[conn]["decoded"] = decoder_block

dec_syn = Synapse(n, label="probe_decoders")
weights2 = stack_matrices(
@@ -558,12 +562,12 @@ def build_full_chip_connection(model, conn): # noqa: C901

dec_syn.set_weights(weights2)
decoder_block.add_synapse(dec_syn)
model.objs[conn]["decoders"] = dec_syn
else:
# use spiking decode neurons for on-chip connection
if isinstance(conn.post_obj, Ensemble):
# loihi encoders don't include radius, so handle scaling here
loihi_weights = scale_matrix(loihi_weights, 1.0 / conn.post_obj.radius)
gain = np.array(1.0 / conn.post_obj.radius, dtype=rc.float_dtype)
loihi_weights = scale_matrix(loihi_weights, gain)

post_d = conn.post_obj.size_in
post_inds = np.arange(post_d, dtype=np.int32)[post_slice]
@@ -575,15 +579,16 @@ def build_full_chip_connection(model, conn): # noqa: C901
loihi_weights, block_label="%s" % conn, syn_label="decoders"
)

model.add_block(decoder_block)
model.objs[conn]["decoded"] = decoder_block
model.objs[conn]["decoders"] = dec_syn
model.add_block(decoder_block)
model.objs[conn]["decoded"] = decoder_block
model.objs[conn]["decoders"] = dec_syn
model.connection_decode_neurons[conn] = decoder_block

# use tau_s for filter into decode neurons, decode_tau for filter out
decoder_block.compartment.configure_filter(tau_s, dt=model.dt)
post_tau = model.decode_tau

target_axons = -np.ones(pre_obj.n_neurons, dtype=int)
target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
pre_slice = slice(None)

@@ -662,7 +667,7 @@ def build_full_chip_connection(model, conn): # noqa: C901
post_obj.add_synapse(syn)
model.objs[conn]["weights"] = syn

target_axons = -np.ones(mid_obj.n_neurons, dtype=int)
target_axons = -np.ones(mid_obj.n_neurons, dtype=np.int32)
target_axons[pre_slice] = np.arange(target_axons[pre_slice].size)
assert target_axons[pre_slice].size == n1

@@ -684,7 +689,8 @@ def build_full_chip_connection(model, conn): # noqa: C901
assert post_obj.n_neurons == n2

# loihi encoders don't include radius, so handle scaling here
loihi_weights = scale_matrix(loihi_weights, 1.0 / conn.post_obj.radius)
scale = np.array(1.0 / conn.post_obj.radius, dtype=rc.float_dtype)
loihi_weights = scale_matrix(loihi_weights, scale)

syn = Synapse(n1, label="%s::decoder_weights" % conn)
syn.set_weights(loihi_weights)
@@ -783,6 +789,7 @@ def build_conv2d_connection(model, transform, conn):
obj=conn.post_obj.ensemble,
)
kernel = kernel * gain[0]
kernel = kernel.astype(rc.float_dtype)

pop_type = model.config[conn].pop_type
new_transform = copy.copy(transform)
@@ -802,9 +809,9 @@ def build_full_chip_connection(model, conn): # noqa: C901
"is therefore emulator-only."
)

target_axons = -np.ones(pre_obj.n_neurons, dtype=int)
target_axons = -np.ones(pre_obj.n_neurons, dtype=np.int32)
target_axons[conn.pre_slice] = pixel_idxs(input_shape)
atoms = np.zeros(pre_obj.n_neurons, dtype=int)
atoms = np.zeros(pre_obj.n_neurons, dtype=np.int32)
atoms[conn.pre_slice] = channel_idxs(input_shape)

ax = Axon(np.prod(input_shape.spatial_shape), label="conv2d_weights")
16 changes: 11 additions & 5 deletions nengo_loihi/builder/discretize.py
@@ -146,7 +146,7 @@ def decay_magnitude(decay, x0=2 ** 21, bits=12, offset=0):

x_i = floor(r x_{i-1})

where ``r = (2**bits - offset - decay)``.
where ``r = (2**bits - offset - decay) / 2**bits``.

To simulate the effects of rounding in decay, we subtract an expected loss
due to rounding (``q``) each iteration. Our estimated series is therefore::
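
A minimal sketch of the fixed-point recurrence described in this docstring
(illustrative only; the function itself additionally corrects for the expected
rounding loss ``q``):

    import numpy as np

    def simulate_decay(decay, x0=2 ** 21, bits=12, offset=0, n_steps=100):
        # x_i = floor(r * x_{i-1}), with r = (2**bits - offset - decay) / 2**bits
        r = (2 ** bits - offset - decay) / 2 ** bits
        x, xs = float(x0), []
        for _ in range(n_steps):
            x = np.floor(r * x)
            xs.append(x)
        return np.array(xs)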
@@ -242,10 +242,11 @@ def discretize_block(block):
w_maxs = [s.max_abs_weight() for s in block.synapses]
w_max = max(w_maxs) if len(w_maxs) > 0 else 0

p = discretize_compartment(block.compartment, w_max)
info = discretize_compartment(block.compartment, w_max)
for synapse in block.synapses:
discretize_synapse(synapse, w_max, p["w_scale"], p["w_exp"])
return p["v_scale"]
discretize_synapse(synapse, w_max, info["w_scale"], info["w_exp"])

return info["v_scale"]


def discretize_compartment(comp, w_max):
Expand Down Expand Up @@ -363,7 +364,12 @@ def discretize_compartment(comp, w_max):
vmaxe = np.clip(np.round((np.log2(vmax + 1) - 9) * 0.5), 0, 2 ** 3 - 1)
comp.vmax = 2 ** (9 + 2 * vmaxe) - 1

return dict(w_max=w_max, w_scale=w_scale, w_exp=w_exp, v_scale=v_scale)
info = dict(
w_max=w_max, w_exp=w_exp, v_scale=v_scale, b_scale=b_scale, w_scale=w_scale
)
comp.discretize_info = info

return info


def discretize_synapse(synapse, w_max, w_scale, w_exp):
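A minimal sketch of reading back the stored parameters after discretization
(assuming ``block`` is a ``LoihiBlock`` that has been passed through
``discretize_block``):

    info = block.compartment.discretize_info
    print(info["w_max"], info["w_exp"], info["w_scale"])
    print(info["v_scale"], info["b_scale"])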
18 changes: 14 additions & 4 deletions nengo_loihi/builder/ensemble.py
@@ -7,14 +7,16 @@
from nengo.builder.ensemble import BuiltEnsemble, gen_eval_points
from nengo.dists import Distribution, get_samples
from nengo.exceptions import BuildError
from nengo.rc import rc

from nengo_loihi.block import LoihiBlock
from nengo_loihi.builder.builder import Builder


def get_gain_bias(ens, rng=np.random, intercept_limit=1.0):
def get_gain_bias(ens, rng=np.random, intercept_limit=1.0, dtype=None):
# Modified from the Nengo version to handle `intercept_limit`

dtype = rc.float_dtype if dtype is None else dtype
if ens.gain is not None and ens.bias is not None:
gain = get_samples(ens.gain, ens.n_neurons, rng=rng)
bias = get_samples(ens.bias, ens.n_neurons, rng=rng)
@@ -60,6 +62,11 @@ def get_gain_bias(ens, rng=np.random, intercept_limit=1.0):
"by reducing the maximum intercept value to below 1."
)

gain = gain.astype(dtype) if gain is not None else gain
bias = bias.astype(dtype) if bias is not None else bias
max_rates = max_rates.astype(dtype) if max_rates is not None else max_rates
intercepts = intercepts.astype(dtype) if intercepts is not None else intercepts
return gain, bias, max_rates, intercepts


@@ -71,13 +78,14 @@ def build_ensemble(model, ens):
# Create random number generator
rng = np.random.RandomState(model.seeds[ens])

eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)
eval_points = gen_eval_points(ens, ens.eval_points, rng=rng, dtype=rc.float_dtype)

# Set up encoders
if isinstance(ens.encoders, Distribution):
encoders = get_samples(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
encoders = np.asarray(encoders, dtype=rc.float_dtype)
else:
encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
encoders = npext.array(ens.encoders, min_dims=2, dtype=rc.float_dtype)

if ens.normalize_encoders:
encoders /= npext.norm(encoders, axis=1, keepdims=True)
@@ -90,7 +98,9 @@ def build_ensemble(model, ens):
)

# Build the neurons
gain, bias, max_rates, intercepts = get_gain_bias(ens, rng, model.intercept_limit)
gain, bias, max_rates, intercepts = get_gain_bias(
ens, rng, intercept_limit=model.intercept_limit, dtype=rc.float_dtype
)

block = LoihiBlock(ens.n_neurons, label="%s" % ens)
block.compartment.bias[:] = bias