diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
new file mode 100644
index 000000000..44d9152dd
--- /dev/null
+++ b/docs/_templates/layout.html
@@ -0,0 +1,8 @@
+{# Import the theme's layout. #}
+{% extends "!layout.html" %}
+
+{%- block extrahead %}
+
+{# Call the parent block #}
+{{ super() }}
+{%- endblock %}
diff --git a/docs/api.rst b/docs/api.rst
new file mode 100644
index 000000000..d14e9b8d8
--- /dev/null
+++ b/docs/api.rst
@@ -0,0 +1,29 @@
+*************
+API reference
+*************
+
+.. default-role:: obj
+
+.. autosummary::
+
+ nengo_loihi.add_params
+ nengo_loihi.set_defaults
+ nengo_loihi.Simulator
+ nengo_loihi.builder.Model
+ nengo_loihi.builder.Builder
+ nengo_loihi.loihi_cx.CxSimulator
+ nengo_loihi.loihi_interface.LoihiSimulator
+
+.. autofunction:: nengo_loihi.add_params
+
+.. autofunction:: nengo_loihi.set_defaults
+
+.. autoclass:: nengo_loihi.Simulator
+
+.. autoclass:: nengo_loihi.builder.Model
+
+.. autoclass:: nengo_loihi.builder.Builder
+
+.. autoclass:: nengo_loihi.loihi_cx.CxSimulator
+
+.. autoclass:: nengo_loihi.loihi_interface.LoihiSimulator
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 000000000..e018cb715
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+#
+# This file is execfile()d with the current directory set
+# to its containing dir.
+
+import sys
+
+try:
+ import nengo_loihi
+ import guzzle_sphinx_theme
+except ImportError:
+ print("To build the documentation, nengo_loihi and guzzle_sphinx_theme "
+ "must be installed in the current environment. Please install these "
+ "and their requirements first. A virtualenv is recommended!")
+ sys.exit(1)
+
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.githubpages',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+ 'guzzle_sphinx_theme',
+ 'numpydoc',
+ 'nbsphinx',
+]
+
+default_role = 'py:obj'
+
+# -- sphinx.ext.autodoc
+autoclass_content = 'both' # class and __init__ docstrings are concatenated
+autodoc_default_flags = ['members']
+autodoc_member_order = 'bysource' # default is alphabetical
+
+# -- sphinx.ext.intersphinx
+intersphinx_mapping = {
+ 'nengo': ('https://www.nengo.ai/nengo/', None),
+ 'numpy': ('https://docs.scipy.org/doc/numpy', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
+}
+
+# -- sphinx.ext.todo
+todo_include_todos = True
+
+# -- numpydoc
+numpydoc_show_class_members = False
+
+# -- nbsphinx
+nbsphinx_timeout = -1
+
+# -- sphinx
+nitpicky = True
+exclude_patterns = ['_build']
+source_suffix = '.rst'
+source_encoding = 'utf-8'
+master_doc = 'index'
+
+# Need to include https Mathjax path for sphinx < v1.3
+mathjax_path = ("https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.3/"
+ "config/TeX-AMS-MML_HTMLorMML.js")
+
+project = u'Nengo Loihi'
+authors = u'Applied Brain Research'
+copyright = nengo_loihi.__copyright__
+version = '.'.join(nengo_loihi.__version__.split('.')[:2])
+release = nengo_loihi.__version__ # Full version, with tags
+pygments_style = 'default'
+
+# -- Options for HTML output --------------------------------------------------
+
+pygments_style = "sphinx"
+templates_path = ["_templates"]
+html_static_path = ["_static"]
+
+html_theme_path = guzzle_sphinx_theme.html_theme_path()
+html_theme = "guzzle_sphinx_theme"
+
+html_theme_options = {
+ "project_nav_name": "Nengo Loihi %s" % (version,),
+ "base_url": "https://www.nengo.ai/nengo-loihi",
+}
+
+html_title = "Nengo Loihi {0} docs".format(release)
+htmlhelp_basename = 'Nengo Loihi'
+html_last_updated_fmt = '' # Suppress 'Last updated on:' timestamp
+html_show_sphinx = False
+
+# -- Options for LaTeX output -------------------------------------------------
+
+latex_elements = {
+ 'papersize': 'letterpaper',
+ 'pointsize': '11pt',
+ # 'preamble': '',
+}
+
+latex_documents = [
+ # (source start file, target, title, author, documentclass [howto/manual])
+ ('index', 'nengo_loihi.tex', html_title, authors, 'manual'),
+]
+
+# -- Options for manual page output -------------------------------------------
+
+man_pages = [
+ # (source start file, name, description, authors, manual section).
+ ('index', 'nengo_loihi', html_title, [authors], 1)
+]
+
+# -- Options for Texinfo output -----------------------------------------------
+
+texinfo_documents = [
+ # (source start file, target, title, author, dir menu entry,
+ # description, category)
+ ('index', 'nengo_loihi', html_title, authors, 'Nengo',
+ 'Loihi backend for Nengo', 'Miscellaneous'),
+]
diff --git a/docs/examples.rst b/docs/examples.rst
new file mode 100644
index 000000000..e48f8a9ef
--- /dev/null
+++ b/docs/examples.rst
@@ -0,0 +1,19 @@
+**************
+Example models
+**************
+
+The following examples are similar
+to the `Nengo core examples <https://www.nengo.ai/nengo/examples.html>`_,
+but run using Nengo Loihi.
+We compare performance with Nengo core where appropriate.
+
+.. toctree::
+ :maxdepth: 1
+
+ examples/communication_channel
+ examples/integrator
+ examples/integrator_multi_d
+ examples/oscillator
+ examples/oscillator_nonlinear
+ examples/learn_communication_channel
+ examples/keyword_spotting
diff --git a/examples/communication_channel.ipynb b/docs/examples/communication_channel.ipynb
similarity index 100%
rename from examples/communication_channel.ipynb
rename to docs/examples/communication_channel.ipynb
diff --git a/examples/ens_ens.py b/docs/examples/ens_ens.py
similarity index 100%
rename from examples/ens_ens.py
rename to docs/examples/ens_ens.py
diff --git a/examples/ens_ens_slice.py b/docs/examples/ens_ens_slice.py
similarity index 100%
rename from examples/ens_ens_slice.py
rename to docs/examples/ens_ens_slice.py
diff --git a/examples/integrator.ipynb b/docs/examples/integrator.ipynb
similarity index 100%
rename from examples/integrator.ipynb
rename to docs/examples/integrator.ipynb
diff --git a/examples/integrator_multi_d.ipynb b/docs/examples/integrator_multi_d.ipynb
similarity index 99%
rename from examples/integrator_multi_d.ipynb
rename to docs/examples/integrator_multi_d.ipynb
index baf65ec2e..46f198705 100644
--- a/examples/integrator_multi_d.ipynb
+++ b/docs/examples/integrator_multi_d.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Integrator\n",
+ "# Multidimensional integrator\n",
"\n",
"This demo implements an N-dimensional neural integrator.\n",
"\n",
diff --git a/examples/keyword_spotting.ipynb b/docs/examples/keyword_spotting.ipynb
similarity index 99%
rename from examples/keyword_spotting.ipynb
rename to docs/examples/keyword_spotting.ipynb
index ee655b2fa..60e70c6f8 100644
--- a/examples/keyword_spotting.ipynb
+++ b/docs/examples/keyword_spotting.ipynb
@@ -18,9 +18,9 @@
"This example uses optimized parameters\n",
"generated by Nengo DL:\n",
"\n",
- "- [`reference_params.pkl`](\n",
+ "* [reference_params.pkl](\n",
"https://drive.google.com/open?id=149rLqXnJqZPBiqvWpOAysGyq4fvunlnM)\n",
- "- [`test_stream.pkl`](\n",
+ "* [test_stream.pkl](\n",
"https://drive.google.com/open?id=1AQavHjQKNu1sso0jqYhWj6zUBLKuGNvV)\n",
"\n",
"If you have `requests` installed,\n",
@@ -272,7 +272,7 @@
" return stream[ti % len(stream)]\n",
"\n",
" model.inp.output = play_stream\n",
- " n_steps = stream.shape[1]\n",
+ " n_steps = stream.shape[0]\n",
"\n",
" sim = nengo_loihi.Simulator(model, dt=dt, precompute=True)\n",
" with sim:\n",
diff --git a/examples/learn_communication_channel.ipynb b/docs/examples/learn_communication_channel.ipynb
similarity index 100%
rename from examples/learn_communication_channel.ipynb
rename to docs/examples/learn_communication_channel.ipynb
diff --git a/examples/node_ens_ens.py b/docs/examples/node_ens_ens.py
similarity index 100%
rename from examples/node_ens_ens.py
rename to docs/examples/node_ens_ens.py
diff --git a/examples/oscillator.ipynb b/docs/examples/oscillator.ipynb
similarity index 100%
rename from examples/oscillator.ipynb
rename to docs/examples/oscillator.ipynb
diff --git a/examples/oscillator_nonlinear.ipynb b/docs/examples/oscillator_nonlinear.ipynb
similarity index 100%
rename from examples/oscillator_nonlinear.ipynb
rename to docs/examples/oscillator_nonlinear.ipynb
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 000000000..720423236
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,23 @@
+***********
+Nengo Loihi
+***********
+
+A backend for running Nengo models on Intel's Loihi architecture.
+
+To use the backend, simply replace::
+
+ nengo.Simulator(model)
+
+with::
+
+ nengo_loihi.Simulator(model)
+
+.. toctree::
+ :maxdepth: 2
+
+ overview
+ installation
+ examples
+ api
+ tips
+ setup/index
diff --git a/docs/installation.rst b/docs/installation.rst
new file mode 100644
index 000000000..39690379a
--- /dev/null
+++ b/docs/installation.rst
@@ -0,0 +1,181 @@
+************
+Installation
+************
+
+Local machine
+=============
+
+On a local machine *not* connected to a Loihi host,
+you can use any version of Python
+that has ``pip``.
+
+.. code-block:: bash
+
+ cd path/to/nengo-loihi
+ pip install .
+
+Note that the ``.`` at the end of the ``pip`` command is required.
+
+``pip`` will do its best to install
+Nengo Loihi's requirements.
+If anything goes wrong during this process,
+it is likely related to installing NumPy.
+Follow `our NumPy install instructions
+<https://www.nengo.ai/nengo/getting_started.html#installation>`_,
+then try again.
+
+Superhost
+=========
+
+.. note:: These instructions assume that you are working
+ on a superhost that has already been configured
+ as per the :doc:`setup/superhost` page.
+ Those instructions only need to be run once
+ for each superhost,
+ while these instructions need to be run
+ by every user that is using the superhost.
+
+If you are installing Nengo Loihi on a superhost,
+there are several additional constraints
+due to needing to install NxSDK.
+The easiest way to satisfy
+all of those constraints is to use
+`Miniconda <https://conda.io/miniconda.html>`_
+to set up an isolated environment
+for running Loihi models.
+
+1. Ensure that ``conda`` is available.
+
+ To see if it is available, run
+
+ .. code-block:: bash
+
+ conda -V
+
+ If conda is available, the conda version should be printed
+ to the console.
+
+ If it is not available:
+
+ a. Ask your superhost administrator if conda is installed.
+ If it is, you need to add the ``bin`` directory of
+ the conda installation to your path.
+
+ .. code-block:: bash
+
+ export PATH="/path/to/conda/bin:$PATH"
+
+ Running this once will change your path for the current session.
+ Adding it to a shell configuration file
+ (e.g., ``~/.profile``, ``~/.bashrc``)
+ will change your path for all future terminal sessions.
+
+ b. If conda is not installed, install Miniconda.
+
+ .. code-block:: bash
+
+ wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
+ bash miniconda.sh
+
+ Follow the prompts to set up Miniconda as desired.
+
+2. Create a new ``conda`` environment.
+   Note, you *must* use Python 3.5.5 when working with NxSDK.
+
+ .. code-block:: bash
+
+ conda create --name loihi python=3.5.5
+
+3. Activate your new environment.
+
+ .. code-block:: bash
+
+ source activate loihi
+
+ Sometimes the environment can have issues when first created.
+ Before continuing, run ``which pip`` and ensure that the path
+ to ``pip`` is in your conda environment.
+
+ .. note:: You will need to run ``source activate loihi`` every time
+ you log onto the superhost.
+
+4. Install NumPy with conda.
+
+ .. code-block:: bash
+
+ conda install numpy
+
+ The NumPy provided by conda is usually faster
+ than those installed by other means.
+
+5. Install Nengo Loihi.
+
+ .. code-block:: bash
+
+ cd path/to/nengo-loihi
+ pip install .
+
+ ``pip`` will install other requirements like Nengo automatically.
+
+6. Clone the NxSDK git repository.
+
+ As of August 2018, NxSDK is not publicly available,
+ but is available through Intel's NRC cloud.
+ See their documentation for the NxSDK location,
+ then
+
+ .. code-block:: bash
+
+ git clone path/to/NxSDK.git
+
+7. Check out a release tag.
+
+ As of August 2018, the most recent release is 0.5.5,
+ which is compatible with Nengo Loihi.
+
+ .. code-block:: bash
+
+ cd NxSDK
+ git checkout 0.5.5
+
+8. Add a ``setup.py`` file to NxSDK.
+
+ As of August 2018, NxSDK does not have a ``setup.py`` file,
+ which is necessary for installing NxSDK in a conda environment.
+
+ To add it, execute the following command.
+
+ .. code-block:: bash
+
+ cat > setup.py << 'EOL'
+ import sys
+ from setuptools import setup
+
+ if not ((3, 5, 2) <= sys.version_info[:3] < (3, 6, 0)):
+ pyversion = ".".join("%d" % v for v in sys.version_info[:3])
+ raise EnvironmentError(
+ "NxSDK has .pyc files that only work on Python 3.5.2 through 3.5.5. "
+ "You are running version %s." % pyversion)
+
+ setup(
+ name='nxsdk',
+ version='0.5.5',
+ install_requires=[
+ "numpy",
+ "pandas",
+ "matplotlib",
+ "teamcity-messages",
+ "rpyc<4",
+ ]
+ )
+ EOL
+
+
+ Or you may paste the text above (excluding the first and last lines)
+ into a text editor and save as ``setup.py`` in the NxSDK folder.
+
+9. Install NxSDK.
+
+ .. code-block:: bash
+
+ pip install -e .
diff --git a/docs/overview.rst b/docs/overview.rst
new file mode 100644
index 000000000..e663859fa
--- /dev/null
+++ b/docs/overview.rst
@@ -0,0 +1,116 @@
+********
+Overview
+********
+
+Hardware
+========
+
+Intel's neuromorphic Loihi chip
+is made accessible through an FPGA board.
+We will refer to the devices involved in
+a Loihi model using the following terms.
+
+- *Board*: The Loihi board, which contains one or more Loihi chips.
+- *Chip*: A Loihi chip, which contains several cores.
+- *Core*: A computational unit on a chip.
+ Each chip has several neuron cores, which simulate compartments,
+ synapses, etc. and several Lakemont cores, which are general purpose
+ CPUs for handling input/output and other general tasks.
+- *Host*: The FPGA board that the Loihi board is connected to.
+ The host runs a Linux-based operating system to allow programs
+ to interact with the board using drivers provided by Intel.
+- *Superhost*: The PC physically connected to the FPGA board.
+ Typically the superhost and host communicate over ethernet,
+ but it is also possible to communicate over serial USB.
+- *Local machine*: The computer you are currently using.
+ We usually assume that your local machine is not the superhost,
+ though you can work directly on the superhost.
+
+Nengo Loihi runs on the superhost
+and will automatically handle the communication
+with the host and board.
+Unless you are setting up a new host and board,
+you will only need to interact with
+your local machine and the superhost.
+
+.. note:: If you are setting up a new host or board,
+ see the :doc:`setup/host-board` page.
+
+Software
+========
+
+Nengo Loihi is a Python package for running
+Nengo models on Loihi boards.
+It contains a Loihi **emulator backend**
+for rapid model development and easier debugging,
+and a Loihi **hardware backend**
+for running models on a Loihi board.
+
+Nengo Loihi requires the Nengo_
+Python package to define large-scale neural models.
+Please refer to the `Nengo documentation <https://www.nengo.ai/nengo/>`_
+for example models and instructions
+for building your own models.
+
+.. _Nengo: https://www.nengo.ai/nengo/
+
+Nengo and Nengo Loihi's emulator backend
+are pure Python packages that use
+`NumPy <http://www.numpy.org/>`_
+to simulate neural models quickly.
+On your local machine,
+you only need to install
+Nengo Loihi and its dependencies,
+which include Nengo and NumPy.
+See :doc:`installation` for details.
+
+Nengo Loihi's hardware backend
+uses Intel's NxSDK API
+to interact with the host
+and configure the board.
+On the superhost,
+you need to install Nengo Loihi and its dependencies,
+as well as NxSDK.
+See :doc:`installation` for details.
+
+Running models
+==============
+
+While you can use most models constructed
+in Nengo with Nengo Loihi,
+some models will see degraded performance
+due to the discretization process used to
+convert float values to integers
+for processing on the Loihi chip.
+
+We can recover some of this performance
+by choosing parameters better suited
+to the range of values used by the chip.
+Before you create any Nengo objects, call::
+
+ nengo_loihi.set_defaults()
+
+This will change the default parameters
+for the core Nengo objects,
+resulting in better performance.
+
+After creating the model,
+running it on Nengo Loihi is done by replacing::
+
+ nengo.Simulator(model)
+
+with::
+
+ nengo_loihi.Simulator(model)
+
+By default, Nengo Loihi will use the
+hardware backend if it is available.
+You can choose to use the emulator
+even when the hardware backend is installed
+by doing::
+
+ nengo_loihi.Simulator(model, target='sim')
+
+See :doc:`api` for additional options
+and other functions and classes available
+in Nengo Loihi.
diff --git a/docs/setup/host-board.rst b/docs/setup/host-board.rst
new file mode 100644
index 000000000..dbf9e7001
--- /dev/null
+++ b/docs/setup/host-board.rst
@@ -0,0 +1,479 @@
+**************
+Board and host
+**************
+
+Two variants of the Loihi boards exist:
+a single-chip board codenamed "Wolf Mountain"
+and an eight-chip board codenamed "Nahuku."
+
+Currently, both boards use an
+Altera ARRIA 10 SoC FPGA board
+as the host.
+The Wolf Mountain board is paired with
+an ARRIA 10 based on
+the "Meridian Hill" (MH) architecture.
+The Nahuku board is paired with
+an ARRIA 10 based on
+the "Golden Hardware Reference Design" (GHRD) architecture.
+
+The remainder of this page explains
+how to set up a host-board pair.
+We will use the terminology
+introduced in the :doc:`../overview`
+(board, host, superhost).
+
+Wolf Mountain / Meridian Hill
+=============================
+
+The Wolf Mountain board comes pre-connected to its host SoC.
+They are both contained within a single plastic box.
+The images below show the enclosure
+as well as where ports can be found.
+
+.. todo:: Add images
+
+To set up the Wolf Mountain / Meridian Hill system:
+
+1. Configure the power supply units.
+ Two bench power supplies are needed to supply 5.3V and 12V respectively.
+ The bench power supplies should be dialed
+ to the right voltages **before** they are connected
+ to the Loihi system.
+ The bench power supplies should also be **off** before
+ they are wired to the Loihi system.
+
+2. Connect the bench power supplies
+ to the appropriate "5V" and "12V" ports on the box.
+ Take care identifying the correct ports
+ before connecting the bench power supplies.
+ Do not plug the 12V power supply to the 5V port or vice versa!
+
+3. Connect the USB tty cable (USB-A male to USB-A male cable)
+ to the "TTY" port on the Loihi box,
+ and connect the other end of the cable to the superhost.
+
+4. Connect the Ethernet cable to the "eth" port on the Loihi box,
+ and connect the other end of the cable to the superhost.
+
+5. If a microSD card is present in the microSD card slot,
+ and it has not yet been set up (see `below <#sd-card-image>`__),
+ remove the microSD card from its slot.
+ This is done by using a pair of tweezers to push the card in,
+ and then releasing it (the card slot is spring loaded).
+ Next, use the tweezers to grab on to and gently remove the card.
+
+6. If necessary, set up the microSD card as described
+ `below <#sd-card-image>`__.
+ Then reinsert the microSD card into the microSD card slot.
+ Be sure to push the card into the slot
+ far enough to engage the spring-loaded latch.
+
+7. Turn on the bench power supplies (in any order)
+ and check that the system boots properly.
+
+Nahuku / Golden Hardware Reference Design
+=========================================
+
+Useful links:
+
+- `Altera ARRIA 10 SoC GHRD information
+ `_
+- `Altera ARRIA 10 SoC GHRD user guide
+ `_
+
+The user guide is especially useful
+for reading status LEDs on the host
+(see section 5-3).
+The image below shows the location of components
+important to the Nahuku / GHRD Loihi system.
+
+.. image:: nahuku-connections.png
+ :width: 100%
+ :target: _images/nahuku-connections.png
+
+To set up the Nahuku / GHRD system:
+
+1. Install the two FPGA RAM modules on the host
+ (see image above for where they should be installed).
+
+2. Connect the Nahuku board to the "Nahuku board connection" indicated above.
+
+ .. warning:: The pins in the connector can be quite fragile.
+ Ensure that the two sides of the connectors are lined up
+ before applying pressure to mate the two connectors.
+
+3. Connect the USB tty cable (microUSB male to USB-A male cable)
+ to the "TTY" port on the host,
+ and connect the other end of the cable to the superhost.
+
+4. Connect the Ethernet cable to the ethernet port on the host,
+ and connect the other end to the superhost.
+
+5. If a microSD card is present in the microSD card slot,
+ and it has not yet been set up (see `below <#sd-card-image>`__),
+ remove the microSD card from its slot.
+ The card slot has a latch that is spring loaded.
+ To remove the microSD card, push it into the card slot, then release.
+ Once the microSD card is unlatched from the card slot,
+ it can then be removed by sliding it out of the card slot.
+
+6. If necessary, set up the microSD card as described
+   `below <#sd-card-image>`__.
+ Then reinsert the microSD card into the microSD card slot.
+ Be sure to push the card into the slot
+ far enough to engage the spring-loaded latch.
+
+7. Connect the power brick to the power port of the host.
+ Plug the power brick into the wall socket.
+
+8. Turn on the power switch on the host
+ and check that the system boots properly.
+
+Creating an SD card image
+=========================
+
+The microSD card on the host
+contains its operating system.
+Creating an SD card image
+requires you to:
+
+1. compile Ubuntu 16.04 for the ARM processor,
+2. add Loihi specific configuration files, and
+3. run a Python script to create the SD card image.
+
+Instructions for each step follow.
+
+Compiling Ubuntu
+----------------
+
+These steps are based on `this guide
+`_.
+These steps should be performed on the superhost.
+You will need root access.
+
+For simplicity,
+begin these steps in a new empty directory
+on a partition with several GB of free space.
+
+Begin by switching to the root user.
+
+.. code-block:: bash
+
+ sudo -s
+
+Then, as ``root``:
+
+1. Create and navigate to a new folder for storing Ubuntu files.
+
+ .. code-block:: bash
+
+ mkdir ubuntu-rootfs
+ cd ubuntu-rootfs
+
+2. Download the latest Ubuntu 16.04 release compiled for ARM.
+
+ .. code-block:: bash
+
+      wget http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.4-base-armhf.tar.gz -O ubuntu-base.tar.gz
+
+3. Untar the files from the downloaded tarball.
+
+ .. code-block:: bash
+
+ tar -xpf ubuntu-base.tar.gz
+
+4. Install ``qemu-user-static`` and copy it to ``ubuntu-rootfs``.
+
+ .. code-block:: bash
+
+ apt install qemu-user-static
+ cp /usr/bin/qemu-arm-static ./usr/bin/
+
+5. Copy the superhost's ``/etc/resolv.conf`` file to ``ubuntu-rootfs``.
+ This will allow us to access repositories on the internet in later steps.
+
+ .. code-block:: bash
+
+ cp /etc/resolv.conf ./etc/resolv.conf
+
+6. Return to the parent directory.
+
+ .. code-block:: bash
+
+ cd ..
+
+ If you do ``ls``, you should see the ``ubuntu-rootfs`` directory
+ that you were working on earlier.
+
+The ``ubuntu-rootfs`` directory you set up
+contains operating system files.
+We will now use ``chroot`` to
+act as though we are using those files
+rather than the actual superhost OS.
+Note that we are still running as the ``root`` user.
+
+Begin by mounting system components and running ``chroot``.
+
+.. code-block:: bash
+
+ mount -t proc /proc ./ubuntu-rootfs/proc
+ mount -t sysfs /sys ./ubuntu-rootfs/sys
+ mount -o bind /dev ./ubuntu-rootfs/dev
+ mount -o bind /dev/pts ./ubuntu-rootfs/dev/pts
+ chroot ./ubuntu-rootfs
+
+Then, within the ``chroot`` environment:
+
+1. Update ``apt`` sources.
+
+ .. code-block:: bash
+
+ apt update
+
+2. Install a minimal set of general packages.
+ Since you are in the ``chroot`` environment,
+ these will be installed inside ``ubuntu-rootfs``,
+ not the superhost's OS files.
+
+ .. code-block:: bash
+
+ apt install --no-install-recommends \
+ language-pack-en-base sudo ssh rsyslog \
+ net-tools ethtool network-manager wireless-tools iputils-ping \
+ lxde xfce4-power-manager \
+ xinit xorg lightdm lightdm-gtk-greeter \
+ alsa-utils gnome-mplayer bash-completion \
+ lxtask htop python-gobject-2 python-gtk2 \
+ synaptic resolvconf
+
+3. Install packages needed to run Loihi models.
+
+ .. code-block:: bash
+
+ apt install libffi6 python3-pip python3-dev fake-hwclock
+
+4. Add a user to the OS, and give it admin privileges.
+
+ We will call our user ``abr-user``,
+ but you can use a different name if desired.
+
+ .. code-block:: bash
+
+ adduser abr-user
+ addgroup abr-user adm && addgroup abr-user sudo
+
+5. Set a unique hostname.
+
+ We use ``loihi-mh`` for our Wolf Mountain / Meridian Hill system
+ and ``loihi-ghrd`` for our Nahuku / GHRD system.
+ If you have more than one of the same type of system,
+ use a more detailed naming scheme.
+
+ .. code-block:: bash
+
+ echo 'loihi-xxx' > /etc/hostname
+
+6. Add host entries.
+
+ .. code-block:: bash
+
+ echo '127.0.0.1 localhost' >> /etc/hosts
+ echo '127.0.1.1 loihi-xxx' >> /etc/hosts
+
+7. Assign a static IP to the board.
+
+ Begin by opening ``/etc/network/interfaces``
+   with your text editor of choice.
+ If you are not sure, try
+
+ .. code-block:: bash
+
+ nano /etc/network/interfaces
+
+ Add the following text to the end of the ``interfaces`` file.
+   Replace ``<addr>`` with:
+
+ * ``10.42.0.34`` for Wolf Mountain / Meridian Hill systems
+ * ``10.42.0.100`` for Nahuku / GHRD systems
+
+ .. code-block:: text
+
+ auto lo
+ iface lo inet loopback
+
+ auto eth0
+ iface eth0 inet static
+          address <addr>
+ netmask 255.255.255.0
+ gateway 10.42.0.1
+
+ dns-nameserver 10.42.0.1
+
+8. Update DNS configuration based on the network connection.
+ This will modify the ``/etc/resolv.conf`` we changed previously.
+
+ When prompted, select "Yes" to the dialog box
+ because we want to allow dynamic updates.
+
+ .. code-block:: bash
+
+ dpkg-reconfigure resolvconf
+
+9. (Optional) Set up NFS.
+
+ .. todo:: Add instructions for setting up NFS.
+
+We can now exit the ``chroot`` environment
+
+.. code-block:: bash
+
+ exit
+
+And unmount the environment files
+
+.. code-block:: bash
+
+ umount ubuntu-rootfs/proc
+ umount ubuntu-rootfs/sys
+ umount ubuntu-rootfs/dev/pts
+ umount ubuntu-rootfs/dev
+
+But stay as the root user for the remaining steps.
+
+Adding Loihi-specific FPGA configuration files
+----------------------------------------------
+
+The Loihi specific configuration files
+can be obtained from Intel's cloud server.
+Download all of the files below to the directory
+that contains the ``ubuntu-rootfs`` directory.
+
+As of August 2018,
+the latest files for the
+two Loihi boards are located in:
+
+* *Wolf Mountain*: ``/nfs/ncl/ext/boot/mh_2018-07-04/``
+* *Nahuku*: ``/nfs/ncl/ext/boot/ghrd_2018-07-04/``
+
+Download the following files:
+
+* ``zImage``: A linux kernel compiled for the host.
+* ``u-boot.scr``: The ``uboot`` script for configuring the FPGA.
+* ``socfpga.rbf``: The FPGA configuration file.
+
+and one of the following FPGA device tree blob files,
+depending on the system:
+
+* *Wolf Mountain*: ``meridian_hill_fab1b.dtb``
+* *Nahuku*: ``socfpga_arria10_socdk.dtb``
+
+Additionally, you need the u-boot preloader image,
+``uboot_w_dtb-mkpimage.bin``.
+The location of this file is also system dependent.
+
+* *Wolf Mountain*:
+ Download ``NxRuntime_01_05_17.tar.gz`` from the Intel sharepoint site
+ and extract it. ``uboot_w_dtb-mkpimage.bin`` is in the ``board`` folder.
+* *Nahuku*:
+ Located in the ``/nfs/ncl/ext/boot/ghrd_2018-05-17`` folder
+ on the Intel cloud server.
+
+Your folder should now contain the following files
+if you are setting up a Wolf Mountain system:
+
+* ``ubuntu-rootfs/``
+* ``meridian_hill_fab1b.dtb``
+* ``socfpga.rbf``
+* ``u-boot.scr``
+* ``uboot_w_dtb-mkpimage.bin``
+* ``zImage``
+
+And the following files
+if you are setting up a Nahuku system.
+
+* ``ubuntu-rootfs/``
+* ``socfpga.rbf``
+* ``socfpga_arria10_socdk.dtb``
+* ``u-boot.scr``
+* ``uboot_w_dtb-mkpimage.bin``
+* ``zImage``
+
+Making the SD card image
+------------------------
+
+The easiest way to make the SD card image
+is to use a Python script provided by RocketBoards.org.
+
+We assume in the following steps that you are
+in the directory containing ``ubuntu-rootfs``
+and the Loihi FPGA files,
+and that you are still acting as the root user
+(if not, do ``sudo -s``).
+
+1. Download the SD card image script.
+
+ .. code-block:: bash
+
+ wget http://releases.rocketboards.org/release/2017.10/gsrd/tools/make_sdimage.py
+
+2. Run the script to create the SD card image.
+
+   .. note:: Replace ``<system>.dtb`` below with the appropriate
+ ``*.dtb`` file from the previous step.
+
+ .. code-block:: bash
+
+ python ./make_sdimage.py -f \
+ -P uboot_w_dtb-mkpimage.bin,num=3,format=raw,size=10M,type=A2 \
+ -P ubuntu-rootfs/*,num=2,format=ext3,size=3000M \
+        -P zImage,socfpga.rbf,<system>.dtb,u-boot.scr,num=1,format=vfat,size=500M \
+ -s 3550M \
+ -n sdimage_small.img
+
+ After running this command,
+ you should have a ``sdimage_small.img`` in the current directory.
+
+ This image file contains three partitions:
+
+ * Partition 1 (500 MB): contains the ``/boot`` partition,
+ which contains ``zImage``, ``socfpga.rbf``, ``.dtb``,
+ and ``u-boot.scr``.
+ * Partition 2 (3 GB): contains the Ubuntu OS file system.
+ * Partition 3 (10 MB): contains the u-boot preloader image.
+
+ .. note:: The partition sizes should not be changed from the values above.
+
+ .. note:: When making an SD card for the Nahuku system,
+ the Python script may throw an error
+ when finalizing the third partition.
+ This error can be safely ignored.
+ It occurs because the ``uboot_w_dtb-mkpimage.bin`` image
+ for Nahuku is 1 byte larger than the 10 MB partition size.
+ However, this does not seem to impact the functionality
+ of the SD card image.
+
+3. Connect an SD card to the superhost.
+ Determine the identifier assigned to it by Linux with
+
+ .. code-block:: bash
+
+ lsblk
+
+ You should be able to determine which device (e.g. ``sdc``)
+ is the SD card via the size and mountpoint.
+
+4. Write the SD card image to the physical SD card.
+
+ .. warning:: Be sure to use the correct device
+ in the ``dd`` command below.
+ Using the wrong device will destroy
+ existing data on that device.
+
+ .. code-block:: bash
+
+      dd if=sdimage_small.img | pv -s 3550M | dd of=/dev/<device>
+
+   where ``<device>`` is the device determined with ``lsblk``.
+
+5. Remove the SD card from the superhost
+ and insert it into the host SD card slot.
diff --git a/docs/setup/index.rst b/docs/setup/index.rst
new file mode 100644
index 000000000..7b1947253
--- /dev/null
+++ b/docs/setup/index.rst
@@ -0,0 +1,26 @@
+**************
+Hardware setup
+**************
+
+These pages are for Loihi system administrators
+or other individuals who are setting up a Loihi system
+for the first time.
+
+While other parts of Nengo Loihi documentation
+are written with enough ambiguity that
+they should work on a variety of different setups,
+this page is specific to certain hardware designs
+and has complete explicit steps.
+As such, these instructions are not likely to work
+with other designs,
+and as these designs and other infrastructure change,
+these instructions may no longer work.
+If you run into problems,
+please contact the hardware distributor
+(usually Intel).
+
+.. toctree::
+ :maxdepth: 2
+
+ host-board
+ superhost
diff --git a/docs/setup/nahuku-connections.png b/docs/setup/nahuku-connections.png
new file mode 100644
index 000000000..45672104e
Binary files /dev/null and b/docs/setup/nahuku-connections.png differ
diff --git a/docs/setup/superhost.rst b/docs/setup/superhost.rst
new file mode 100644
index 000000000..351233b8b
--- /dev/null
+++ b/docs/setup/superhost.rst
@@ -0,0 +1,105 @@
+*********
+Superhost
+*********
+
+.. todo:: Add missing sections (setting up Slurm, etc)
+
+Adding a new user
+=================
+
+1. Log in as a user who can use ``sudo``.
+
+2. Change to the super user.
+
+ .. code-block:: bash
+
+ sudo -s
+
+3. Add the user.
+
+ .. code-block:: bash
+
+ adduser <username>
+
+4. *(Optional)*: Enable the user to use ``sudo``.
+
+ .. code-block:: bash
+
+ usermod -aG sudo <username>
+
+5. Add the user to the ``loihi_sudo`` group.
+
+ This is necessary for allowing the user
+ to run models on Loihi boards.
+
+ .. code-block:: bash
+
+ usermod -aG loihi_sudo <username>
+
+6. Propagate the new user information to connected Loihi boards.
+
+ .. code-block:: bash
+
+ make -C /var/yp
+
+You can then run ``exit`` to exit the superuser session.
+
+Note that the final step copies user information
+to the Loihi boards.
+You therefore do not have to make a new user account
+on the hosts or boards that are connected to the superhost.
+
+To be sure that the user information has been copied correctly,
+once finishing the above steps,
+you should test by logging into all connected hosts and boards.
+
+For example, on the superhost try
+
+.. code-block:: bash
+
+ ssh <username>@host-1
+ ssh <username>@board-1
+
+Connecting to a host
+====================
+
+The host and superhost communicate through
+a hardwired Ethernet connection.
+The superhost therefore must have
+at least two network interfaces,
+one for an external internet connection
+and one to connect to the FPGA host.
+
+The host only has one network interface,
+which is connected to the superhost.
+In order to access the internet,
+the superhost must share
+its external connection with the host.
+
+To do this, assuming that you are running Ubuntu:
+
+1. Open "Network Connections".
+
+2. Identify the Ethernet connection being used
+ to connect to the Loihi system.
+ Clicking the network icon in the task bar
+ will inform you which network interfaces are available.
+
+3. Select "Wired connection <n>" and click "Edit".
+
+4. Navigate to "IPv4 Settings" and change
+ "Method" to "Shared to other computers".
+
+5. Click "Save".
+
+6. Check that the network interface has been assigned the correct IP.
+
+ When the Ethernet cable between the host and superhost is connected, do:
+
+ .. code-block:: bash
+
+ sudo ifconfig -a
+
+ to display the information for each network interface.
+ The network interface being used to connect to the Loihi system
+ should be assigned the IP ``10.42.0.1``.
diff --git a/docs/tips.rst b/docs/tips.rst
new file mode 100644
index 000000000..dc49a2b18
--- /dev/null
+++ b/docs/tips.rst
@@ -0,0 +1,430 @@
+***************
+Tips and tricks
+***************
+
+Local machine
+=============
+
+SSH hosts
+---------
+
+Adding ``ssh hosts`` to your SSH configuration
+will make working with remote superhosts, hosts, and boards
+much quicker and easier.
+After setting them up,
+you will be able to connect to any machine
+through a single ``ssh <alias>`` command.
+
+To begin, make a ``~/.ssh/config`` file.
+
+.. code-block:: bash
+
+ touch ~/.ssh/config
+
+Then open that file in a text editor
+and add a ``Host`` entry
+for each machine that you want to interact with remotely.
+
+Typically machines that you can connect to directly
+will have a configuration like this:
+
+.. code-block:: text
+
+ Host <alias>
+ User <username>
+ HostName <address>
+
+For security, the port on which ssh connections are accepted
+is often changed. To specify a port, add the following
+to the ``Host`` entry.
+
+.. code-block:: text
+
+ Host <alias>
+ ...
+ Port 1234
+
+Finally, many machines (especially hosts and boards)
+are not accessible through the open internet
+and must instead be accessed through another machine,
+like a superhost.
+To access these with one command,
+add the following to the ``Host`` entry.
+``<proxy alias>`` refers to the ``<alias>``
+of the ``Host`` entry through which
+you access the machine
+(e.g., the ``<host alias>`` entry uses
+the superhost's short name for ``<proxy alias>``).
+
+.. code-block:: text
+
+ Host <alias>
+ ...
+ ProxyCommand ssh <proxy alias> -W %h:%p
+
+Once host entries are defined, you can access those machines with:
+
+.. code-block:: bash
+
+ ssh <alias>
+
+You can also use the short name in ``rsync``, ``scp``,
+and other commands that use ``ssh`` under the hood.
+
+For more details and options, see `this tutorial
+`_.
+
+We recommend that Loihi system administrators
+make specific host entries for their system
+available to all users.
+
+SSH keys
+--------
+
+SSH keys allow you to log in to remote machines
+without providing your password.
+This is especially useful when accessing
+a board through a host and superhost,
+each of which require authentication.
+
+You may already have created
+an SSH key for another purpose.
+By default, SSH keys are stored as
+
+* ``~/.ssh/id_rsa`` (private key)
+* ``~/.ssh/id_rsa.pub`` (public key)
+
+If these files exist when you do ``ls ~/.ssh``,
+then you already have an SSH key.
+
+If you do not have an SSH key,
+you can create one with
+
+.. code-block:: bash
+
+ ssh-keygen
+
+Follow the prompts,
+using the default values when unsure.
+We recommend setting a passphrase
+in case someone obtains
+your SSH key pair.
+
+Once you have an SSH key pair,
+you will copy your public key
+to each machine you want to
+log into without a password.
+
+.. code-block:: bash
+
+ ssh-copy-id <alias>
+
+``<alias>`` is the name you specified
+in your SSH config file for that host
+(e.g., ``ssh-copy-id loihi-host``).
+You will be prompted for your password
+in order to copy the key.
+Once it is copied, try ``ssh <alias>``
+to confirm that you can log in
+without providing a password.
+
+Remote port tunneling
+---------------------
+
+Tunneling a remote port to your local machine
+allows you to run the Jupyter notebook server
+or the Nengo GUI server on the superhost or host,
+but access the web-based interface
+on your local machine.
+
+To do this, we will
+create a new terminal window on the local machine
+that we will keep open while the tunnel is active.
+In this terminal, do
+
+.. code-block:: bash
+
+ ssh -L <port>:localhost:<port> <alias>
+
+You will then enter an SSH session
+in which you can start the process
+that will communicate over ``<port>``.
+
+**Example 1**:
+Starting a Nengo GUI server on port 8000
+of ``superhost-1``,
+which has a ``loihi`` conda environment.
+
+.. code-block:: bash
+
+ # In a new terminal window on your local machine
+ ssh -L 8000:localhost:8000 superhost-1
+ # We are now on superhost-1
+ source activate loihi
+ cd ~/nengo-loihi/docs/examples
+ nengo --port 8000 --no-browser --auto-shutdown 0 --backend nengo_loihi
+
+On your local machine,
+open ``http://localhost:8000/``
+and you should see the Nengo GUI interface.
+
+**Example 2**:
+Starting a Jupyter notebook server on port 8080
+of ``superhost-2``,
+which has a ``loihi`` virtualenv environment.
+
+.. code-block:: bash
+
+ # In a new terminal window on your local machine
+ ssh -L 8080:localhost:8080 superhost-2
+ # We are now on superhost-2
+ workon loihi
+ cd ~/nengo-loihi/docs/examples
+ jupyter notebook --no-browser --port 8080
+
+The ``jupyter`` command should print out a URL of the form
+``http://localhost:8080/?token=<token>``,
+which you can open on your local machine.
+
+Syncing with rsync
+------------------
+
+If you work on your local machine
+and push changes to multiple remote superhosts,
+it is worth spending some time to set up
+a robust solution for syncing files
+between your local machine and the superhosts.
+
+``rsync`` is a good option because it is fast
+(it detects what has changed and only sends changes)
+and can be configured to ensure that
+the files on your local machine are the canonical files
+and are not overwritten by changes made on remotes.
+``rsync`` also uses SSH under the hood,
+so the SSH hosts you set up previously can be used.
+
+``rsync`` is available from most package managers
+(e.g. ``apt``, ``brew``)
+and in many cases
+will already be installed
+on your system.
+
+The basic command that is most useful is
+
+.. code-block:: bash
+
+ rsync -rtuv --exclude=*.pyc /src/folder /dst/folder
+
+* ``-r`` recurses into subdirectories
+* ``-t`` copies and updates file modification times
+* ``-u`` replaces files with the most up-to-date version
+ as determined by modification time
+* ``-v`` adds more console output to see what has changed
+* ``--exclude=*.pyc`` ensures that ``*.pyc`` files are not copied
+
+See also `more details and options
+`_.
+
+When sending files to a remote host,
+you may also want to use the ``--delete`` option
+to delete files in the destination folder
+that have been removed from the source folder.
+
+To simplify ``rsync`` usage,
+you can make small ``bash`` functions
+to make your workflow explicit.
+
+For example, the following
+bash functions will sync the ``NxSDK``
+and ``nengo-loihi`` folders
+between the local machine
+and the user's home directory on ``host-1``.
+In this example, the ``--delete`` flag
+is only used on pushing so that files
+are never deleted from the local machine.
+The ``--exclude=*.pyc`` flag
+is only used for ``nengo-loihi`` because
+``*.pyc`` files are an important
+part of the NxSDK source tree.
+These and other options can be adapted
+based on your personal workflow.
+
+.. code-block:: bash
+
+ LOIHI="/path/to/nengo-loihi/"
+ NXSDK="/path/to/NxSDK/"
+ push_host1() {
+ rsync -rtuv --exclude=*.pyc --delete "$LOIHI" "host-1:nengo-loihi"
+ rsync -rtuv --delete "$NXSDK" "host-1:NxSDK"
+ }
+ pull_host1() {
+ rsync -rtuv --exclude=*.pyc "host-1:nengo-loihi/" "$LOIHI"
+ rsync -rtuv "host-1:NxSDK" "$NXSDK"
+ }
+
+These functions are placed in the ``~/.bashrc`` file
+and executed at a terminal with
+
+.. code-block:: bash
+
+ push_host1
+ pull_host1
+
+Remote editing with SSHFS
+-------------------------
+
+If you primarily work with a single remote superhost,
+SSHFS is a good option that allows you
+to mount a remote filesystem to your local machine,
+meaning that you manipulate files as you
+normally would on your local machine,
+but those files will actually exist
+on the remote machine.
+SSHFS ensures that changes you make locally
+are efficiently sent to the remote.
+
+SSHFS is available from most package managers,
+including ``apt`` and ``brew``.
+
+To mount a remote directory to your local machine,
+create a directory to mount to,
+then call ``sshfs`` to mount it.
+
+.. code-block:: bash
+
+ mkdir -p <mount point>
+ sshfs -o allow_other,defer_permissions <alias>:<directory> <mount point>
+
+When you are done using the remote files,
+unmount the mount point.
+
+.. code-block:: bash
+
+ fusermount -u <mount point>
+
+.. note::
+ If ``fusermount`` is not available
+ and you have ``sudo`` access, you can also unmount with
+
+ .. code-block:: bash
+
+ sudo umount <mount point>
+
+As with ``rsync``, since you may do these commands frequently,
+it can save time to make a short bash function.
+The following example functions mount and unmount
+the ``host-2`` ``~/loihi`` directory
+to the local machine's ``~/remote/host-2`` directory.
+
+.. code-block:: bash
+
+ mount_host2() {
+ mkdir -p ~/remote/host-2
+ sshfs host-2:loihi ~/remote/host-2
+ }
+ unmount_host2() {
+ fusermount -u ~/remote/host-2
+ }
+
+Superhost
+=========
+
+Slurm cheatsheet
+----------------
+
+Most Loihi superhosts use `Slurm <https://slurm.schedmd.com/>`_
+to schedule and distribute jobs to Loihi hosts.
+Below are the commands that Slurm makes available
+and what they do.
+
+``sinfo``
+ Check the status (availability) of connected hosts.
+``squeue``
+ Check the status of your jobs.
+``scancel <jobid>``
+ Kill one of your jobs.
+``scancel --user=<username>``
+ Kill all of your jobs.
+``sudo scontrol update nodename="<nodename>" state="idle"``
+ Mark a Loihi host as "idle",
+ which places it in the pool of available hosts to be used.
+ Use this when a Loihi host that was down comes back up.
+
+ .. note:: This should only be done by a system administrator.
+
+Use Slurm by default
+--------------------
+
+Most superhosts use Slurm to run models on the host.
+Normally you can opt in to executing a command with
+
+.. code-block:: bash
+
+ SLURM=1 my-command
+
+However, you will usually want to use Slurm,
+so to switch to an opt-out setup,
+open your shell configuration file
+in a text editor (usually ``~/.bashrc``),
+and add the following line to the end of the file.
+
+.. code-block:: bash
+
+ export SLURM=1
+
+Once making this change you can opt out of using Slurm
+by executing a command with
+
+.. code-block:: bash
+
+ SLURM=0 my-command
+
+Running large models
+--------------------
+
+Normally you do not need to do anything
+other than setting the ``SLURM`` environment variable
+to run a model on Slurm.
+However, in some situations Slurm may kill your job
+due to long run times or other factors.
+
+Custom Slurm partitions can be used to run
+your job with different sets of restrictions.
+Your system administrator will have to set up the partition.
+
+As an example, to run a job with the ``loihiinf`` partition,
+where ``n2driverservice`` is in ``/nfs/ncl/bin``,
+first open two terminal windows.
+
+In one terminal, run
+
+.. code-block:: bash
+
+ srun -p loihiinf /nfs/ncl/bin/n2driverservice
+
+In the other terminal, run your job without Slurm.
+
+.. code-block:: bash
+
+ SLURM=0 python models/my_model.py
+
+Plotting
+--------
+
+If you are generating plots with Matplotlib
+on the superhost or host,
+you may run into issues due to there being
+no monitor attached to those machines
+(i.e., they are "headless").
+Rather than plotting to a screen,
+you can instead save plots as files
+with ``plt.savefig``.
+You will also need to configure
+Matplotlib to use a headless backend by default.
+
+The easiest way to do this is with a ``matplotlibrc`` file.
+
+.. code-block:: bash
+
+ mkdir -p ~/.config/matplotlib
+ echo "backend: Agg" >> ~/.config/matplotlib/matplotlibrc
diff --git a/nengo_loihi/builder.py b/nengo_loihi/builder.py
index f44cec621..37341c690 100644
--- a/nengo_loihi/builder.py
+++ b/nengo_loihi/builder.py
@@ -43,7 +43,45 @@
class Model(CxModel):
"""The data structure for the chip/simulator.
- CxModel defines adding ensembles, discretizing, and tracks the simulator
+ This is a subclass of CxModel, which defines methods for adding ensembles,
+ discretizing, and tracking the simulator. This class handles build
+ functions and keeping track of chip/host communication.
+
+ Parameters
+ ----------
+ dt : float, optional (Default: 0.001)
+ The length of a simulator timestep, in seconds.
+ label : str, optional (Default: None)
+ A name or description to differentiate models.
+ builder : Builder, optional (Default: None)
+ A `.Builder` instance to keep track of build functions.
+ If None, the default builder will be used.
+
+ Attributes
+ ----------
+ builder : Builder
+ The build functions used by this model.
+ dt : float
+ The length of a simulator timestep, in seconds.
+ label : str or None
+ A name or description to differentiate models.
+ objs : dict
+ Dictionary mapping from Nengo objects to Nengo Loihi objects.
+ params : dict
+ Mapping from objects to namedtuples containing parameters generated
+ in the build process.
+ probes : list
+ List of all probes. Probes must be added to this list in the build
+ process, as this list is used by Simulator.
+ seeded : dict
+ All objects are assigned a seed, whether the user defined the seed
+ or it was automatically generated. 'seeded' keeps track of whether
+ the seed is user-defined. We consider the seed to be user-defined
+ if it was set directly on the object, or if a seed was set on the
+ network in which the object resides, or if a seed was set on any
+ ancestor network of the network in which the object resides.
+ seeds : dict
+ Mapping from objects to the integer seed assigned to that object.
"""
def __init__(self, dt=0.001, label=None, builder=None):
super(Model, self).__init__()
@@ -546,8 +584,9 @@ def build_connection(model, conn):
if conn.learning_rule_type is not None:
if isinstance(conn.learning_rule_type, nengo.PES):
pes_learn_rate = conn.learning_rule_type.learning_rate
- # scale learning rates such that the default would be 10
- pes_learn_rate *= 10 / nengo.PES.learning_rate.default
+ # scale learning rates to roughly match Nengo
+ # 1e-4 is the Nengo core default learning rate
+ pes_learn_rate *= 4 / 1e-4
assert isinstance(conn.learning_rule_type.pre_synapse,
nengo.synapses.Lowpass)
pes_pre_syn = conn.learning_rule_type.pre_synapse.tau
diff --git a/nengo_loihi/config.py b/nengo_loihi/config.py
index 3a42f1441..b27d6cc48 100644
--- a/nengo_loihi/config.py
+++ b/nengo_loihi/config.py
@@ -3,7 +3,26 @@
def add_params(network):
- """Create custom config options for nengo_loihi"""
+ """Add nengo_loihi config option to *network*.
+
+ The following options will be added:
+
+ `nengo.Ensemble`
+ * ``on_chip``: Whether the ensemble should be simulated
+ on a Loihi chip. Marking specific ensembles for simulation
+ off of a Loihi chip can help with debugging.
+
+ Examples
+ --------
+
+ >>> with nengo.Network() as model:
+ ... ens = nengo.Ensemble(10, dimensions=1)
+ ... # By default, ens will be placed on a Loihi chip
+ ... nengo_loihi.add_params(model)
+ ... model.config[ens].on_chip = False
+ ... # Now it will be simulated with Nengo
+
+ """
config = network.config
cfg = config[nengo.Ensemble]
@@ -13,5 +32,14 @@ def add_params(network):
def set_defaults():
+ """Modify Nengo's default parameters for better performance with Loihi.
+
+ The following defaults will be modified:
+
+ `nengo.Ensemble`
+ * ``max_rates``: Set to ``Uniform(low=100, high=120)``
+ * ``intercepts``: Set to ``Uniform(low=-0.5, high=0.5)``
+
+ """
nengo.Ensemble.max_rates.default = nengo.dists.Uniform(100, 120)
nengo.Ensemble.intercepts.default = nengo.dists.Uniform(-0.5, 0.5)
diff --git a/nengo_loihi/loihi_api.py b/nengo_loihi/loihi_api.py
index 440748f41..21c657f81 100644
--- a/nengo_loihi/loihi_api.py
+++ b/nengo_loihi/loihi_api.py
@@ -314,8 +314,14 @@ def validate(self, core=None):
class VthProfile(Profile):
- VTH_MAX = 2**17 - 1
-
+ """Represents the VthProfile of a compartment (Cx).
+
+ Attributes
+ ----------
+ vth : int
+ The mantissa of the voltage threshold for a compartment. To get the
+ actual voltage threshold, this is multiplied by VTH_EXP (64).
+ """
params = ('vth',)
def __init__(self, vth):
@@ -323,7 +329,7 @@ def __init__(self, vth):
self.vth = vth
def validate(self, core=None):
- assert 0 < self.vth <= self.VTH_MAX
+ assert 0 < self.vth <= VTH_MAN_MAX
# if core is not None:
# assert self.realVth < core.dendrite_shared_cfg.v_max
diff --git a/nengo_loihi/loihi_cx.py b/nengo_loihi/loihi_cx.py
index 1e4417496..a9b408e2a 100644
--- a/nengo_loihi/loihi_cx.py
+++ b/nengo_loihi/loihi_cx.py
@@ -384,7 +384,15 @@ def get_simulator(self, seed=None):
class CxSimulator(object):
- """Numerical simulation of chip behaviour given a CxModel"""
+ """Software emulator for Loihi chip.
+
+ Parameters
+ ----------
+ model : Model
+ Model specification that will be simulated.
+ seed : int, optional (Default: None)
+ A seed for all stochastic operations done in this simulator.
+ """
def __init__(self, model, seed=None):
self.build(model, seed=seed)
@@ -393,6 +401,7 @@ def __init__(self, model, seed=None):
self._probe_filter_pos = {}
def build(self, model, seed=None): # noqa: C901
+ """Set up NumPy arrays to emulate chip memory and I/O."""
if seed is None:
seed = np.random.randint(2**31 - 1)
@@ -514,6 +523,8 @@ def noiseGen(n=self.n_cx, rng=self.rng):
self.noiseTarget = noiseTarget
def step(self): # noqa: C901
+ """Advance the simulation by 1 step (``dt`` seconds)."""
+
# --- connections
self.q[:-1] = self.q[1:] # advance delays
self.q[-1] = 0
@@ -618,6 +629,13 @@ def step(self): # noqa: C901
self.t += 1
def run_steps(self, steps):
+ """Simulate for the given number of ``dt`` steps.
+
+ Parameters
+ ----------
+ steps : int
+ Number of steps to run the simulation for.
+ """
for _ in range(steps):
self.step()
diff --git a/nengo_loihi/loihi_interface.py b/nengo_loihi/loihi_interface.py
index acc2fde3a..e79399e22 100644
--- a/nengo_loihi/loihi_interface.py
+++ b/nengo_loihi/loihi_interface.py
@@ -363,9 +363,22 @@ def build_probe(n2core, core, group, probe, cx_idxs):
class LoihiSimulator(object):
+ """Simulator to place a Model onto a Loihi board and run it.
+
+ Parameters
+ ----------
+ cx_model : CxModel
+ Model specification that will be placed on the Loihi board.
+ seed : int, optional (Default: None)
+ A seed for stochastic operations.
+
+ .. warning :: Setting the seed has no effect on stochastic
+ operations run on the Loihi board.
+ snip_max_spikes_per_step : int, optional (Default: 50)
+ Maximum number of spikes that can be sent through
+ the nengo_io_h2c channel on one timestep.
"""
- Simulator to place CxModel onto board and run it.
- """
+
def __init__(self, cx_model, seed=None, snip_max_spikes_per_step=50):
self.n2board = None
self._probe_filters = {}
@@ -477,7 +490,7 @@ def get_probe_output(self, probe):
x = x if cx_probe.weights is None else np.dot(x, cx_probe.weights)
return self._filter_probe(cx_probe, x)
- def create_io_snip(self):
+ def create_io_snip(self, io_steps):
# snips must be created before connecting
assert not self.is_connected()
@@ -496,6 +509,17 @@ def create_io_snip(self):
if core.learning_coreid:
n_errors += 1
+ n_inputs = 0
+ for chip in self.board.chips:
+ for core in chip.cores:
+ for inp, cx_ids in core.iterate_inputs():
+ axon_ids = inp.axon_ids[0]
+ # axon_ids are in pairs, due to the positive and
+ # negative channels, but we just want the number
+ # of values to send
+ assert len(axon_ids) % 2 == 0
+ n_inputs += len(axon_ids) // 2
+
n_outputs = 1
probes = []
cores = set()
@@ -520,6 +544,8 @@ def create_io_snip(self):
code = template.render(
n_outputs=n_outputs,
n_errors=n_errors,
+ n_inputs=n_inputs,
+ io_steps=io_steps,
cores=cores,
probes=probes,
)
@@ -546,13 +572,15 @@ def create_io_snip(self):
phase="preLearnMgmt",
)
- size = self.snip_max_spikes_per_step * 2 + 1 + n_errors*2
+ size = n_inputs + n_errors*2
logger.debug("Creating nengo_io_h2c channel")
+ # double the size of the buffers so we don't have to be in lock-step
self.nengo_io_h2c = self.n2board.createChannel(b'nengo_io_h2c',
- "int", size)
+ "int", size*2)
logger.debug("Creating nengo_io_c2h channel")
+ # double the size of the buffers so we don't have to be in lock-step
self.nengo_io_c2h = self.n2board.createChannel(b'nengo_io_c2h',
- "int", n_outputs)
+ "int", n_outputs*2)
self.nengo_io_h2c.connect(None, nengo_io)
self.nengo_io_c2h.connect(nengo_io, None)
self.nengo_io_c2h_count = n_outputs
diff --git a/nengo_loihi/simulator.py b/nengo_loihi/simulator.py
index 14a908f3c..f35eff87e 100644
--- a/nengo_loihi/simulator.py
+++ b/nengo_loihi/simulator.py
@@ -72,6 +72,71 @@ def reset(self):
class Simulator(object):
+ """Nengo Loihi simulator for Loihi hardware and emulator.
+
+ The simulator takes a `nengo.Network` and builds internal data structures
+ to run the model defined by that network on Loihi emulator or hardware.
+ Run the simulator with the `.Simulator.run` method, and access probed data
+ through the ``data`` attribute.
+
+ Building and running the simulation allocates resources. To properly free
+ these resources, call the `.Simulator.close` method. Alternatively,
+ `.Simulator.close` will automatically be called if you use
+ ``with`` syntax::
+
+ with nengo_loihi.Simulator(my_network) as sim:
+ sim.run(0.1)
+ print(sim.data[my_probe])
+
+ Note that the ``data`` attribute is still accessible even when a simulator
+ has been closed. Running the simulator, however, will raise an error.
+
+ Parameters
+ ----------
+ network : Network or None
+ A network object to be built and then simulated. If None,
+ then the *model* parameter must be provided instead.
+ dt : float, optional (Default: 0.001)
+ The length of a simulator timestep, in seconds.
+ seed : int, optional (Default: None)
+ A seed for all stochastic operators used in this simulator.
+ Will be set to ``network.seed + 1`` if not given.
+ model : Model, optional (Default: None)
+ A `.Model` that contains build artifacts to be simulated.
+ Usually the simulator will build this model for you; however, if you
+ want to build the network manually, or you want to inject build
+ artifacts in the model before building the network, then you can
+ pass in a `.Model` instance.
+ precompute : bool, optional (Default: True)
+ Whether model inputs should be precomputed to speed up simulation.
+ When *precompute* is False, the simulator will be run one step
+ at a time in order to use model outputs as inputs in other parts
+ of the model.
+ target : str, optional (Default: None)
+ Whether the simulator should target the emulator (``'sim'``) or
+ Loihi hardware (``'loihi'``). If None, *target* will default to
+ ``'loihi'`` if NxSDK is installed, and the emulator if it is not.
+
+ Attributes
+ ----------
+ closed : bool
+ Whether the simulator has been closed.
+ Once closed, it cannot be reopened.
+ data : ProbeDict
+ The dictionary mapping from Nengo objects to the data associated
+ with those objects. In particular, each `nengo.Probe` maps to
+ the data probed while running the simulation.
+ model : Model
+ The `.Model` containing the data structures necessary for
+ simulating the network.
+ precompute : bool
+ Whether model inputs should be precomputed to speed up simulation.
+ When *precompute* is False, the simulator will be run one step
+ at a time in order to use model outputs as inputs in other parts
+ of the model.
+
+ """
+
# 'unsupported' defines features unsupported by a simulator.
# The format is a list of tuples of the form `(test, reason)` with `test`
# being a string with wildcards (*, ?, [abc], [!abc]) matched against Nengo
@@ -82,7 +147,8 @@ class Simulator(object):
unsupported = []
def __init__(self, network, dt=0.001, seed=None, model=None, # noqa: C901
- precompute=True, target=None):
+ precompute=True, target=None,
+ snip_io_steps=1):
self.closed = True # Start closed in case constructor raises exception
if model is None:
@@ -92,6 +158,14 @@ def __init__(self, network, dt=0.001, seed=None, model=None, # noqa: C901
self.model = model
self.precompute = precompute
+ self.snip_io_steps = snip_io_steps
+
+ if target is None:
+ try:
+ import nxsdk
+ target = 'loihi'
+ except ImportError:
+ target = 'sim'
self.chip2host_sent_steps = 0 # how many timesteps have been sent
if network is not None:
@@ -112,8 +186,10 @@ def __init__(self, network, dt=0.001, seed=None, model=None, # noqa: C901
progress_bar=False)
else:
# we need online communication
+ spiking_interneurons_on_host = target != 'loihi'
host, chip, h2c, c2h_params, c2h = splitter.split(
- network, INTER_RATE, INTER_N)
+ network, INTER_RATE, INTER_N,
+ spiking_interneurons_on_host=spiking_interneurons_on_host)
network = chip
self.chip2host_receivers = c2h
self.host2chip_senders = h2c
@@ -140,13 +216,6 @@ def __init__(self, network, dt=0.001, seed=None, model=None, # noqa: C901
self.loihi = None
self.simulator = None
- if target is None:
- try:
- import nxsdk
- target = 'loihi'
- except ImportError:
- target = 'sim'
-
if target == 'simreal':
logger.info("Using real-valued simulator")
self.simulator = self.model.get_simulator(seed=seed)
@@ -211,7 +280,7 @@ def __exit__(self, exc_type, exc_value, traceback):
@property
def dt(self):
- """(float) The time step of the simulator."""
+ """(float) The step time of the simulator."""
return self.model.dt
@dt.setter
@@ -233,10 +302,9 @@ def close(self):
Any call to `.Simulator.run`, `.Simulator.run_steps`,
`.Simulator.step`, and `.Simulator.reset` on a closed simulator raises
- a `.SimulatorClosed` exception.
+ a ``SimulatorClosed`` exception.
"""
self.closed = True
- self.signals = None # signals may no longer exist on some backends
def _probe(self):
"""Copy all probed signals to buffers."""
@@ -263,8 +331,6 @@ def _probe(self):
assert len(self._probe_outputs[probe]) == self.n_steps
def _probe_step_time(self):
- # self._n_steps = self.signals[self.model.step].item()
- # self._time = self.signals[self.model.time].item()
self._time = self._n_steps * self.dt
def reset(self, seed=None):
@@ -286,23 +352,28 @@ def reset(self, seed=None):
self._n_steps = 0
- # reset signals
- # for key in self.signals:
- # self.signals.reset(key)
-
- # rebuild steps (resets ops with their own state, like Processes)
- # self.rng = np.random.RandomState(self.seed)
- # self._steps = [op.make_step(self.signals, self.dt, self.rng)
- # for op in self._step_order]
-
# clear probe data
for probe in self.model.probes:
self._probe_outputs[probe] = []
self.data.reset()
- # self._probe_step_time()
-
def run(self, time_in_seconds):
+ """Simulate for the given length of time.
+
+ If the given length of time is not a multiple of ``dt``,
+ it will be rounded to the nearest ``dt``. For example, if ``dt``
+ is 0.001 and ``run`` is called with ``time_in_seconds=0.0006``,
+ the simulator will advance one timestep, resulting in the actual
+ simulator time being 0.001.
+
+ The given length of time must be positive. The simulator cannot
+ be run backwards.
+
+ Parameters
+ ----------
+ time_in_seconds : float
+ Amount of time to run the simulation for. Must be positive.
+ """
if time_in_seconds < 0:
raise ValidationError("Must be positive (got %g)"
% (time_in_seconds,), attr="time_in_seconds")
@@ -322,7 +393,14 @@ def step(self):
self.run_steps(1)
- def run_steps(self, steps):
+ def run_steps(self, steps): # noqa: C901
+ """Simulate for the given number of ``dt`` steps.
+
+ Parameters
+ ----------
+ steps : int
+ Number of steps to run the simulation for.
+ """
if self.closed:
raise SimulatorClosed("Simulator cannot run because it is closed.")
@@ -349,17 +427,19 @@ def run_steps(self, steps):
self.handle_chip2host_communications()
self.host_post_sim.run_steps(steps)
elif self.host_sim is not None:
- self.loihi.create_io_snip()
+ self.loihi.create_io_snip(io_steps=self.snip_io_steps)
self.loihi.run_steps(steps, async=True)
+
+ targets = self.determine_spike_targets()
+ self.loihi.nengo_io_h2c.write(len(targets), targets)
+
for i in range(steps):
self.host_sim.run_steps(1)
- self.handle_host2chip_communications()
- self.handle_chip2host_communications()
+ if i % self.snip_io_steps == 0:
+ self.handle_host2chip_communications()
+ self.handle_chip2host_communications()
logger.info("Waiting for completion")
- self.loihi.nengo_io_h2c.write(1, [0])
- self.loihi.nengo_io_h2c.write(1, [0])
- self.loihi.nengo_io_h2c.write(1, [0])
self.loihi.wait_for_completion()
logger.info("done")
else:
@@ -369,11 +449,37 @@ def run_steps(self, steps):
logger.info("Finished running for %d steps", steps)
self._probe()
+ def determine_spike_targets(self):
+ spike_targets = []
+ for sender, receiver in self.host2chip_senders.items():
+ if not isinstance(receiver, splitter.PESModulatoryTarget):
+ inp = receiver.cx_spike_input
+ assert len(inp.axon_ids) == 1 # TODO: handle len>1
+ axon_ids = inp.axon_ids[0]
+ # the first half are the positive channels and the second
+ # half are the negative channels
+ half = len(axon_ids)//2
+ for i in range(len(axon_ids)//2):
+ # we currently only handle one Loihi chip, so assert
+ # that chip_id is zero
+ assert axon_ids[i][0] == 0
+ # the core_ids of the positive and negative channels
+ # should be the same
+ assert axon_ids[i][1] == axon_ids[half+i][1]
+
+ spike_targets.extend((
+ axon_ids[i][1], # the core for this input
+ axon_ids[i][2], # axon_id for the positive channel
+ axon_ids[half+i][2] # axon_id for the negative channel
+ ))
+ return spike_targets
+
def handle_host2chip_communications(self): # noqa: C901
if self.simulator is not None:
if self.precompute or self.host_sim is not None:
# go through the list of host2chip connections
for sender, receiver in self.host2chip_senders.items():
+ learning_rate = 50 # This is set to match hardware
if isinstance(receiver, splitter.PESModulatoryTarget):
for t, x in sender.queue:
probe = receiver.target
@@ -383,7 +489,8 @@ def handle_host2chip_communications(self): # noqa: C901
z = self.simulator.z[dec_syn]
x = np.hstack([-x, x])
- delta_w = np.outer(z, x)
+
+ delta_w = np.outer(z, x) * learning_rate
for i, w in enumerate(dec_syn.weights):
w += delta_w[i].astype('int32')
@@ -436,33 +543,16 @@ def handle_host2chip_communications(self): # noqa: C901
del sender.queue[:]
else:
+ latest = None
for t, x in sender.queue:
- receiver.receive(t, x)
+ latest = x
del sender.queue[:]
- spike_input = receiver.cx_spike_input
- sent_count = spike_input.sent_count
- axon_ids = spike_input.axon_ids
- spikes = spike_input.spikes
- while sent_count < len(spikes):
- for j, s in enumerate(spikes[sent_count]):
- if s:
- for output_axon in axon_ids:
- to_send.append(output_axon[j])
- sent_count += 1
- spike_input.sent_count = sent_count
-
- max_spikes = self.loihi.snip_max_spikes_per_step
- if len(to_send) > max_spikes:
- warnings.warn("Too many spikes (%d) sent in one time "
- "step. Increase the value of "
- "snip_max_spikes_per_step (currently "
- "set to %d)" % (len(to_send), max_spikes))
- del to_send[max_spikes:]
-
- msg = [len(to_send)]
- for spike in to_send:
- assert spike[0] == 0
- msg.extend(spike[1:3])
+ if latest is not None:
+ msg = (x * (1 << 15)).astype(int)
+ to_send.extend(msg.tolist())
+
+ msg = []
+ msg.extend(to_send)
for error in errors:
assert len(error) == 2
msg.extend(error)
@@ -541,11 +631,12 @@ def handle_chip2host_communications(self): # noqa: C901
receiver.receive(self.dt*(time_step), x)
else:
# onchip probes
- self.snip_probes[probe].append(x)
+ x = np.repeat([x], self.snip_io_steps, axis=0)
+ self.snip_probes[probe].extend(x)
else:
raise NotImplementedError()
- def trange(self, dt=None):
+ def trange(self, sample_every=None):
"""Create a vector of times matching probed data.
Note that the range does not start at 0 as one might expect, but at
@@ -553,10 +644,10 @@ def trange(self, dt=None):
Parameters
----------
- dt : float, optional (Default: None)
+ sample_every : float, optional (Default: None)
The sampling period of the probe to create a range for.
- If None, the simulator's ``dt`` will be used.
+ If None, a time value for every ``dt`` will be produced.
"""
- dt = self.dt if dt is None else dt
- n_steps = int(self.n_steps * (self.dt / dt))
- return dt * np.arange(1, n_steps + 1)
+ period = 1 if sample_every is None else sample_every / self.dt
+ steps = np.arange(1, self.n_steps + 1)
+ return self.dt * steps[steps % period < 1]
diff --git a/nengo_loihi/snips/nengo_io.c.template b/nengo_loihi/snips/nengo_io.c.template
index 14c6c55ba..daf3e58f6 100644
--- a/nengo_loihi/snips/nengo_io.c.template
+++ b/nengo_loihi/snips/nengo_io.c.template
@@ -2,9 +2,17 @@
#include
#include "nengo_io.h"
+#define N_INPUTS {{ n_inputs }}
#define N_OUTPUTS {{ n_outputs }}
#define N_ERRORS {{ n_errors }}
+#define IDX_SPIKE_TARGETS (N_ERRORS * 2)
+#define IDX_VALUES (IDX_SPIKE_TARGETS + (N_INPUTS*3))
+#define IDX_POS_ACCUMULATORS (IDX_VALUES + (N_INPUTS*3)*4)
+#define IDX_NEG_ACCUMULATORS (IDX_POS_ACCUMULATORS + (N_INPUTS*3)*4)
+
+#define IO_STEPS {{ io_steps }}
+
int guard_io(runState *s) {
return 1;
}
@@ -17,41 +25,100 @@ void nengo_io(runState *s) {
int inChannel = getChannelID("nengo_io_h2c");
int outChannel = getChannelID("nengo_io_c2h");
int32_t count[1];
- int32_t spike[2];
+ int32_t spike[3];
int32_t error[2];
int32_t output[N_OUTPUTS];
+ int32_t *value = (int32_t*)(s->userData+IDX_VALUES);
+ int32_t *pos_accum = (int32_t*)(s->userData+IDX_POS_ACCUMULATORS);
+ int32_t *neg_accum = (int32_t*)(s->userData+IDX_NEG_ACCUMULATORS);
+
if (inChannel == -1 || outChannel == -1) {
printf("Got an invalid channel ID\n");
return;
}
- if (s->time % 100 == 0) {
+ if (s->time == 1) {
+ printf("initializing\n");
+ for (int i=0; iuserData[IDX_SPIKE_TARGETS+(i*3)+0] = spike[0]; // core id
+ s->userData[IDX_SPIKE_TARGETS+(i*3)+1] = spike[1]; // pos axon
+ s->userData[IDX_SPIKE_TARGETS+(i*3)+2] = spike[2]; // neg axon
+ printf(" spike target %d: (%d %d %d)\n",
+ i, spike[0], spike[1], spike[2]);
+ }
+ }
+
+ if (s->time % 1000 == 0) {
printf("time %d\n", s->time);
}
- readChannel(inChannel, count, 1);
+ //readChannel(inChannel, count, 1);
// printf("count %d\n", count[0]);
- for (int i=0; i < count[0]; i++) {
- readChannel(inChannel, spike, 2);
- // printf("send spike %d.%d\n", spike[0], spike[1]);
- coreId = (CoreId) { .id=spike[0] };
- nx_send_discrete_spike(s->time, coreId, spike[1]);
+ // --- read input values (once every IO_STEPS)
+ // Note that we do this at the *start* of an IO_STEP period (since
+ // s->time starts at 1), so that we don't have a period at the beginning
+ // of the simulation where we are ignoring the input.
+ if ((IO_STEPS==1) || (s->time % IO_STEPS == 1)) {
+ for (int i=0; i < N_INPUTS; i++) {
+ readChannel(inChannel, spike, 1);
+ //printf(" %d: stim value %d.%d\n", s->time, i, spike[0]);
+ value[i] = spike[0]; // discretized version of the real value
+ // to be used as input
+ }
+
+ // Communicate with learning snip
+ for (int i=0; i < N_ERRORS; i++) {
+ readChannel(inChannel, error, 2);
+ // printf("send error %d.%d\n", error[0], error[1]);
+ s->userData[0] = error[0];
+ s->userData[1] = error[1];
+ }
}
- // Communicate with learning snip
- for (int i=0; i < N_ERRORS; i++) {
- readChannel(inChannel, error, 2);
- // printf("send error %d.%d\n", error[0], error[1]);
- s->userData[0] = error[0];
- s->userData[1] = error[1];
+ // --- turn input values into spikes using accumulators
+ for (int i=0; i < N_INPUTS; i++) {
+ //printf("%d value:%d accum:%d\n");
+ pos_accum[i] += (value[i] + (1<<15));
+ if (pos_accum[i] >= (1<<16)) {
+ uint8_t core = s->userData[IDX_SPIKE_TARGETS+(i*3)];
+ uint8_t cx = s->userData[IDX_SPIKE_TARGETS+(i*3)+1];
+ //printf(" spike %d.%d\n", core, cx);
+
+ coreId = (CoreId) { .id=core };
+ nx_send_discrete_spike(s->time, coreId, cx);
+
+ pos_accum[i] -= (1<<16);
+ } else if (pos_accum[i] < 0) {
+ pos_accum[i] = 0;
+ }
+
+ neg_accum[i] += (-value[i] + (1<<15));
+ if (neg_accum[i] >= (1<<16)) {
+ uint8_t core = s->userData[IDX_SPIKE_TARGETS+(i*3)];
+ uint8_t cx = s->userData[IDX_SPIKE_TARGETS+(i*3)+2];
+ //printf(" spike %d.%d\n", core, cx);
+
+ coreId = (CoreId) { .id=core };
+ nx_send_discrete_spike(s->time, coreId, cx);
+
+ neg_accum[i] -= (1<<16);
+ } else if (neg_accum[i] < 0) {
+ neg_accum[i] = 0;
+ }
}
- output[0] = s->time;
+ if (N_OUTPUTS > 0) {
+ // --- write output values (once every IO_STEPS)
+ if ((IO_STEPS==1) || (s->time % IO_STEPS == 1)) {
+ output[0] = s->time;
{% for n_out, core, cx in probes %}
- output[{{ n_out }}] = core{{ core }}->cx_state[{{ cx }}].V;
+ output[{{ n_out }}] = core{{ core }}->cx_state[{{ cx }}].V;
{% endfor %}
- writeChannel(outChannel, output, N_OUTPUTS);
+ writeChannel(outChannel, output, N_OUTPUTS);
+ }
+ }
}
diff --git a/nengo_loihi/splitter.py b/nengo_loihi/splitter.py
index a6bc7b078..68fa9d28c 100644
--- a/nengo_loihi/splitter.py
+++ b/nengo_loihi/splitter.py
@@ -91,7 +91,8 @@ def __init__(self, dimensions):
super(ChipReceiveNeurons, self).__init__(dimensions, dimensions)
-def split(model, inter_rate, inter_n): # noqa: C901
+def split(model, inter_rate, inter_n, # noqa: C901
+ spiking_interneurons_on_host=True):
"""Split a model into code running on the host and on-chip"""
logger.info("Splitting model into host and chip parts")
@@ -178,16 +179,11 @@ def split(model, inter_rate, inter_n): # noqa: C901
receive = ChipReceiveNode(dim * 2, size_out=dim)
nengo.Connection(receive, c.post, synapse=c.synapse)
with host:
+ # TODO: check whether this max_rate makes sense and
+ # whether it should be dependent on dt
max_rate = inter_rate * inter_n
assert max_rate <= 1000
- logger.debug("Creating NIF ensemble for %s", c)
- ens = nengo.Ensemble(
- 2 * dim, dim, neuron_type=NIF(tau_ref=0.0),
- encoders=np.vstack([np.eye(dim), -np.eye(dim)]),
- max_rates=[max_rate] * dim + [max_rate] * dim,
- intercepts=[-1] * dim + [-1] * dim)
-
# scale the input spikes based on the radius of the
# target ensemble
if isinstance(c.post_obj, nengo.Ensemble):
@@ -195,16 +191,33 @@ def split(model, inter_rate, inter_n): # noqa: C901
else:
scaling = 1.0
- logger.debug("Creating HostSendNode for %s", c)
- send = HostSendNode(dim * 2)
- nengo.Connection(c.pre, ens,
- function=c.function,
- solver=c.solver,
- eval_points=c.eval_points,
- scale_eval_points=c.scale_eval_points,
- synapse=None,
- transform=c.transform * scaling)
- nengo.Connection(ens.neurons, send, synapse=None)
+ if spiking_interneurons_on_host:
+ logger.debug("Creating NIF ensemble for %s", c)
+ ens = nengo.Ensemble(
+ 2 * dim, dim, neuron_type=NIF(tau_ref=0.0),
+ encoders=np.vstack([np.eye(dim), -np.eye(dim)]),
+ max_rates=[max_rate] * dim + [max_rate] * dim,
+ intercepts=[-1] * dim + [-1] * dim)
+ logger.debug("Creating HostSendNode for %s", c)
+ send = HostSendNode(dim * 2)
+ nengo.Connection(c.pre, ens,
+ function=c.function,
+ solver=c.solver,
+ eval_points=c.eval_points,
+ scale_eval_points=c.scale_eval_points,
+ synapse=None,
+ transform=c.transform * scaling)
+ nengo.Connection(ens.neurons, send, synapse=None)
+ else:
+ logger.debug("Creating HostSendNode for %s", c)
+ send = HostSendNode(dim)
+ nengo.Connection(c.pre, send,
+ function=c.function,
+ solver=c.solver,
+ eval_points=c.eval_points,
+ scale_eval_points=c.scale_eval_points,
+ synapse=None,
+ transform=c.transform * scaling)
host2chip_senders[send] = receive
elif pre_onchip and not post_onchip:
dim = c.size_out
diff --git a/nengo_loihi/tests/test_examples.py b/nengo_loihi/tests/test_examples.py
index eefa2f8af..efcc625ad 100644
--- a/nengo_loihi/tests/test_examples.py
+++ b/nengo_loihi/tests/test_examples.py
@@ -7,7 +7,7 @@
examples_dir = os.path.realpath(os.path.join(
- os.path.dirname(__file__), os.pardir, os.pardir, "examples"
+ os.path.dirname(__file__), os.pardir, os.pardir, "docs", "examples"
))
diff --git a/nengo_loihi/tests/test_snips.py b/nengo_loihi/tests/test_snips.py
index ae5f6b9df..c670757b7 100644
--- a/nengo_loihi/tests/test_snips.py
+++ b/nengo_loihi/tests/test_snips.py
@@ -1,5 +1,6 @@
import nengo
import pytest
+import numpy as np
@pytest.mark.skipif(pytest.config.getoption("--target") != "loihi",
@@ -13,3 +14,32 @@ def test_snip_input_count(Simulator, seed, plt):
with Simulator(model, precompute=False) as sim:
with pytest.warns(UserWarning, match="Too many spikes"):
sim.run(0.01)
+
+
+@pytest.mark.skipif(pytest.config.getoption("--target") != "loihi",
+ reason="snips only exist on loihi")
+@pytest.mark.parametrize("snip_io_steps", [1, 10])
+def test_snip_skipping(Simulator, seed, plt, snip_io_steps):
+ dt = 0.001
+ with nengo.Network(seed=seed) as model:
+ a = nengo.Ensemble(200, 1)
+
+ def stim_func(t):
+ step = int(t/dt)
+ if step % snip_io_steps == 1 % snip_io_steps:
+ return 0
+ else:
+ return 1
+ stim = nengo.Node(stim_func)
+ nengo.Connection(stim, a, synapse=None)
+ output = nengo.Node(None, 1)
+ nengo.Connection(a, output, synapse=0.1)
+ p_output = nengo.Probe(output)
+ p_a = nengo.Probe(a, synapse=0.1)
+
+ with Simulator(model, dt=dt, precompute=False,
+ snip_io_steps=snip_io_steps) as sim:
+ sim.run(1.0)
+
+ assert np.allclose(sim.data[p_a], 0, atol=0.1)
+ assert np.allclose(sim.data[p_output], 0, atol=0.1)
diff --git a/nengo_loihi/tests/test_splitter.py b/nengo_loihi/tests/test_splitter.py
new file mode 100644
index 000000000..6c3f035f3
--- /dev/null
+++ b/nengo_loihi/tests/test_splitter.py
@@ -0,0 +1,108 @@
+import numpy as np
+
+import nengo
+import nengo.utils.matplotlib
+import pytest
+
+import nengo_loihi
+import nengo_loihi.splitter as splitter
+
+
+def test_interneuron_structures():
+ D = 2
+ radius = 2.0
+ with nengo.Network() as model:
+ nengo_loihi.add_params(model)
+
+ stim = nengo.Node(np.eye(D)[0])
+ ens = nengo.Ensemble(n_neurons=10, dimensions=D, radius=radius)
+
+ def conn_func(x):
+ return x
+ solver = nengo.solvers.NoSolver(None)
+ synapse = nengo.synapses.Lowpass(0.1)
+ transform = np.random.uniform(-1, 1, (D, D))
+ nengo.Connection(stim, ens,
+ function=conn_func,
+ solver=solver,
+ synapse=synapse,
+ transform=transform)
+
+ inter_rate = 1000
+ inter_n = 1
+
+ host, chip, _, _, _ = splitter.split(model, inter_rate, inter_n,
+ spiking_interneurons_on_host=True)
+
+ assert len(host.all_ensembles) == 1
+ assert len(host.all_connections) == 2
+ conn = host.connections[0]
+ assert conn.pre is stim
+ assert conn.function is conn_func
+ assert conn.solver is solver
+ assert conn.synapse is None
+ assert np.allclose(conn.transform, transform / radius)
+
+ host, chip, _, _, _ = splitter.split(model, inter_rate, inter_n,
+ spiking_interneurons_on_host=False)
+
+ assert len(host.all_ensembles) == 0
+ assert len(host.all_connections) == 1
+ conn = host.connections[0]
+ assert conn.pre is stim
+ assert conn.function is conn_func
+ assert conn.solver is solver
+ assert conn.synapse is None
+ assert np.allclose(conn.transform, transform / radius)
+
+
+def test_no_interneuron_input():
+ with nengo.Network() as model:
+ nengo_loihi.add_params(model)
+
+ stim = nengo.Node(np.sin)
+ ens = nengo.Ensemble(n_neurons=1, dimensions=1)
+ nengo.Connection(stim, ens, synapse=0.1)
+ probe = nengo.Probe(stim)
+
+ host, chip, h2c, _, _ = splitter.split(model, inter_rate=1000, inter_n=1,
+ spiking_interneurons_on_host=False)
+
+ assert len(h2c) == 1
+ sender, receiver = list(h2c.items())[0]
+
+ with nengo.Simulator(host) as sim:
+ sim.run(1.0)
+
+ assert np.allclose(sim.trange(), [q[0] for q in sender.queue])
+ assert np.allclose(sim.data[probe], [q[1] for q in sender.queue])
+
+
+@pytest.mark.parametrize('precompute', [False, True])
+def test_input_interneurons_running(Simulator, allclose, plt, precompute):
+ synapse = 0.1
+ with nengo.Network() as model:
+ stim = nengo.Node(lambda t: 1 if t % 0.5 < 0.25 else 0)
+ ens = nengo.Ensemble(n_neurons=1, dimensions=1,
+ encoders=[[1]],
+ intercepts=[0],
+ max_rates=[40])
+ c = nengo.Connection(stim, ens, synapse=synapse)
+ p_stim = nengo.Probe(stim)
+ p_neurons = nengo.Probe(ens.neurons, synapse=0.1)
+ with Simulator(model, precompute=precompute) as sim:
+ sim.run(1.0)
+ c.synapse = None # TODO: input synapses are currently ignored!
+ with nengo.Simulator(model) as ref:
+ ref.run(1.0)
+
+ plt.plot(sim.trange(), sim.data[p_neurons], label='nengo_loihi')
+ plt.plot(sim.trange(), ref.data[p_neurons], label='nengo')
+ plt.legend(loc='best')
+ plt.twinx()
+ plt.plot(sim.trange(), sim.data[p_stim])
+
+ rmse = np.sqrt(np.mean((sim.data[p_neurons]-ref.data[p_neurons])**2))
+ assert rmse < 5
+
+ assert allclose(sim.data[p_neurons], ref.data[p_neurons], atol=11.0)
diff --git a/sandbox/dl/mnist_pseudo_conv.py b/sandbox/dl/mnist_pseudo_conv.py
new file mode 100644
index 000000000..38d8f01d0
--- /dev/null
+++ b/sandbox/dl/mnist_pseudo_conv.py
@@ -0,0 +1,207 @@
+# TODO: add some more advanced discretization logic, or modify the training
+# in some way that will result in weights more amenable to discretization
+
+# TODO: I believe the performance used to be better, (currently around 12%
+# error) and was negatively impacted by some recent change, but need to
+# track that down
+
+import collections
+import gzip
+import os
+import pickle
+from urllib.request import urlretrieve
+import zipfile
+
+import numpy as np
+import matplotlib.pyplot as plt
+import tensorflow as tf
+
+import nengo
+import nengo_dl
+import nengo_loihi
+
+# load mnist dataset
+if not os.path.exists('mnist.pkl.gz'):
+ urlretrieve('http://deeplearning.net/data/mnist/mnist.pkl.gz',
+ 'mnist.pkl.gz')
+
+with gzip.open('mnist.pkl.gz') as f:
+ train_data, _, test_data = pickle.load(f, encoding="latin1")
+train_data = list(train_data)
+test_data = list(test_data)
+for data in (train_data, test_data):
+ one_hot = np.zeros((data[0].shape[0], 10))
+ one_hot[np.arange(data[0].shape[0]), data[1]] = 1
+ data[1] = one_hot
+
+
+def pseudo_conv(input, input_shape, kernel_shape, kernel_stride, n_filters):
+ """Create a set of ensembles with sparsely tiled connections from the
+ input."""
+
+ input_inds = np.reshape(np.arange(len(input)), input_shape)
+
+ row_range = np.arange(0, input_shape[0] - kernel_shape[0] + 1,
+ kernel_stride[0])
+ col_range = np.arange(0, input_shape[1] - kernel_shape[1] + 1,
+ kernel_stride[1])
+ output = nengo.Node(size_in=len(row_range) * len(col_range) * n_filters)
+ ensembles = []
+ for i, row in enumerate(row_range):
+ for j, col in enumerate(col_range):
+ ens = nengo.Ensemble(n_filters, 1).neurons
+ ensembles.append(ens)
+
+ input_idxs = np.ravel(input_inds[
+ row:row + kernel_shape[0],
+ col:col + kernel_shape[1]])
+
+ nengo.Connection(input[input_idxs], ens,
+ transform=nengo_dl.dists.He())
+
+ output_idx = (i * len(col_range) + j) * n_filters
+ c = nengo.Connection(
+ ens, output[output_idx:output_idx + n_filters])
+
+ # set connections to the passthrough nodes non-trainable
+ conf = nengo.Config.context[-1]
+ conf[c].trainable = False
+
+ return output, ensembles
+
+
+# build the network
+with nengo.Network(seed=0) as net:
+ # set up default parameters
+ net.config[nengo.Ensemble].neuron_type = nengo.LIFRate(
+ amplitude=0.01)
+ net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
+ net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
+ net.config[nengo.Connection].synapse = None
+ nengo_dl.configure_settings(trainable=None)
+ nengo_loihi.add_params(net)
+
+ # input node
+ presentation_time = 0.1
+ inp = nengo.Node(
+ nengo.processes.PresentInput(test_data[0], presentation_time),
+ size_in=0, size_out=28 * 28)
+
+ # convolutional layer
+ conv_layer, ens = pseudo_conv(inp, (28, 28, 1), (7, 7), (3, 3), 64)
+
+ # dense layer
+ dense_layer = nengo.Ensemble(128, 1).neurons
+ nengo.Connection(conv_layer, dense_layer, transform=nengo_dl.dists.He())
+ # note: we could connect directly ensemble-to-ensemble (rather than
+ # going through a passthrough node), but we run out of synapse memory
+ # for e in ens:
+ # nengo.Connection(e, dense_layer, transform=nengo_dl.dists.He())
+
+ # linear readout
+ out = nengo.Node(label='out', size_in=10)
+ nengo.Connection(dense_layer, out, transform=nengo_dl.dists.He())
+
+ out_p = nengo.Probe(out)
+
+ # debugging probes
+ # inp_p = nengo.Probe(inp, label="input")
+ # conv_p = nengo.Probe(conv_layer, label="conv")
+ # ens_p = nengo.Probe(ens[0], label="ens")
+ # dense_p = nengo.Probe(dense_layer, label="dense")
+
+# set up training/test data
+train_inputs = {inp: train_data[0][:, None, :]}
+train_targets = {out_p: train_data[1][:, None, :]}
+test_inputs = {inp: test_data[0][:, None, :]}
+test_targets = {out_p: test_data[1][:, None, :]}
+
+
+def crossentropy(outputs, targets):
+ """Cross-entropy loss function (for training)."""
+ return tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs,
+ labels=targets)
+
+
+def classification_error(outputs, targets):
+ """Classification error function (for testing)."""
+ return 100 * tf.reduce_mean(
+ tf.cast(tf.not_equal(tf.argmax(outputs[:, -1], axis=-1),
+ tf.argmax(targets[:, -1], axis=-1)),
+ tf.float32))
+
+
+# train our network in NengoDL
+with nengo_dl.Simulator(net, minibatch_size=256) as sim:
+ print("error before training: %.2f%%" %
+ sim.loss(test_inputs, test_targets, classification_error))
+
+ # run training
+ sim.train(train_inputs, train_targets,
+ tf.train.RMSPropOptimizer(learning_rate=0.001),
+ objective=crossentropy,
+ n_epochs=10)
+
+ print("error after training: %.2f%%" %
+ sim.loss(test_inputs, test_targets, classification_error))
+
+ # store trained parameters back into the network
+ sim.freeze_params(net)
+
+# convert neurons to spiking LIF and add synapse to output probe
+for ens in net.all_ensembles:
+ ens.neuron_type = nengo.LIF(amplitude=0.01)
+out_p.synapse = 0.02
+
+
+def plot_results(sim):
+ """Output results from the given Simulator."""
+
+ # classification error
+ data = np.reshape(sim.data[out_p],
+ (-1, int(presentation_time / sim.dt), 10))
+ print("error: %.2f%%" % (100 * np.mean(
+ np.argmax(data[:, -1], axis=-1) !=
+ np.argmax(test_data[1][:data.shape[0]], axis=-1))))
+
+ # plot some examples
+ n_examples = 5
+ f, axes = plt.subplots(2, n_examples)
+ for i in range(n_examples):
+ axes[0][i].imshow(np.reshape(test_data[0][i], (28, 28)))
+
+ axes[1][i].plot(data[i])
+ if i == 0:
+ axes[1][i].legend([str(i) for i in range(10)], loc="upper left")
+ axes[1][i].set_xlabel("time")
+ axes[1][i].set_title(str(np.argmax(data[i, -1])))
+
+ # for p in (inp_p, conv_p, ens_p, dense_p):
+ # print(p)
+ # data = sim.data[p][:int(presentation_time / sim.dt)]
+ # print(np.min(data), np.mean(data), np.max(data))
+ #
+ # rates = np.sum(data > 0, axis=0) / presentation_time
+ # print(np.min(rates), np.mean(rates), np.max(rates))
+ #
+ # plt.figure()
+ # plt.plot(data)
+ # plt.title(p.label)
+
+
+# run in default nengo simulator
+print("NENGO")
+n_test = 200
+with nengo.Simulator(net) as sim:
+ sim.run(presentation_time * n_test)
+
+plot_results(sim)
+
+# run in nengo_loihi simulator
+print("NENGO_LOIHI")
+with nengo_loihi.Simulator(net, precompute=False) as sim:
+ sim.run(presentation_time * n_test)
+
+plot_results(sim)
+
+plt.show()
diff --git a/sandbox/snips/learn-multi.py b/sandbox/snips/learn-multi.py
new file mode 100644
index 000000000..e05ec4c64
--- /dev/null
+++ b/sandbox/snips/learn-multi.py
@@ -0,0 +1,40 @@
+import nengo
+import nengo_loihi
+import numpy as np
+
+D = 3
+
+with nengo.Network(seed=1) as model:
+ stim = nengo.Node(lambda t: [0.5]*D)
+
+ a = nengo.Ensemble(500, D, label='a',
+ max_rates=nengo.dists.Uniform(100, 120),
+ intercepts=nengo.dists.Uniform(-0.9, 0.9)
+ )
+ nengo.Connection(stim, a, synapse=None)
+
+ def output(t, x):
+ return x
+
+ out = nengo.Node(output, size_in=1, size_out=1)
+ c = nengo.Connection(a, out,
+ learning_rule_type=nengo.PES(learning_rate=1e-3),
+ function=lambda x: 0,
+ synapse=0.01)
+
+ error = nengo.Node(None, size_in=1)
+
+ nengo.Connection(out, error, transform=1)
+ nengo.Connection(stim[0], error, transform=-1)
+
+ nengo.Connection(error, c.learning_rule, transform=1.0)
+
+ p = nengo.Probe(out, synapse=0.05)
+
+T = 0.01
+with nengo_loihi.Simulator(model, precompute=False) as sim:
+ sim.run(T)
+
+print(sim.time_per_step)
+
+print(sim.data[p][-10:])