From 396bbf5ed88e236fe267b20915876f1be873d688 Mon Sep 17 00:00:00 2001
From: IkemOkoh <154571143+IkemOkoh@users.noreply.github.com>
Date: Fri, 31 May 2024 18:44:53 -0700
Subject: [PATCH 1/2] Update leakyparallel.py

First set of modifications.
---
 snntorch/_neurons/leakyparallel.py | 57 ++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 19 deletions(-)

diff --git a/snntorch/_neurons/leakyparallel.py b/snntorch/_neurons/leakyparallel.py
index 379e6d90..93b89b37 100644
--- a/snntorch/_neurons/leakyparallel.py
+++ b/snntorch/_neurons/leakyparallel.py
@@ -12,6 +12,8 @@ class LeakyParallel(nn.Module):
     Membrane potential decays exponentially with rate beta.
     For :math:`U[T] > U_{\\rm thr} ⇒ S[T+1] = 1`.
 
+    Whenever the neuron emits a spike we have:
+
     .. math::
 
             U[t+1] = βU[t] + I_{\\rm in}[t+1]
@@ -24,13 +26,24 @@
 
     Several differences between `LeakyParallel` and `Leaky` include:
 
-    * Negative hidden states are clipped due to the forced ReLU operation in RNN
-    * Linear weights are included in addition to recurrent weights
-    * `beta` is clipped between [0,1] and cloned to `weight_hh_l` only upon layer initialization. It is unused otherwise
-    * There is no explicit reset mechanism
-    * Several functions such as `init_hidden`, `output`, `inhibition`, and `state_quant` are unavailable in `LeakyParallel`
-    * Only the output spike is returned. Membrane potential is not accessible by default
-    * RNN uses a hidden matrix of size (num_hidden, num_hidden) to transform the hidden state vector. This would 'leak' the membrane potential between LIF neurons, and so the hidden matrix is forced to a diagonal matrix by default. This can be disabled by setting `weight_hh_enable=True`.
+    * Negative hidden states are clipped due to the
+      forced ReLU operation in RNN.
+    * Linear weights are included in addition to
+      recurrent weights.
+    * `beta` is clipped between [0,1] and cloned to
+      `weight_hh_l` only upon layer initialization.
+      It is unused otherwise.
+    * There is no explicit reset mechanism.
+    * Several functions such as `init_hidden`, `output`,
+      `inhibition`, and `state_quant` are unavailable
+      in `LeakyParallel`.
+    * Only the output spike is returned. Membrane potential
+      is not accessible by default.
+    * RNN uses a hidden matrix of size (num_hidden, num_hidden)
+      to transform the hidden state vector. This would 'leak'
+      the membrane potential between LIF neurons, and so the
+      hidden matrix is forced to a diagonal matrix by default.
+      This can be disabled by setting `weight_hh_enable=True`.
 
     Example::
 
@@ -117,22 +130,28 @@ def forward(self, x):
 
         where:
 
-        `L = sequence length`
+        * **L** = sequence length
 
-        `N = batch size`
+        * **N** = batch size
 
-        `H_{in} = input_size`
+        * **H_{in}** = input_size
 
-        `H_{out} = hidden_size`
+        * **H_{out}** = hidden_size
 
         Learnable Parameters:
-        - **rnn.weight_ih_l** (torch.Tensor) - the learnable input-hidden weights of shape (hidden_size, input_size)
-        - **rnn.weight_hh_l** (torch.Tensor) - the learnable hidden-hidden weights of the k-th layer which are sampled from `beta` of shape (hidden_size, hidden_size)
-        - **bias_ih_l** - the learnable input-hidden bias of the k-th layer, of shape (hidden_size)
-        - **bias_hh_l** - the learnable hidden-hidden bias of the k-th layer, of shape (hidden_size)
-        - **threshold** (torch.Tensor) - optional learnable thresholds - must be manually passed in, of shape `1` or`` (input_size).
-        - **graded_spikes_factor** (torch.Tensor) - optional learnable graded spike factor
+        - **rnn.weight_ih_l** (torch.Tensor) - the learnable input-hidden
+          weights of shape (hidden_size, input_size).
+        - **rnn.weight_hh_l** (torch.Tensor) - the learnable hidden-hidden
+          weights of the k-th layer, which are sampled from `beta`, of shape
+          (hidden_size, hidden_size).
+        - **bias_ih_l** - the learnable input-hidden bias of the k-th layer,
+          of shape (hidden_size).
+        - **bias_hh_l** - the learnable hidden-hidden bias of the k-th layer,
+          of shape (hidden_size).
+        - **threshold** (torch.Tensor) - optional learnable thresholds; must be
+          manually passed in, of shape `1` or `(input_size)`.
+        - **graded_spikes_factor** (torch.Tensor) - optional learnable graded
+          spike factor.
 
     """

@@ -303,4 +322,4 @@ def _threshold_buffer(self, threshold, learn_threshold):
         if learn_threshold:
             self.threshold = nn.Parameter(threshold)
         else:
-            self.register_buffer("threshold", threshold)
\ No newline at end of file
+            self.register_buffer("threshold", threshold)

From 2085cbca3569e9423878ec172f9eafd99dda46f5 Mon Sep 17 00:00:00 2001
From: Jason Eshraghian <40262130+jeshraghian@users.noreply.github.com>
Date: Fri, 31 May 2024 18:51:25 -0700
Subject: [PATCH 2/2] Update leakyparallel.py

---
 snntorch/_neurons/leakyparallel.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/snntorch/_neurons/leakyparallel.py b/snntorch/_neurons/leakyparallel.py
index 93b89b37..531671ee 100644
--- a/snntorch/_neurons/leakyparallel.py
+++ b/snntorch/_neurons/leakyparallel.py
@@ -12,8 +12,6 @@ class LeakyParallel(nn.Module):
     Membrane potential decays exponentially with rate beta.
     For :math:`U[T] > U_{\\rm thr} ⇒ S[T+1] = 1`.
 
-    Whenever the neuron emits a spike we have:
-
     .. math::
 
             U[t+1] = βU[t] + I_{\\rm in}[t+1]
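For reference, a minimal usage sketch of `LeakyParallel` consistent with the docstring this patch series edits: the input carries the full sequence at once, of shape (L, N, H_in), and only the output spike train is returned. The sizes below are illustrative, and the constructor keywords (`input_size`, `hidden_size`, `beta`) are assumed from the parameter descriptions above rather than taken from this patch::

    import torch
    import snntorch as snn

    # Illustrative sizes; these are assumptions, not values from the patch.
    num_steps = 5     # L: sequence length
    batch_size = 2    # N: batch size
    num_inputs = 784  # H_in: input_size
    num_hidden = 128  # H_out: hidden_size

    # beta is clipped to [0,1] and cloned into weight_hh_l at initialization,
    # as described in the docstring edited by this patch.
    lif = snn.LeakyParallel(input_size=num_inputs,
                            hidden_size=num_hidden,
                            beta=0.9)

    # The whole sequence is processed in a single call: shape (L, N, H_in).
    x = torch.rand(num_steps, batch_size, num_inputs)

    # Only the output spike train is returned, of shape (L, N, H_out);
    # membrane potential is not accessible by default.
    spk = lif(x)
    print(spk.shape)  # expected: torch.Size([5, 2, 128])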