Skip to content

Commit

Permalink
Propagate #2621
Browse files Browse the repository at this point in the history
  • Loading branch information
alejoe91 committed Apr 30, 2024
1 parent a5cb02f commit d8b7fdb
Show file tree
Hide file tree
Showing 4 changed files with 12 additions and 2 deletions.
1 change: 1 addition & 0 deletions doc/releases/0.100.6.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ SpikeInterface 0.100.6 release notes

Minor release with bug fixes

* Avoid np.prod in make_shared_array (#2621)
* Improve caching of MS5 sorter (#2690)
* Allow for remove_excess_spikes to remove negative spike times (#2716)
* Update ks4 wrapper for newer versions >= 4.0.3 (#2701, #2774)
7 changes: 7 additions & 0 deletions doc/whatisnew.rst
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,18 @@ Release notes
releases/0.9.1.rst


Version 0.100.6
===============

* Minor release with bug fixes


Version 0.100.5
===============

* Minor release with bug fixes


Version 0.100.4
===============

Expand Down
4 changes: 3 additions & 1 deletion src/spikeinterface/core/core_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import json
from copy import deepcopy

from math import prod

import numpy as np
from tqdm import tqdm
Expand Down Expand Up @@ -163,7 +164,8 @@ def make_shared_array(shape, dtype):
from multiprocessing.shared_memory import SharedMemory

dtype = np.dtype(dtype)
nbytes = int(np.prod(shape) * dtype.itemsize)
shape = tuple(int(x) for x in shape) # We need to be sure that shape comes in int instead of numpy scalars
nbytes = prod(shape) * dtype.itemsize
shm = SharedMemory(name=None, create=True, size=nbytes)
arr = np.ndarray(shape=shape, dtype=dtype, buffer=shm.buf)
arr[:] = 0
Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/core/waveform_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -483,7 +483,7 @@ def extract_waveforms_to_single_buffer(
if sparsity_mask is None:
num_chans = recording.get_num_channels()
else:
num_chans = max(np.sum(sparsity_mask, axis=1))
num_chans = int(max(np.sum(sparsity_mask, axis=1))) # This is a numpy scalar, so we cast to int
shape = (num_spikes, nsamples, num_chans)

if mode == "memmap":
Expand Down

0 comments on commit d8b7fdb

Please sign in to comment.